| Column | Type | Range / distinct values |
| --- | --- | --- |
| branch_name | stringclasses | 22 values |
| content | stringlengths | 18 to 81.8M |
| directory_id | stringlengths | 40 to 40 |
| languages | listlengths | 1 to 36 |
| num_files | int64 | 1 to 7.38k |
| repo_language | stringclasses | 151 values |
| repo_name | stringlengths | 7 to 101 |
| revision_id | stringlengths | 40 to 40 |
| snapshot_id | stringlengths | 40 to 40 |
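Each row below lists its fields in the schema order above (branch_name, content, directory_id, languages, num_files, repo_language, repo_name, revision_id, snapshot_id). The `content` cell packs an entire repository into a single string: an optional `<repo_name>owner/repo` header followed by `<file_sep>/path/to/file` markers, each introducing one file's text. As a minimal sketch — not part of the dataset itself; the helper name and the sample cell are made up for illustration — such a cell could be split back into per-file pieces roughly like this:

```python
import re

def split_repo_content(content: str):
    """Split one `content` cell into (path, source) pairs.

    Assumes the layout visible in the rows below:
    <repo_name>owner/repo<file_sep>/path/one ...<file_sep>/path/two ...
    In this flattened dump the newline after each path shows up as a space,
    so the first whitespace after the marker is treated as the end of the path.
    """
    body = re.sub(r"^<repo_name>[^<]*", "", content)  # drop the repo header if present
    files = []
    for chunk in body.split("<file_sep>"):
        if not chunk.strip():
            continue
        path, _, source = chunk.partition(" ")
        files.append((path, source))
    return files

# Illustrative usage on a tiny made-up cell:
row = "<repo_name>owner/repo<file_sep>/README.md # Demo<file_sep>/hello.py print('hi')"
for path, source in split_repo_content(row):
    print(path, "->", source)
```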
refs/heads/main
<repo_name>Bwise1/github_actions<file_sep>/.github/workflows/debug.yml
name: Debug
on: []
jobs:
  debug:
    name: Debug
    runs-on: ubuntu-latest
    steps:
      - name: Check out code
        uses: actions/checkout@v2
      - name: Run tmate
        uses: mxschmitt/action-tmate@v2
3d6d3851d86c6bb7ac4e4f66344eb7a8e5a2b7c3
[ "YAML" ]
1
YAML
Bwise1/github_actions
2305bcd222b2106941153a6f933aea8ab852c7ed
826dc9f3b4c35e9aec8d81374f2e33adc25832d9
refs/heads/master
<repo_name>meszarosadel/PlanningPokerV2<file_sep>/app/src/main/java/com/example/planningpokerfb/FragmentUser.java package com.example.planningpokerfb; import android.os.Bundle; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import androidx.fragment.app.Fragment; import androidx.recyclerview.widget.LinearLayoutManager; import androidx.recyclerview.widget.RecyclerView; import com.example.planningpokerfb.Adapters.RecyclerViewGroupAdapterUser; import com.example.planningpokerfb.DatabaseHelper.FirebaseDatabaseHelper; import com.example.planningpokerfb.Models.Groups; import com.google.firebase.database.DataSnapshot; import com.google.firebase.database.DatabaseError; import com.google.firebase.database.DatabaseReference; import com.google.firebase.database.FirebaseDatabase; import com.google.firebase.database.ValueEventListener; import java.util.ArrayList; public class FragmentUser extends Fragment { FirebaseDatabaseHelper myDb; RecyclerView recyclerView; @Override public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) { View view = inflater.inflate(R.layout.fragment_fragment_user, container, false); myDb = new FirebaseDatabaseHelper(); recyclerView = view.findViewById(R.id.rv_user); recyclerView.setLayoutManager(new LinearLayoutManager(getActivity())); initGroups(); return view; } private void initGroups(){ final FirebaseDatabase database = FirebaseDatabase.getInstance(); DatabaseReference ref = database.getReference("Groups"); ref.addValueEventListener(new ValueEventListener() { ArrayList<Groups> gNames = new ArrayList<>(); @Override public void onDataChange(DataSnapshot dataSnapshot) { for (DataSnapshot productSnapshot: dataSnapshot.getChildren()) { Groups product = productSnapshot.getValue(Groups.class); gNames.add(product); } RecyclerViewGroupAdapterUser mAdapter = new RecyclerViewGroupAdapterUser(getActivity(), gNames); recyclerView.setAdapter(mAdapter); } @Override public void onCancelled(DatabaseError databaseError) { System.out.println("The read failed: " + databaseError.getCode()); } }); } } <file_sep>/app/src/main/java/com/example/planningpokerfb/Models/UserRole.java package com.example.planningpokerfb.Models; import com.google.firebase.database.DatabaseReference; import com.google.firebase.database.FirebaseDatabase; public class UserRole { public String id; public String email; public String role; public UserRole(){ } public UserRole(String id, String email, String role){ this.id = id; this.email=email; this.role=role; } public String getId() { return id; } public String getEmail() { return email; } public String getRole() { return role; } } <file_sep>/app/src/main/java/com/example/planningpokerfb/Models/Groups.java package com.example.planningpokerfb.Models; import java.util.ArrayList; import java.util.List; public class Groups { private String groupId; private String groupName; private boolean active; private int timeSpan; private List<String> userIds; public String getGroupId(){ return this.groupId; } public String getGroupName(){ return this.groupName; } public boolean isActive(){ return this.active; } public int getTimeSpan(){ return this.timeSpan; } public Groups(String groupId, String groupName, boolean active, int timeSpan){ this.groupId = groupId; this.groupName = groupName; this.active = active; this.timeSpan = timeSpan; this.userIds = new ArrayList<>(); } public void addUser(String userId){ this.userIds.add(userId); } public Groups(){} }<file_sep>/settings.gradle include ':app' 
rootProject.name='PlanningPokerFB'
e2937514256b7bd26a662719e39c671f76c2d910
[ "Java", "Gradle" ]
4
Java
meszarosadel/PlanningPokerV2
f443b25a1ebf8055242e958a32eff6ddcc23fa25
5cdfd99d2526501e902c71b6cd5647fc88821367
refs/heads/master
<repo_name>abanob123/openCV<file_sep>/opencv.sh
#!/bin/bash

# INSTALL THE DEPENDENCIES

# Build tools:
sudo apt-get install -y build-essential cmake pkg-config

# GUI (if you want to use GTK instead of Qt, replace 'qt5-default' with 'libgtkglext1-dev' and remove '-DWITH_QT=ON' option in CMake):
sudo apt-get install -y qt5-default libvtk6-dev libgtk2.0-dev

# Media I/O:
sudo apt-get install -y zlib1g-dev libjpeg-dev libwebp-dev libpng-dev libtiff5-dev libjasper-dev libopenexr-dev libgdal-dev ffmpeg gstreamer1.0

# Video I/O:
sudo apt-get install -y libdc1394-22-dev libavcodec-dev libavformat-dev libswscale-dev libtheora-dev libvorbis-dev libxvidcore-dev libx264-dev yasm libopencore-amrnb-dev libopencore-amrwb-dev libv4l-dev libxine2-dev

# Parallelism and linear algebra libraries:
sudo apt-get install -y libtbb2 libtbb-dev libeigen3-dev octave

# Python:
sudo apt-get install -y python-dev python-tk python-numpy python3-dev python3-tk python3-numpy pyside-tools

# Java:
sudo apt-get install -y ant default-jdk

# Documentation:
sudo apt-get install -y doxygen

# KEEP UBUNTU OR DEBIAN UP TO DATE
sudo apt-get -y update
sudo apt-get -y upgrade
sudo apt-get -y dist-upgrade
sudo apt-get -y autoremove

# INSTALL THE LIBRARY
sudo apt-get install -y unzip wget
cd ~
wget https://github.com/opencv/opencv/archive/3.2.0.zip
unzip 3.2.0.zip
rm 3.2.0.zip
mv opencv-3.2.0 OpenCV
cd OpenCV
mkdir build
cd build
cmake -DWITH_QT=ON -DWITH_OPENGL=ON -DFORCE_VTK=ON -DWITH_TBB=ON -DWITH_GDAL=ON -DWITH_XINE=ON -DBUILD_EXAMPLES=ON -DENABLE_PRECOMPILED_HEADERS=OFF ..
make
sudo make install
sudo ldconfig

# Test openCV
python OpenCV/samples/python/opencv_version.py

# This script is based on <NAME>'s script >> https://milq.github.io/install-opencv-ubuntu-debian/
# Feel free to copy it, share it, edit it
<file_sep>/README.md
# It's a script to install openCV on Debian and Debian-based distros
#- The script was tested on Kali Linux 2.0 (sana) and works fine
#- After installation it should print the openCV version and a welcome message
#- Use the following commands to install the library (using root privileges):
#wget https://raw.githubusercontent.com/EmadGKamel/openCV/master/opencv.sh
#chmod 700 opencv.sh
#./opencv.sh
7cdd93a86ea6d7019a04716d50037126dbae8f51
[ "Markdown", "Shell" ]
2
Markdown
abanob123/openCV
7cd39d65696c44aee5e54d6bcd769b9dab847c7b
ce5e702446f18859d7bcdfc0209535432a94c2e6
refs/heads/master
<repo_name>flapatate/analyseur_MI<file_sep>/README.md
"# analyseur_MI
Compiles the MIs of a corpus"
d8b578410fe095162e6aec852167deeca0d83086
[ "Markdown" ]
1
Markdown
flapatate/analyseur_MI
181e4430b30a2b3183ac31a6a4b8b9286d06e658
5998119e83b106156713457d6d5452a0f65094f6
refs/heads/master
<repo_name>truemail-rb/truemail-ruby-client<file_sep>/spec/support/helpers/configuration_helper.rb # frozen_string_literal: true require 'securerandom' module ConfigurationHelper def configuration_block(**configuration_settings) lambda do |config| configuration_settings.each do |attribute, value| config.public_send(:"#{attribute}=", value) end end end def create_token SecureRandom.uuid end def configure_client(**configuration_settings) Truemail::Client.reset_configuration! configuration_settings[:host] = FFaker::Internet.domain_name unless configuration_settings[:host] configuration_settings[:token] = create_token unless configuration_settings[:token] Truemail::Client.configure(&configuration_block(**configuration_settings)) end def client_configuration Truemail::Client.configuration end end <file_sep>/CHANGELOG.md # Changelog The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). ## [0.5.1] - 2023-08-14 ### Updated - Updated development dependencies - Updated gemspecs - Updated `rubocop`/`codeclimate`/`circleci` configs - Updated gem version ## [0.5.0] - 2023-02-18 ### Added - Added supporting of latest Ruby 3.2.0 - Added new bunch of project linters - Added auto deploy to RubyGems - Added auto creating release notes on GitHub ### Updated - Updated development dependencies - Updated gemspecs - Updated `rubocop`/`codeclimate`/`circleci` configs - Updated gem version ### Removed - Removed `overcommit` dependency ## [0.4.1] - 2022-03-23 ### Added - Development environment guide ### Changed - Updated gemspecs - Updated `rubocop`/`codeclimate`/`circleci` configs - Updated gem version ## [0.4.0] - 2022-01-26 ### Changed - Updated gemspecs - Updated `rubocop`/`codeclimate`/`simplecov`/`circleci` configs - Updated gem version ## [0.3.4] - 2021-09-16 ### Changed - Updated gem dependencies - Updated `rubocop`/`codeclimate` config - Updated gem version ## [0.3.3] - 2021-07-13 ### Fixed - Security vulnerability [CVE-2021-32740](https://github.com/advisories/GHSA-jxhc-q857-3j6g) ### Changed - Updated gem dependencies - Updated `rubocop`/`codeclimate` config - Updated gem version ## [0.3.2] - 2021-05-19 ### Fixed - Gem syntax compatibility with Ruby 3.x ### Changed - Updated gem dependencies - Updated `rubocop`/`codeclimate` config - Updated gem version ## [0.3.1] - 2021-05-08 ### Changed - Updated gem dependencies - Updated `rubocop`/`codeclimate` config - Updated gem version ## [0.3.0] - 2020-10-28 ### Changed Truemail client sends encoded uri params, follows [RFC 3986, sec 2.1](https://tools.ietf.org/html/rfc3986#section-2.1). 
- Updated `Truemail::Client::Http#request_uri` - Updated gem dependencies - Updated gem version, changelog, docs ## [0.2.1] - 2020-10-05 ### Fixed - Removed auth headers for public endpoint request ### Changed - `Truemail::Client::Http#run` - Updated gem version ## [0.2.0] - 2020-10-04 ### Added Ability to use Truemail healthcheck endpoint - Added `Truemail::Client.server_healthy?` ### Changed - Updated `Truemail::Client::Http#uri` - Updated gem dependencies - Updated gem documentation - Updated gem version ## [0.1.1] - 2020-07-21 ### Changed - Updated gem dependencies - Updated gem documentation - Updated gem version ## [0.1.0] - 2020-03-29 ### First release - implemented first version of Truemail Ruby client <file_sep>/spec/support/helpers/configuration_helper_spec.rb # frozen_string_literal: true RSpec.describe ConfigurationHelper, type: :helper do describe '#configuration_block' do let(:configuration_params) { { param_1: 1, param_2: 2 } } let(:configuration_instance) { ::Struct.new(*configuration_params.keys).new } before { configuration_block(**configuration_params).call(configuration_instance) } specify { expect(configuration_block).to be_an_instance_of(::Proc) } it 'sets configuration instance attributes' do configuration_params.each do |attribute, value| expect(configuration_instance.public_send(attribute)).to eq(value) end end end describe '#create_token' do it 'returns secure token' do expect(SecureRandom).to receive(:uuid).and_call_original expect(create_token).to be_an_instance_of(::String) end end describe '#configure_client' do subject(:configuration_builder) { configure_client(**params) } let(:params) { {} } context 'with default params' do it 'configures client, returns configuration instance with random host, token' do expect(Truemail::Client).to receive(:reset_configuration!).and_call_original expect(configuration_builder).to be_an_instance_of(Truemail::Client::Configuration) expect(configuration_builder.host).not_to be_nil expect(configuration_builder.token).not_to be_nil end end context 'with custom params' do let(:host) { FFaker::Internet.domain_name } let(:token) { <PASSWORD>_token } let(:params) { { host: host, token: token } } it 'returns configuration instance with custom verifier email' do expect(Truemail::Client).to receive(:reset_configuration!).and_call_original expect(configuration_builder).to be_an_instance_of(Truemail::Client::Configuration) expect(configuration_builder.host).to eq(host) expect(configuration_builder.token).to eq(token) end end end describe '#client_configuration' do context 'when configuration has been configured' do before { configure_client } specify { expect(client_configuration).to be_an_instance_of(Truemail::Client::Configuration) } end context 'when configuration has not been configured' do specify { expect(client_configuration).to be_nil } end end end <file_sep>/spec/truemail/client/version_spec.rb # frozen_string_literal: true RSpec.describe Truemail::Client::VERSION do specify { is_expected.not_to be_nil } end <file_sep>/spec/truemail/client/configuration/argument_error_spec.rb # frozen_string_literal: true RSpec.describe Truemail::Client::Configuration::ArgumentError do subject(:argument_error_instance) { described_class.new('arg_value', 'arg_name=') } specify { expect(described_class).to be < ::StandardError } specify { expect(argument_error_instance).to be_an_instance_of(described_class) } specify { expect(argument_error_instance.to_s).to eq('arg_value is not a valid arg_name') } end 
<file_sep>/spec/truemail/client/configuration_spec.rb # frozen_string_literal: true RSpec.describe Truemail::Client::Configuration do subject(:configuration_instance) { described_class.new } let(:host) { FFaker::Internet.domain_name } let(:token) { <PASSWORD>_token } describe 'defined constants' do specify { expect(described_class).to be_const_defined(:DEFAULT_PORT) } end describe '.new' do %i[secure_connection host port token].each do |attribute| it "has attr_accessor :#{attribute}" do expect(configuration_instance.respond_to?(attribute)).to be(true) expect(configuration_instance.respond_to?(:"#{attribute}=")).to be(true) end end context 'when block passed' do subject(:configuration_instance) { described_class.new(&configuration_block(host: host, token: token)) } it 'accepts block, sets values' do expect(configuration_instance.host).to eq(host) expect(configuration_instance.token).to eq(token) end end describe 'configuration cases' do context 'when auto configuration' do it 'sets configuration instance with default configuration template' do expect(configuration_instance.secure_connection).to be(false) expect(configuration_instance.port).to eq(Truemail::Client::Configuration::DEFAULT_PORT) expect(configuration_instance.host).to be_nil expect(configuration_instance.token).to be_nil end end context 'when manual independent configuration' do shared_examples 'sets accessor' do it 'sets accessor' do expect { configuration_instance.public_send("#{accessor}=", to_value) } .to change(configuration_instance, accessor) .from(from_value) .to(to_value) end end shared_examples 'raises argument error' do specify do invalid_argument = -42 expect { configuration_instance.public_send("#{accessor}=", invalid_argument) } .to raise_error( Truemail::Client::Configuration::ArgumentError, "#{invalid_argument} is not a valid #{accessor}" ) end end describe '#secure_connection' do let(:accessor) { :secure_connection } let(:from_value) { false } let(:to_value) { true } include_examples 'sets accessor' end describe '#host' do let(:accessor) { :host } context 'with valid host' do let(:from_value) { nil } let(:to_value) { host } include_examples 'sets accessor' end context 'with invalid host' do include_examples 'raises argument error' end end describe '#port' do let(:accessor) { :port } context 'with valid port' do let(:port) { 8080 } let(:from_value) { Truemail::Client::Configuration::DEFAULT_PORT } let(:to_value) { port } include_examples 'sets accessor' end context 'with invalid port' do include_examples 'raises argument error' end end describe '#token' do let(:accessor) { :token } context 'with valid token' do let(:from_value) { nil } let(:to_value) { token } include_examples 'sets accessor' end context 'with invalid token' do include_examples 'raises argument error' end end end end end describe '#complete?' 
do context 'when required args not passed' do specify { expect(configuration_instance.complete?).to be(false) } end context 'when required args passed' do let(:configuration_instance) { described_class.new(&configuration_block(host: host, token: token)) } specify { expect(configuration_instance.complete?).to be(true) } end end end <file_sep>/.github/DEVELOPMENT_ENVIRONMENT_GUIDE.md # Development environment guide ## Preparing Clone `truemail-ruby-client` repository: ```bash git clone https://github.com/truemail-rb/truemail-ruby-client.git cd truemail-ruby-client ``` Configure latest Ruby environment: ```bash echo 'ruby-3.1.1' > .ruby-version cp .circleci/gemspec_latest truemail-client.gemspec ``` ## Installing dependencies ```bash bundle install ``` ## Commiting Commit your changes excluding `.ruby-version`, `truemail-client.gemspec` ```bash git add . ':!.ruby-version' ':!truemail-client.gemspec' git commit -m 'Your new awesome truemail-client feature' ``` <file_sep>/spec/truemail/client/configuration/error_spec.rb # frozen_string_literal: true RSpec.describe Truemail::Client::Configuration::Error do specify { expect(described_class).to be < ::StandardError } end <file_sep>/lib/truemail/client/http.rb # frozen_string_literal: true module Truemail module Client class Http require 'uri' require 'net/http' require 'json' URI_ATTRS = %i[secure_connection host port endpoint uri_params].freeze USER_AGENT = 'Truemail Ruby client' MIME_TYPE = 'application/json' VALIDATION_ENDPOINT = '/' HEALTHCHECK_ENDPOINT = '/healthcheck' def initialize(endpoint = Truemail::Client::Http::VALIDATION_ENDPOINT, **uri_params) Truemail::Client::Http::URI_ATTRS[0..2].each do |attribute| instance_variable_set(:"@#{attribute}", Truemail::Client.configuration.public_send(attribute)) end @endpoint = endpoint @uri_params = uri_params end def run ::Net::HTTP.start(uri.host, uri.port, use_ssl: secure_connection) do |http| request = ::Net::HTTP::Get.new(uri) request['User-Agent'] = Truemail::Client::Http::USER_AGENT request['Accept'] = Truemail::Client::Http::MIME_TYPE request['Content-Type'] = Truemail::Client::Http::MIME_TYPE unless endpoint.eql?(Truemail::Client::Http::HEALTHCHECK_ENDPOINT) request['Authorization'] = Truemail::Client.configuration.token end http.request(request) end.body rescue => error { truemail_client_error: error }.to_json end private attr_reader(*Truemail::Client::Http::URI_ATTRS) def request_uri ::URI::HTTP.build( path: endpoint, query: uri_params.empty? ? nil : ::URI.encode_www_form(uri_params) ).request_uri end def uri @uri ||= URI("#{secure_connection ? 
'https' : 'http'}://#{host}:#{port}#{request_uri}") end end end end <file_sep>/spec/support/config/bundler.rb # frozen_string_literal: true require 'bundler/setup' <file_sep>/spec/support/helpers/request_helper_spec.rb # frozen_string_literal: true RSpec.describe RequestHelper, type: :helper do describe '#have_sent_request_with' do let(:secure_connection) { true } let(:host) { FFaker::Internet.domain_name } let(:port) { ::Random.rand(80..8080) } let(:token) { create_token } let(:method) { :get } let(:endpoint) { '/some_endpoint' } let(:request_params) { { email: FFaker::Internet.email } } let(:accept) { 'accept_header' } let(:content_type) { 'content_type_header' } let(:user_agent) { 'user_agent_header' } let(:configuration_settings) do { secure_connection: secure_connection, host: host, port: port, token: token } end let(:request_settings) do { method: method, accept: accept, content_type: content_type, user_agent: user_agent, endpoint: endpoint, type: endpoint_type, params: request_params } end def request(secure_connection:, host:, port:, token:, accept:, content_type:, user_agent:, endpoint:, type:, params:, **) # rubocop:disable Metrics/ParameterLists ::Net::HTTP.start(host, port, use_ssl: secure_connection) do |http| path = ::URI::HTTP.build( path: endpoint, query: params.empty? ? nil : ::URI.encode_www_form(params) ).request_uri.gsub('%40', '@') request = ::Net::HTTP::Get.new(URI("#{secure_connection ? 'https' : 'http'}://#{host}:#{port}#{path}")) request['User-Agent'] = user_agent request['Accept'] = accept request['Content-Type'] = content_type request['Authorization'] = token if type.eql?(:private) http.request(request) end end context 'when request is equal to mock' do shared_examples 'checks request settings, stubs current request' do it 'checks request settings, stubs current request' do have_sent_request_with(**request_configuration_settings, **request_settings) expect(request(**configuration_settings, **request_settings)).to match_json_schema('connection_successful') end end context 'when all settings passed' do let(:request_configuration_settings) { configuration_settings } let(:endpoint_type) { :private } include_examples 'checks request settings, stubs current request' end context 'when configuration settings not passed' do let(:request_configuration_settings) { {} } let(:endpoint_type) { :public } before { configure_client(**configuration_settings) } include_examples 'checks request settings, stubs current request' end context 'when email not passed in params' do let(:request_configuration_settings) { configuration_settings } let(:request_params) { {} } let(:endpoint_type) { :public } it 'checks request settings, stubs current request' do have_sent_request_with(**request_configuration_settings, **request_settings) response = request(**configuration_settings, **request_settings) expect(response.code).to eq('200') expect(response.body).to be_empty end end end context 'when request is not equal to mock' do let(:endpoint_type) { :public } specify do expect { request(**configuration_settings, **request_settings) } .to raise_error(WebMock::NetConnectNotAllowedError) end end end end <file_sep>/spec/spec_helper.rb # frozen_string_literal: true rspec_custom = ::File.join(File.dirname(__FILE__), 'support/**/*.rb') ::Dir[::File.expand_path(rspec_custom)].sort.each { |file| require file unless file[/\A.+_spec\.rb\z/] } require 'truemail/client' RSpec.configure do |config| config.expect_with(:rspec) do |expectations| expectations.include_chain_clauses_in_custom_matcher_descriptions = 
true expectations.syntax = :expect end config.mock_with(:rspec) do |mocks| mocks.verify_partial_doubles = true end config.include ConfigurationHelper config.include RequestHelper config.example_status_persistence_file_path = '.rspec_status' config.disable_monkey_patching! config.order = :random config.before { Truemail::Client.reset_configuration! } ::Kernel.srand(config.seed) end <file_sep>/lib/truemail/client.rb # frozen_string_literal: true require_relative '../truemail/client/version' require_relative '../truemail/client/configuration' require_relative '../truemail/client/http' module Truemail module Client INCOMPLETE_CONFIG = 'required args not passed' NOT_CONFIGURED = 'use Truemail::Client.configure before' class << self def configuration(&block) @configuration ||= begin return unless block configuration = Truemail::Client::Configuration.new(&block) raise_unless(configuration.complete?, Truemail::Client::INCOMPLETE_CONFIG) configuration end end def configure(&block) configuration(&block) end def reset_configuration! @configuration = nil end def validate(email) raise_unless(Truemail::Client.configuration, Truemail::Client::NOT_CONFIGURED) Truemail::Client::Http.new(email: email).run end def server_healthy? raise_unless(Truemail::Client.configuration, Truemail::Client::NOT_CONFIGURED) Truemail::Client::Http.new(Truemail::Client::Http::HEALTHCHECK_ENDPOINT).run.empty? end private def raise_unless(condition, message) raise Truemail::Client::Configuration::Error, message unless condition end end end end <file_sep>/README.md # ![Truemail web API client library for Ruby](https://truemail-rb.org/assets/images/truemail_logo.png) [![Maintainability](https://api.codeclimate.com/v1/badges/ccc7167f4f49d4a10146/maintainability)](https://codeclimate.com/github/truemail-rb/truemail-ruby-client/maintainability) [![Test Coverage](https://api.codeclimate.com/v1/badges/ccc7167f4f49d4a10146/test_coverage)](https://codeclimate.com/github/truemail-rb/truemail-ruby-client/test_coverage) [![CircleCI](https://circleci.com/gh/truemail-rb/truemail-ruby-client/tree/master.svg?style=svg)](https://circleci.com/gh/truemail-rb/truemail-ruby-client/tree/master) [![Gem Version](https://badge.fury.io/rb/truemail-client.svg)](https://badge.fury.io/rb/truemail-client) [![Downloads](https://img.shields.io/gem/dt/truemail-client.svg?colorA=004d99&colorB=0073e6)](https://rubygems.org/gems/truemail-client) [![Gitter](https://badges.gitter.im/truemail-rb/community.svg)](https://gitter.im/truemail-rb/community?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) [![GitHub](https://img.shields.io/github/license/truemail-rb/truemail-ruby-client)](LICENSE.txt) [![Contributor Covenant](https://img.shields.io/badge/Contributor%20Covenant-v1.4%20adopted-ff69b4.svg)](CODE_OF_CONDUCT.md) `truemail-client` gem - [Truemail web API](https://github.com/truemail-rb/truemail-rack) client library for Ruby. > Actual and maintainable documentation :books: for developers is living [here](https://truemail-rb.org/truemail-ruby-client). 
## Table of Contents - [Requirements](#requirements) - [Installation](#installation) - [Usage](#usage) - [Setting global configuration](#setting-global-configuration) - [Establishing connection with Truemail API](#establishing-connection-with-truemail-api) - [Checking server health status](#checking-server-health-status) - [Additional features](#additional-features) - [Truemail family](#truemail-family) - [Contributing](#contributing) - [License](#license) - [Code of Conduct](#code-of-conduct) - [Versioning](#versioning) - [Changelog](CHANGELOG.md) ## Requirements Ruby MRI 2.5.0+ ## Installation Add this line to your application's Gemfile: ```ruby gem 'truemail-client' ``` And then execute: ```bash bundle ``` Or install it yourself as: ```bash gem install truemail-client ``` ## Usage To have an access for `Truemail::Client` you must configure it first as in the example below: ### Setting global configuration ```ruby require 'truemail/client' Truemail::Client.configure do |config| # Required parameter (String). It should be a hostname or an ip address where Truemail server runs config.host = 'example.com' # Required parameter (String). It should be valid Truemail server access token config.token = 'token' # Optional parameter (Boolean). By default it is equal false config.secure_connection = true # Optional parameter (Integer). By default it is equal 9292 config.port = 80 end ``` ### Establishing connection with Truemail API After successful configuration, you can establish connection with Truemail server. ```ruby Truemail::Client.validate('<EMAIL>') ``` ```json { "date": "2020-02-26 17:00:56 +0200", "email": "<EMAIL>", "validation_type": "smtp", "success": true, "errors": null, "smtp_debug": null, "configuration": { "validation_type_by_domain": null, "whitelist_validation": false, "whitelisted_domains": null, "blacklisted_domains": null, "blacklisted_mx_ip_addresses": null, "dns": null, "smtp_safe_check": false, "email_pattern": "default gem value", "smtp_error_body_pattern": "default gem value", "not_rfc_mx_lookup_flow": false } } ``` `Truemail::Client.validate` always returns JSON data. If something goes wrong you will receive JSON with error details: ```json { "truemail_client_error": "error details" } ``` ### Checking server health status After successful configuration, you can check health-status of Truemail server. ```ruby Truemail::Client.server_healthy? => true ``` ### Additional features #### Read global configuration After successful configuration, you can read current `Truemail::Client` configuration instance anywhere in your application. ```ruby Truemail::Client.configuration => #<Truemail::Client::Configuration:0x000055eafc588878 @host="example.com", @port=80, @secure_connection=true, @token="<PASSWORD>"> ``` #### Update global configuration ```ruby Truemail::Client.configuration.port = 8080 => 8080 Truemail::Client.configuration => #<Truemail::Client::Configuration:0x000055eafc588878 @host="example.com", @port=8080, @secure_connection=true, @token="<PASSWORD>"> ``` #### Reset global configuration Also you can reset Truemail::Client configuration. ```ruby Truemail::Client.reset_configuration! 
=> nil Truemail::Client.configuration => nil ``` --- ## Truemail family All Truemail solutions: <https://truemail-rb.org> | Name | Type | Description | | --- | --- | --- | | [truemail](https://github.com/truemail-rb/truemail) | ruby gem | Configurable framework agnostic plain Ruby email validator, main core | | [truemail-go](https://github.com/truemail-rb/truemail-go) | go package | Configurable Golang email validator, main core | | [truemail server](https://github.com/truemail-rb/truemail-rack) | ruby app | Lightweight rack based web API wrapper for Truemail | | [truemail-rack-docker](https://github.com/truemail-rb/truemail-rack-docker-image) | docker image | Lightweight rack based web API [dockerized image](https://hub.docker.com/r/truemail/truemail-rack) :whale: of Truemail server | | [truemail-crystal-client](https://github.com/truemail-rb/truemail-crystal-client) | crystal shard | Truemail web API client library for Crystal | | [truemail-java-client](https://github.com/truemail-rb/truemail-java-client) | java lib | Truemail web API client library for Java | | [truemail-rspec](https://github.com/truemail-rb/truemail-rspec) | ruby gem | Truemail configuration and validator RSpec helpers | ## Contributing Bug reports and pull requests are welcome on GitHub at <https://github.com/truemail-rb/truemail-ruby-client>. This project is intended to be a safe, welcoming space for collaboration, and contributors are expected to adhere to the [Contributor Covenant](http://contributor-covenant.org) code of conduct. Please check the [open tickets](https://github.com/truemail-rb/truemail-ruby-client/issues). Be sure to follow Contributor Code of Conduct below and our [Contributing Guidelines](CONTRIBUTING.md). ## License The gem is available as open source under the terms of the [MIT License](https://opensource.org/licenses/MIT). ## Code of Conduct Everyone interacting in the `truemail-ruby-client` project’s codebases, issue trackers, chat rooms and mailing lists is expected to follow the [code of conduct](CODE_OF_CONDUCT.md). ## Versioning `truemail-ruby-client` uses [Semantic Versioning 2.0.0](https://semver.org) <file_sep>/spec/support/helpers/request_helper.rb # frozen_string_literal: true module RequestHelper def have_sent_request_with(**request_settings) # rubocop:disable Metrics/AbcSize, Metrics/MethodLength request = Request.new(**request_settings) HOST_PARAMS.each do |attribute| request.public_send(:"#{attribute}=", request_settings[attribute] || client_configuration.public_send(attribute)) end authorization = request.type.eql?(:public) ? {} : { 'Authorization' => request.token } request_params = request.params path = ::URI::HTTP.build( path: request.endpoint, query: request_params.empty? ? nil : ::URI.encode_www_form(request_params) ).request_uri url = "#{request.secure_connection ? 
'https' : 'http'}://#{request.host}:#{request.port}#{path}" stub_request(request.method, url).with( headers: { 'Accept' => request.accept, 'Content-Type' => request.content_type, 'Host' => "#{request.host}:#{request.port}", 'User-Agent' => request.user_agent }.merge(authorization) ).to_return(response(**request_params)) end private HOST_PARAMS = %i[secure_connection host port token].freeze REQUEST_PARAMS = %i[method accept content_type user_agent endpoint type params].freeze Request = ::Struct.new(*(HOST_PARAMS | REQUEST_PARAMS), keyword_init: true) def body(email) # rubocop:disable Metrics/MethodLength { configuration: { blacklisted_domains: nil, blacklisted_mx_ip_addresses: nil, dns: nil, email_pattern: 'default gem value', smtp_error_body_pattern: 'default gem value', smtp_safe_check: true, validation_type_by_domain: nil, whitelist_validation: false, whitelisted_domains: nil, not_rfc_mx_lookup_flow: false }, date: ::Time.now, email: email, errors: nil, smtp_debug: nil, success: true, validation_type: 'smtp' }.to_json end def response(email: nil, **) { status: 200, body: email ? body(email) : '', headers: {} } end end <file_sep>/spec/truemail/client/http_spec.rb # frozen_string_literal: true RSpec.describe Truemail::Client::Http do describe 'defined constants' do specify { expect(described_class).to be_const_defined(:URI_ATTRS) } specify { expect(described_class).to be_const_defined(:USER_AGENT) } specify { expect(described_class).to be_const_defined(:MIME_TYPE) } specify { expect(described_class).to be_const_defined(:VALIDATION_ENDPOINT) } specify { expect(described_class).to be_const_defined(:HEALTHCHECK_ENDPOINT) } end describe '#run' do subject(:run) { described_class.new(endpoint, **request_params).run } let(:configuration_settings) { {} } let(:endpoint_type) { :private } before { configure_client(**configuration_settings) } context 'when connection successful' do shared_examples 'sends valid request to truemail api' do it 'sends valid request to truemail api' do have_sent_request_with(**request_settings) expect(run).to match_json_schema('connection_successful') end end context 'when validation endpoint' do let(:endpoint) { Truemail::Client::Http::VALIDATION_ENDPOINT } let(:request_params) { { email: FFaker::Internet.email } } let(:request_settings) do { method: :get, accept: 'application/json', content_type: 'application/json', user_agent: 'Truemail Ruby client', endpoint: endpoint, type: endpoint_type, params: request_params } end context 'when secure connection' do let(:configuration_settings) { { secure_connection: true } } include_examples 'sends valid request to truemail api' end context 'when not secure connection' do include_examples 'sends valid request to truemail api' end end context 'when healthcheck enpoint' do let(:endpoint) { Truemail::Client::Http::HEALTHCHECK_ENDPOINT } let(:endpoint_type) { :public } let(:request_params) { {} } let(:request_settings) do { method: :get, accept: 'application/json', content_type: 'application/json', user_agent: 'Truemail Ruby client', endpoint: endpoint, type: endpoint_type, params: request_params } end it 'sends valid request to truemail api' do have_sent_request_with(**request_settings) expect(run).to be_empty end end end context 'when connection fails' do let(:endpoint) { '/some_endpoint' } let(:request_params) { {} } let(:error) { 'error context' } it 'returns json with client error' do allow(::Net::HTTP).to receive(:start).and_raise(::SocketError, error) expect(run).to match_json_schema('connection_error') end end end end 
<file_sep>/lib/truemail/client/configuration.rb # frozen_string_literal: true module Truemail module Client class Configuration DEFAULT_PORT = 9292 Error = ::Class.new(::StandardError) ArgumentError = ::Class.new(::StandardError) do def initialize(arg_value, arg_name) super("#{arg_value} is not a valid #{arg_name[0..-2]}") end end attr_reader :host, :port, :token attr_accessor :secure_connection def initialize(&block) @secure_connection = false @port = Truemail::Client::Configuration::DEFAULT_PORT tap(&block) if block end %i[host port token].each do |method| define_method("#{method}=") do |argument| raise_unless( argument, __method__, method.eql?(:port) ? argument.is_a?(::Integer) && argument.positive? : argument.is_a?(::String) ) instance_variable_set(:"@#{method}", argument) end end def complete? !!host && !!token end private def raise_unless(argument_context, argument_name, condition) raise Truemail::Client::Configuration::ArgumentError.new(argument_context, argument_name) unless condition end end end end <file_sep>/truemail-client.gemspec # frozen_string_literal: true require_relative 'lib/truemail/client/version' Gem::Specification.new do |spec| spec.name = 'truemail-client' spec.version = Truemail::Client::VERSION spec.authors = ['<NAME>'] spec.email = ['<EMAIL>'] spec.summary = %(truemail-client) spec.description = %(Truemail web API client library for Ruby) spec.homepage = 'https://github.com/truemail-rb/truemail-ruby-client' spec.license = 'MIT' spec.metadata = { 'homepage_uri' => 'https://truemail-rb.org', 'changelog_uri' => 'https://github.com/truemail-rb/truemail-ruby-client/blob/master/CHANGELOG.md', 'source_code_uri' => 'https://github.com/truemail-rb/truemail-ruby-client', 'documentation_uri' => 'https://truemail-rb.org/truemail-ruby-client', 'bug_tracker_uri' => 'https://github.com/truemail-rb/truemail-ruby-client/issues' } spec.required_ruby_version = '>= 2.5.0' spec.files = `git ls-files -z`.split("\x0").select { |f| f.match(%r{^(bin|lib)/|.ruby-version|truemail.gemspec|LICENSE}) } spec.require_paths = %w[lib] spec.add_development_dependency 'ffaker', '~> 2.21' spec.add_development_dependency 'json_matchers', '~> 0.11.1' spec.add_development_dependency 'rake', '~> 13.0', '>= 13.0.6' spec.add_development_dependency 'rspec', '~> 3.12' spec.add_development_dependency 'webmock', '~> 3.18', '>= 3.18.1' end <file_sep>/lib/truemail/client/version.rb # frozen_string_literal: true module Truemail module Client VERSION = '0.5.1' end end <file_sep>/spec/truemail/client_spec.rb # frozen_string_literal: true RSpec.describe Truemail::Client do describe 'defined constants' do specify { expect(described_class).to be_const_defined(:INCOMPLETE_CONFIG) } specify { expect(described_class).to be_const_defined(:NOT_CONFIGURED) } end describe 'global configuration methods' do let(:host) { FFaker::Internet.domain_name } let(:token) { create_token } let(:config_block) { configuration_block(host: host, token: token) } describe '.configure' do subject(:configure) { described_class.configure(&config_block) } context 'without block' do let(:config_block) { nil } specify { expect { configure }.not_to change(described_class, :configuration) } specify { expect(configure).to be_nil } end context 'with block' do context 'without required parameter' do let(:config_block) { configuration_block } specify do expect { configure } .to raise_error(Truemail::Client::Configuration::Error, Truemail::Client::INCOMPLETE_CONFIG) end end context 'with invalid argument' do let(:port) { -42 } let(:config_block) { 
configuration_block(port: port) } specify do expect { configure } .to raise_error(Truemail::Client::Configuration::ArgumentError, "#{port} is not a valid port") end end context 'with valid required arguments' do specify do expect { configure } .to change(described_class, :configuration) .from(nil).to(be_instance_of(Truemail::Client::Configuration)) end it 'sets attributes into configuration instance' do expect(configure).to be_an_instance_of(Truemail::Client::Configuration) expect(described_class.configuration.host).to eq(host) expect(described_class.configuration.token).to eq(token) end end end end describe '.configuration' do subject(:configuration) { described_class.configuration } context 'when configuration was not set yet' do specify { expect(configuration).to be_nil } end context 'when configuration was successfully set' do before { described_class.configure(&config_block) } specify { expect(configuration).to be_instance_of(Truemail::Client::Configuration) } it 'accepts to rewrite current configuration settings' do secure_connection, new_host, port, new_token = true, FFaker::Internet.domain_name, 8080, create_token expect do configuration.tap(&configuration_block( secure_connection: secure_connection, host: new_host, port: port, token: new_token )) end .to change(configuration, :secure_connection) .from(false).to(secure_connection) .and change(configuration, :host) .from(host).to(new_host) .and change(configuration, :port) .from(Truemail::Client::Configuration::DEFAULT_PORT).to(port) .and change(configuration, :token) .from(token).to(new_token) end end end describe '.reset_configuration!' do before { described_class.configure(&config_block) } specify do expect { described_class.reset_configuration! } .to change(described_class, :configuration) .from(be_instance_of(Truemail::Client::Configuration)).to(nil) end end end shared_examples 'global configuration was not set' do specify do expect { subject } .to raise_error(Truemail::Client::Configuration::Error, Truemail::Client::NOT_CONFIGURED) end end describe '.validate' do subject(:validate) { described_class.validate(email) } let(:email) { FFaker::Internet.email } let(:http_instance) { instance_double('Http') } context 'when global configuration was set' do before { configure_client } it 'creates http instance, sends request' do expect(Truemail::Client::Http).to receive(:new).with(email: email).and_return(http_instance) expect(http_instance).to receive(:run) validate end end context 'when global configuration was not set' do it_behaves_like 'global configuration was not set' end end describe '.server_healthy?' do subject(:server_healthy) { described_class.server_healthy? 
} let(:http_instance) { instance_double('Http') } context 'when global configuration was set' do before do configure_client allow(Truemail::Client::Http) .to receive(:new).with(Truemail::Client::Http::HEALTHCHECK_ENDPOINT).and_return(http_instance) allow(http_instance).to receive(:run).and_return(healthcheck_result) end shared_examples 'returns server health status' do it 'returns server health status' do expect(server_healthy).to be(expectation) end end context 'when server is healthy' do let(:healthcheck_result) { '' } let(:expectation) { true } include_examples 'returns server health status' end context 'when server is not healthy' do let(:healthcheck_result) { 'some_response_context' } let(:expectation) { false } include_examples 'returns server health status' end end context 'when global configuration was not set' do it_behaves_like 'global configuration was not set' end end end
a6b657e979dd87b7f2d9b1f53e764d3c44bb5bdc
[ "Markdown", "Ruby" ]
20
Markdown
truemail-rb/truemail-ruby-client
8450137e6f705e13db08284f658e3d8cd6d37760
18ed077c704d620751d9be9630d08dd9b83c5461
refs/heads/master
<file_sep>window.onload = function () {
    var el = document.getElementById('replaystatus');
    el.onclick = function () {
        /*alert("Hello! I am an alert box!!");*/
        var h = document.getElementById('replayform')//.style.display = 'block';
        /*if (h.style.display == '') {
            h.style.display = 'block';
        } else {
            h.style.display = '';
        }*/
        h.toggle();
    };
};

/*-------------------------------------------------------------------*/
161aba195f45c1b820e9b051dde2bbcda4e61f24
[ "JavaScript" ]
1
JavaScript
stvngrcia/minit
b31173c3bf2070f42ee7d24d65b22e3f1d75b5f0
8df215f4c2f6c7724ec9555bd504371e0b369693
refs/heads/master
<repo_name>jozecuervo/rdio-spooky<file_sep>/README.md
# rdio-spooky
A sequencer for Halloween costume walk-up songs.
6ebb0f9a84ee847e3c845b513d8ba4b852658c11
[ "Markdown" ]
1
Markdown
jozecuervo/rdio-spooky
20291f3c3c9a13b83a23800558b466d50df17913
260628d83a0ea7c50190a51062f51e98fd88ad9a
refs/heads/main
<repo_name>piyushjasaiwal/Math-Garden<file_sep>/darwing.js
const BACKGROUND_COLOR = '#000000';
const LINE_COLOR = '#FFFFFF';
const WIDTH = 15;

var cur_x = 0;
var cur_y = 0;
var pre_x = 0;
var pre_y = 0;

var canvas;
var context;

function prepareCanvas() {
    // console.log('preparing canvas');
    canvas = document.getElementById('my_canvas');
    context = canvas.getContext('2d');
    context.fillStyle = BACKGROUND_COLOR;
    context.fillRect(0, 0, canvas.clientWidth, canvas.clientHeight);
    context.strokeStyle = LINE_COLOR;
    context.lineWidth = WIDTH;
    context.lineJoin = 'round';

    var isMouseDown = false;

    document.addEventListener('mousedown', function (event) {
        isMouseDown = true;
        cur_x = event.clientX - canvas.offsetLeft;
        cur_y = event.clientY - canvas.offsetTop;
    });

    document.addEventListener('mousemove', function (event) {
        if (isMouseDown) {
            pre_x = cur_x;
            cur_x = event.clientX - canvas.offsetLeft;
            pre_y = cur_y;
            cur_y = event.clientY - canvas.offsetTop;
            context.beginPath();
            context.moveTo(pre_x, pre_y);
            context.lineTo(cur_x, cur_y);
            context.closePath();
            context.stroke();
        }
    });

    document.addEventListener('mouseup', function (event) {
        isMouseDown = false;
    });

    canvas.addEventListener('mouseleave', function (event) {
        isMouseDown = false;
    });
}

function clearCanvas(){
    cur_x = 0;
    cur_y = 0;
    pre_x = 0;
    pre_y = 0;
    context.fillRect(0, 0, canvas.clientWidth, canvas.clientHeight);
}
<file_sep>/README.md
# Math-Garden

## Predicting the test image

#### Testing Image
![](https://github.com/piyushjasaiwal/Math-Garden/blob/main/screen_shots/opening_testing_image.PNG)

#### Output
![](https://github.com/piyushjasaiwal/Math-Garden/blob/main/screen_shots/predicting_image.PNG)

## Working of the project
<!-- https://github.com/piyushjasaiwal/Math-Garden/blob/main/videos/project_working.mp4 -->
https://user-images.githubusercontent.com/61797428/124391016-0cdba780-dd0c-11eb-88b3-bdecd3f7f458.mp4
380a9f2d101d9b4de9c3a8ee48db56e3af784372
[ "Markdown", "JavaScript" ]
2
Markdown
piyushjasaiwal/Math-Garden
e6332d783a20508f76a27a8be5d9065747cbeef2
c39bf34f093353a7e62450a904e5f26507a319fc
refs/heads/master
<file_sep># Andris Tetris game made by <NAME> <file_sep>using System.Collections; using System.Collections.Generic; using UnityEngine; public class SpawningTetrisItem : MonoBehaviour { public GameObject[] tetrisItem; private TetrisItemI figure; private int IndexOfItem; public bool isspawned; // Start is called before the first frame update void Start() { SpawningTheFigure(); figure = FindObjectOfType<TetrisItemI>(); } private void OnTriggerExit2D(Collider2D other) { isspawned = false; other.enabled = false; } private void OnTriggerEnter2D(Collider2D other) { isspawned = true; } private void OnTriggerStay2D(Collider2D collision) { isspawned = true; } private void SpawningTheFigure() { IndexOfItem = Random.Range(0, tetrisItem.Length); tetrisItem[IndexOfItem].transform.position = transform.position; Instantiate(tetrisItem[IndexOfItem]); isspawned = true; } // Update is called once per frame void Update() { if (!isspawned & figure.issnapped) { SpawningTheFigure(); } } } <file_sep>using System.Collections; using System.Collections.Generic; using UnityEngine; public class TetrisItemI : MonoBehaviour { public bool issnapped; public bool following; protected Grid gridobj; private SpawningTetrisItem[] spawner; private AudioSource clipToPlay; public float offset = 0.05f; public AudioClip click; void Awake() { clipToPlay = GetComponent<AudioSource>(); issnapped = false; spawner = FindObjectsOfType<SpawningTetrisItem>(); gridobj = FindObjectOfType<Grid>(); following = false; offset += 10; } // Update is called once per frame void Update () { if(Input.GetMouseButtonDown(0) && ((Camera.main.ScreenToWorldPoint(Input.mousePosition) - transform.position).magnitude <= offset)) { if (following) { following = false; if(this.tag == "I") { transform.GetChild(0).position = gridobj.GetCellCenterLocal(gridobj.LocalToCell(transform.GetChild(0).position)); } else { transform.position = gridobj.GetCellCenterLocal(gridobj.LocalToCell(transform.position)); } issnapped = true; } else { following = true; issnapped = false; } } if (following & !issnapped) { foreach(Transform child in transform) { child.localScale = new Vector2(1.4f, 1.4f); } transform.position = Vector2.MoveTowards(transform.position, Camera.main.ScreenToWorldPoint(Input.mousePosition), 1f); } } }
a19cf80685cdbdc8b1698f814a4f4b7ab83193d9
[ "C#", "Markdown" ]
3
C#
Laktik228/Andris
e2e548338f96a06cad1c283522149c2a9f9f533f
10db543049545f143ce0ecbc3103acfb0c1b55d9
refs/heads/main
<file_sep>$(function(){ // 비즈니스 솔루션 영역 hover $(".solution_solution ul > li > div > div:last-child").hover( function () { // over $(this).stop().animate({opacity: 1}, 400); }, function () { // out $(this).stop().animate({opacity: 0}, 400); } ); // 브랜드 영역 hover $(".brand_product > ul > li").hover( function () { // over $(this).children("div:last-child").animate({opacity: 1}, 400); }, function () { // out $(this).children("div:last-child").animate({opacity: 0}, 400); } ); // faq 펼쳐지고 접히게 하기 $(".faq_title > a").click(function (e) { $(".faq_sub").not($(this).parent().next()).slideUp(); $(this).parent().next().slideToggle(); e.preventDefault(); }); $(".faq_title > span > a").click(function (e) { $(".faq_sub").not($(this).parent().parent().next()).slideUp(); $(this).parent().parent().next().slideToggle(); e.preventDefault(); }); });<file_sep>$(function(){ // fullpage 선언 $('#fullpage').fullpage({ // scrollBar: true, //ie에서 스크롤이 안되는 문제 해결 위해 추가했으나, 크롬콘솔창에 에러가 발생하여 주석 처리함. navigation: true, navigationPosition: 'right', responsiveWidth: 1025, afterLoad: function(anchorLink, index){ var span = $("#fp-nav ul li a span, .fp-slidesNav ul li a span"); var spanActive = $("#fp-nav ul li a.active span, #fp-nav ul li:hover a.active span, .fp-slidesNav ul li a.active span, .fp-slidesNav ul li:hover a.active span"); // 페이지 수가 1이거나 5이거나 6이면 우측 네비게이션(점모양) 흰색으로, 아니라면 검은색으로 if(index === 1 || index === 5 || index === 6) { span.css({background: "transparent", border: "2px solid #fff"}); spanActive.css("background", "#fff"); } else { span.css({background: "transparent", border: "2px solid #000"}); spanActive.css("background", "#000"); } } }); // intro_slick $('.intro_slick').slick({ dots: false, arrows: true, autoplay: true, autoplaySpeed: 4000, pauseOnHover: false, pauseOnFocus: false, prevArrow: $('.intro .arrow_prev'), nextArrow: $('.intro .arrow_next') }); // intro slick caption 애니메이션 $(".intro #caption1 > div").animate({ opacity: "1", marginLeft: "0" }, 800, function(){ $(".intro #caption1 > h2").animate({ opacity: "1", marginLeft: "0" }, 300, function(){ $(".intro #caption1 > p").animate({ opacity: "1", marginLeft: "0" }, 300, function(){ $(".intro #caption1 > a").animate({ opacity: "1", marginLeft: "0" }, 300); }); }); }); // On before slide change $('.intro_slick').on('beforeChange', function(event, slick, currentSlide, nextSlide){ switch (nextSlide) { case 0: // 초기화 $(".intro #caption1 > div, .intro #caption1 > h2, .intro #caption1 > p, .intro #caption1 > a").css({opacity: "0", marginLeft: "50px"}); // 애니메이션 순차적 적용 $(".intro #caption1 > div").animate({ opacity: "1", marginLeft: "0" }, 800, function(){ $(".intro #caption1 > h2").animate({ opacity: "1", marginLeft: "0" }, 300, function(){ $(".intro #caption1 > p").animate({ opacity: "1", marginLeft: "0" }, 300, function(){ $(".intro #caption1 > a").animate({ opacity: "1", marginLeft: "0" }, 300); }); }); }); break; case 1: $(".intro #caption2 > div, .intro #caption2 > h2, .intro #caption2 > p, .intro #caption2 > a").css({opacity: "0", marginLeft: "50px"}); $(".intro #caption2 > div").animate({ opacity: "1", marginLeft: "0" }, 800, function(){ $(".intro #caption2 > h2").animate({ opacity: "1", marginLeft: "0" }, 300, function(){ $(".intro #caption2 > p").animate({ opacity: "1", marginLeft: "0" }, 300, function(){ $(".intro #caption2 > a").animate({ opacity: "1", marginLeft: "0" }, 300); }); }); }); break; case 2: $(".intro #caption3 > div, .intro #caption3 > h2, .intro #caption3 > p, .intro #caption3 > a").css({opacity: "0", marginLeft: "50px"}); $(".intro 
#caption3 > div").animate({ opacity: "1", marginLeft: "0" }, 800, function(){ $(".intro #caption3 > h2").animate({ opacity: "1", marginLeft: "0" }, 300, function(){ $(".intro #caption3 > p").animate({ opacity: "1", marginLeft: "0" }, 300, function(){ $(".intro #caption3 > a").animate({ opacity: "1", marginLeft: "0" }, 300); }); }); }); break; } }); // intro jarallax $('.jarallax').jarallax({ speed: 0.9 }); // product slider var productSwiper1 = new Swiper('.brand_slide1', { observer: true, slidesPerView: 4, scrollbar: { el: '.swiper-scrollbar', draggable: true, hide: false }, breakpoints : { 1480 : { width : 870, slidesPerView: 3 }, 1024 : { width : 580, slidesPerView: 2 }, 376 : { width : 360, slidesPerView: 1 }, } }); var productSwiper2 = new Swiper('.brand_slide2', { observer: true, slidesPerView: 4, scrollbar: { el: '.swiper-scrollbar', draggable: true, hide: false }, breakpoints : { 1480 : { width : 870, slidesPerView: 3 }, 1024 : { width : 580, slidesPerView: 2 }, 376 : { width : 360, slidesPerView: 1 }, } }); var productSwiper3 = new Swiper('.brand_slide3', { observer: true, slidesPerView: 4, scrollbar: { el: '.swiper-scrollbar', draggable: true, hide: false }, breakpoints : { 1480 : { width : 870, slidesPerView: 3 }, 1024 : { width : 580, slidesPerView: 2 }, 376 : { width : 360, slidesPerView: 1 }, } }); // product 탭메뉴 초기화 $(".product_tab_menu > li:first-child > a").addClass("active"); // product title 탭 초기화 $(".product .title > h3 > div:first-child").css("display", "block"); // product brand_box 탭 초기화 $(".product .brand_box:first-child").css("display", "block"); // product slider 탭 초기화 $(".product .brand_slide:first-child").css("display", "block"); // 리셋 및 안보이게 하기 function tabReset() { // 탭메뉴 리셋 $(".product_tab_menu > li > a").removeClass("active"); // 모든 타이틀 안보이게 하기 $(".product .title > h3 > div").css("display", "none"); // 모든 brand_box 안보이게 하기 $(".product .brand_box").css("display", "none"); // 모든 slider 안보이게 하기 $(".product .brand_slide").css("display", "none"); }; // product 첫번째 탭메뉴 클릭시 $(".product_tab_menu > li:first-child > a").click(function (e) { tabReset(); // 첫번째 탭메뉴 활성화 $(this).addClass("active"); // 첫번째 타이틀 보이기 $(".product .title > h3 > div:first-child").css("display", "block"); // 첫번째 brand_box 탭 보이기 $(".product .brand_box:first-child").css("display", "block"); // 첫번쨰 slider 탭 보이기 $(".product .brand_slide:first-child").css("display", "block"); e.preventDefault(); }); // product 두번째 탭메뉴 클릭시 $(".product_tab_menu > li:nth-child(2) > a").click(function (e) { tabReset(); // 두번째 탭메뉴 활성화 $(this).addClass("active"); // 두번째 타이틀 보이기 $(".product .title > h3 > div:nth-child(2)").css("display", "block"); // 두번째 brand_box 탭 보이기 $(".product .brand_box:nth-child(2)").css("display", "block"); // 두번쨰 slider 탭 보이기 $(".product .brand_slide:nth-child(2)").css("display", "block"); e.preventDefault(); }); // product 세번째 탭메뉴 클릭시 $(".product_tab_menu > li:nth-child(3) > a").click(function (e) { tabReset(); // 세번째 탭메뉴 활성화 $(this).addClass("active"); // 세번째 타이틀 보이기 $(".product .title > h3 > div:nth-child(3)").css("display", "block"); // 세번째 brand_box 탭 보이기 $(".product .brand_box:nth-child(3)").css("display", "block"); // 세번쨰 slider 탭 보이기 $(".product .brand_slide:nth-child(3)").css("display", "block"); e.preventDefault(); }); // news slider var newsSwiper = new Swiper('.news_slider', { loop: true, pagination: { el: '.swiper-pagination', clickable: true, }, navigation: { nextEl: '.swiper-button-next', prevEl: '.swiper-button-prev', }, }); newsSwiper.on('slideChange', function () { 
console.log(newsSwiper.realIndex); var whiteTxt1 = $(".news .white_text > div:first-child"); var whiteTxt2 = $(".news .white_text > div:nth-child(2)"); var whiteTxt3 = $(".news .white_text > div:nth-child(3)"); var previewImg = $(".preview").children("img"); function whitePreviewReset() { $(".news .white_text > div").css({display: "none", right: "-300px", opacity: 0}); $(".preview").css("opacity", "0"); } switch (newsSwiper.realIndex) { case 0: // 초기화 whitePreviewReset(); // 애니메이션 설정 whiteTxt1.css("display", "block").animate({ right: 0, opacity: 1 }, 400); previewImg.attr("src", "../images/main/news2.jpg").parent().animate({opacity: 1}, 800); break; case 1: // 초기화 whitePreviewReset(); // 애니메이션 설정 whiteTxt2.css("display", "block").animate({ right: 0, opacity: 1 }, 400); previewImg.attr("src", "../images/main/news3.png").parent().animate({opacity: 1}, 800); break; case 2: // 초기화 whitePreviewReset(); // 애니메이션 설정 whiteTxt3.css("display", "block").animate({ right: 0, opacity: 1 }, 400); previewImg.attr("src", "../images/main/news1.png").parent().animate({opacity: 1}, 800); break; } }); // preview 클릭 이벤트 $(".preview").click(function (e) { $(".swiper-button-next").click(); e.preventDefault(); }); // rnd, recruit 영역 hover $(".rnd_recruit .rnd > div").hover( function () { $(this).animate({top: "50%"}, 400); $(this).css({width: "100%", height: "100%"}); $(this).parent().css("background", "url(../images/main/rnd_bg_over.jpg)"); $(this).children("p").animate({opacity: 1}, 400); $(this).children("a").animate({opacity: 1}, 400); }, function () { $(this).animate({top: "60%"}, 400); $(this).css({width: "auto", height: "auto"}); $(this).parent().css("background", "url(../images/main/rnd_bg.jpg)"); $(this).children("p").animate({opacity: 0}, 400); $(this).children("a").animate({opacity: 0}, 400); } ); $(".rnd_recruit .recruit > div").hover( function () { $(this).animate({top: "50%"}, 400); $(this).css({width: "100%", height: "100%"}); $(this).parent().css("background", "url(../images/main/inc_bg_over.jpg)"); $(this).children("p").animate({opacity: 1}, 400); $(this).children("a").animate({opacity: 1}, 400); }, function () { $(this).animate({top: "60%"}, 400); $(this).css({width: "auto", height: "auto"}); $(this).parent().css("background", "url(../images/main/inc_bg.jpg)"); $(this).children("p").animate({opacity: 0}, 400); $(this).children("a").animate({opacity: 0}, 400); } ); });<file_sep>$(function(){ // 스크롤 막기/활성화용 함수 function scrollDisable(){ $('html, body').addClass('scrollDisable').on('scroll touchmove mousewheel', function(e){ // e.preventDefault(); }); } function scrollAble(){ $('html, body').removeClass('scrollDisable').off('scroll touchmove mousewheel'); } // gnb_slick $('.gnb_slick').slick({ dots: true, arrows: false, dotsClass: 'slick-dots' }); // gnb_hidden 보이기/숨기기 $(".gnb > ul > li, .gnb_bg").hover( function () { $(".gnb_hidden").css("display", "block"); $(".gnb_bg").css("display", "block"); // display : none 에서 display : block 으로 될텐데 // 이때 slick 슬라이더의 너비와 높이가 지정되지 않는 오류가 생긴다. // slick의 위치를 수동으로 새로 고쳐줘야 한다. 
$('.gnb_slick').slick('setPosition'); }, function () { $(".gnb_hidden").css("display", "none"); $(".gnb_bg").css("display", "none"); } ); // ----- gnb mobile ----- // gnb_m_btn 클릭시 모바일gnb 보이기 $(".gnb_m_btn").click(function (e) { $(".gnb_mobile").animate({left: "0"}, 400); $(".gnb_mobile_back").css("display", "block"); $(".gnb_x_box").animate({right: 0}, 600); // 스크롤 방지 scrollDisable(); e.preventDefault(); }); // 검은배경 클릭시 모바일gnb 닫힘 $(".gnb_mobile_back").click(function (e) { $(".gnb_mobile").animate({left: "-100%"}, 400); $(this).css("display", "none"); $(".gnb_x_box").css("right", "-100%"); // 스크롤 활성화 scrollAble(); e.preventDefault(); }); // x 버튼 클릭시 모바일gnb 닫힘 $(".gnb_x_box").click(function (e) { $(".gnb_mobile").animate({left: "-100%"}, 400); $(".gnb_mobile_back").css("display", "none"); $(this).css("right", "-100%"); // 스크롤 활성화 scrollAble(); e.preventDefault(); }); // angle 버튼 클릭시 서브메뉴 접히고 펼쳐지기 $(".gnb_mobile_angle").click(function (e) { $(".gnb_mobile_angle").not($(this)).removeClass("up").parent().next().slideUp(); $(this).toggleClass("up").parent().next().slideToggle(); e.preventDefault(); }); // css 애니메이션 발생 $(".pre_ani").each(function(){ $(this).removeClass('ani-stop'); }); // 스크롤에 따라서 css 애니메이션 발생 $(window).on('scroll',function(){ $(".animated").each(function(){ var currentTarget = $(this), currentTargetPos = currentTarget.offset().top - $(".gnb").height() * 7; // 7을 곱한 이유는 스크롤에 따른 반응이 여유있게 되도록 하기 위함. 큰 의미 없는 숫자임. if($(window).scrollTop() >= currentTargetPos){ currentTarget.removeClass('ani-stop'); } }); }); });
a03558cd5cf7be765cb469edc51c8fff87398ddd
[ "JavaScript" ]
3
JavaScript
sollan0506/taekyung
c699996f81ae3dfac8590c2759719dea796809a3
84b39cddd237f990b89011cd90e8ddff0a1c41bc
refs/heads/main
<repo_name>LoveU3tHousand2/my-first-binder<file_sep>/hello.py print('Hello, mTFK!')
11c0ac04267842b10c934a355e1a66d73fab497d
[ "Python" ]
1
Python
LoveU3tHousand2/my-first-binder
3c7795e68a7606e1c40ffd466c5d2242b8725c88
3422b966ab5797e8e9c5604e7c0bb8eb48d5f12e
refs/heads/master
<repo_name>tzellman/flask-mako<file_sep>/README.md
Flask Mako
==========

Provides Mako support in Flask.

Installation
------------

    setup.py install

Usage
-----

*app.cfg* (or however you configure your app)

    # one or more directories
    MAKO_DIR = 'sourcery/templates'

    # optional, if specified Mako will cache to this directory
    MAKO_CACHEDIR = '/tmp/mako'

    # optional, if specified Mako will respect the cache size
    MAKO_CACHESIZE = 500

*run.py* (or wherever you create your app)

    def create_app(name, **kw):
        from flask import Flask, g
        from flaskext.mako import init_mako

        app = Flask(name)
        app.config.update(kw)
        init_mako(app)
        return app

*views.py*

    from flaskext.mako import render_template

    app = Module(__name__)

    @app.route('/')
    def index():
        return render_template('test.html', username='Anonymous')
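Running
-------

A minimal sketch of a development entry point, assuming the `create_app` factory from *run.py* above; the module name, template path, and `debug` flag here are illustrative placeholders rather than part of Flask-Mako itself:

    # run_dev.py -- hypothetical entry point, not part of flask-mako
    from run import create_app   # the factory shown in run.py above

    if __name__ == '__main__':
        # MAKO_DIR must point at your Mako template directory (example value)
        app = create_app(__name__, MAKO_DIR='sourcery/templates')
        app.run(debug=True)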
2a4f9ee9d168891fed2f64d46e09cd8431566593
[ "Markdown" ]
1
Markdown
tzellman/flask-mako
1aad316d563dee5400f9f874516359882cd654da
2003efbd813341e011f508edb9dcf689c93722f5
refs/heads/master
<repo_name>kigawas/coheoka<file_sep>/coheoka/utils.py # -*- coding: utf-8 -*- ''' Preprocessing utilities ''' from random import shuffle, sample import cPickle as pickle import re from nltk import sent_tokenize from scipy.stats import kendalltau as tau def shuffle_sents(text, times): sents = sent_tokenize(text) res = [] for i in range(times): shuffle(sents) res.append(' '.join(sents)) return res def shuffle_words(sent): words = filter(lambda x: len(x) > 0, re.split(r'\.|\?|\!|\s', sent)) shuffle(words) return ' '.join(words) + '.' def replace_sents(text, times): sents = sent_tokenize(text) shuffle(sents) sents[0] = shuffle_words(sents[0]) sents[-1] = shuffle_words(sents[-1]) res = [] for i in range(times): shuffle(sents) res.append(' '.join(sents)) return res def remove_sents(text, times, remove_number=1): sents = sent_tokenize(text) res = [] for i in range(times): res.append(' '.join(sample(sents, len(sents) - remove_number))) return res def add_sents(text, times, added_text, add_number=1): sents = sent_tokenize(text) sents.append(added_text) res = [] for i in range(times): shuffle(sents) res.append(' '.join(sents)) return res def tau_score_of_sentents(sent1_tokens, sent2_tokens): assert len(sent1_tokens) == len(sent2_tokens) t = tau(sent1_tokens, sent2_tokens)[0] if t <= 0.33: return -1 elif t > 0.33 and t <= 0.66: return 0 else: return 1 def pk_dump(filename, obj): with open(filename, 'wb') as f: pickle.dump(obj, f) def pk_load(filename): return pickle.load(open(filename, 'rb')) <file_sep>/coheoka/evaluator.py # -*- coding: utf-8 -*- """ Evaluator based on transition matrix """ from __future__ import print_function, division from nltk import sent_tokenize from sklearn import cross_validation, svm from scipy.stats import kendalltau as tau import numpy as np from entity_transition import TransitionMatrix from ranking import transform_pairwise class Evaluator(object): def __init__(self, corpus, shuffle_times=20, origin_label=1, shuffle_label_func=lambda x, y: -1): self._corpus = corpus self._origin_matrix = self._label_origin_corpus(origin_label) self._shuffled_matrix = self._label_shuffled_corpus(shuffle_times, shuffle_label_func) self._matrix = np.concatenate((self._origin_matrix, self._shuffled_matrix)) self._X, self._y, self._clf, self._fitted_clf = None, None, None, None @property def corpus(self): return self._corpus @property def matrix(self): return self._matrix @property def X(self): if self._X is not None: return self._X else: raise AttributeError( 'Not generated. Please call `make_data_and_clf` first.') @property def y(self): if self._y is not None: return self._y else: raise AttributeError( 'Not generated. Please call `make_data_and_clf` first.') @property def clf(self): if self._clf is not None: return self._clf else: raise AttributeError( 'Not generated. Please call `make_data_and_clf` first.') @property def fitted_clf(self): if self._fitted_clf is not None: return self._fitted_clf else: raise AttributeError('Not generated. 
Please call `fit` first.') def _label_origin_corpus(self, label): res = [] for text in self.corpus: res.append((text, label)) return res def _label_shuffled_corpus(self, times, label_func): return sum( [self._shuffle_text(text, times, label_func) for text in self.corpus], []) def _shuffle_text(self, text, times, label_func): from random import shuffle origin_sents = sent_tokenize(text) assert len(origin_sents) > 1 sents = sent_tokenize(text) res = [] for i in range(times): shuffle(sents) label = label_func(sents, origin_sents) res.append((' '.join(sents[:-1]), label)) return res def make_data_and_clf(self, clf=svm.LinearSVC): if self._X is None: self._X = TransitionMatrix([c for c in self.matrix[:, 0] ]).tran_matrix.as_matrix() self._y = self.matrix[:, 1].astype(int) self._clf = clf else: pass return self def predict(self, clf, X): return np.dot(X, clf.coef_.ravel()) def get_ranking_order(self, clf, X): return np.argsort(clf.predict(X)) def evaluate_tau(self, test_size=0.3): X, y = transform_pairwise(self.X, self.y) X_train, X_test, y_train, y_test = cross_validation.train_test_split( X, y, test_size=test_size) c = self.clf() c.fit(X_train, y_train) return tau(self.predict(c, X_test), y_test) def evaluate_accuracy(self, test_size=0.3): X, y = transform_pairwise(self.X, self.y) X_train, X_test, y_train, y_test = cross_validation.train_test_split( X, y, test_size=test_size) c = self.clf() c.fit(X_train, y_train) return c.score(X_test, y_test) def fit(self): X, y = transform_pairwise(self.X, self.y) self._fitted_clf = self.clf().fit(X, y) return self def evaluate_coherence(self, text): x = TransitionMatrix([text]).tran_matrix.as_matrix() return self.predict(self.fitted_clf, x) def test(*text): e = Evaluator(text).make_data_and_clf() print([e.evaluate_accuracy() for i in range(5)]) print([e.evaluate_tau()[0] for i in range(5)]) t = 'My friend is Bob. He loves playing basketball. And he also is good at tennis.' # NOQA e.fit() print(e.evaluate_coherence(t)) if __name__ == '__main__': T1 = 'My friend is Bob. He loves playing basketball. And he also is good at tennis.' # NOQA T2 = 'I have a friend called Bob. He loves playing basketball. I also love playing basketball. We play basketball together sometimes.' # NOQA T3 = 'I like apple juice. He also likes it. And he almost drinks apple juice every day.' # NOQA test(*[T1, T2, T3]) <file_sep>/coheoka/coherence_probability.py # -*- coding: utf-8 -*- ''' Coherence probability based on entity grid Reference: <NAME>., & <NAME>. (2005, July). Automatic evaluation of text coherence: Models and representations. In IJCAI (Vol. 5, pp. 1085-1090). 
''' from __future__ import print_function, division from math import log import numpy as np from entity_grid import EntityGrid class CoherenceProbability(object): def __init__(self, text, coref=True): self._eg = EntityGrid(text).resolve_coreference( ) if coref else EntityGrid(text) self._coher_prob = self._coherence_prob() @property def grid(self): return self._eg.grid @property def coherence_prob(self): return self._coher_prob def _get_column_prob(self, col): column = self._eg.grid[col].tolist() sent_len = len(column) assert sent_len > 1 transition_count = {} for tran in zip(column[1:], column[:-1]): transition_count[tran] = transition_count.get(tran, 0) + 1 probs = [] for i, role in enumerate(column): if i == 0: probs.append(log(column.count(column[0]) / sent_len)) else: tran_cnt = transition_count[(column[i], column[i - 1])] ent_cnt = column.count(column[i - 1]) probs.append(log(tran_cnt / ent_cnt)) assert all([p <= 0.0 for p in probs]) return sum(probs) / len(probs) def _coherence_prob(self): res = [] for col in self._eg.grid.columns: res.append(self._get_column_prob(col)) return sum(res) / len(res) class ProbabilityVector(object): def __init__(self, corpus): self._corpus = corpus self._probs = None @property def corpus(self): return self._corpus @property def probs(self): if self._probs: return self._probs else: raise ValueError('Please call `make_probs` first') @property def mean(self): return np.mean(self.probs) @property def std(self): return np.std(self.probs) @property def var(self): return np.var(self.probs) def evaluate_coherence(self, text): p = CoherenceProbability(text) res = p.coherence_prob - self.mean return p.coherence_prob, res def make_probs(self): res = [] for text in self.corpus: try: p = CoherenceProbability(text).coherence_prob res.append(p) except: print(text) self._probs = res return self if __name__ == '__main__': T = 'I have a friend called Bob. He loves playing basketball. I also love playing basketball. We play basketball together sometimes.' # NOQA e = CoherenceProbability(T) print(e.coherence_prob) <file_sep>/coheoka/assessment.py # -*- coding: utf-8 -*- """ Assess model performance """ from __future__ import print_function, division import os from nltk import sent_tokenize from utils import replace_sents, pk_load from evaluator import Evaluator from coherence_probability import ProbabilityVector class Assessment(object): def __init__(self, corpus, pv, ev): self.corpus = self._preprocess(corpus) + self._label_corpus(corpus) assert type(pv) == ProbabilityVector assert type(ev) == Evaluator self.pv = pv self.ev = ev def _preprocess(self, corpus): res = [] for text in corpus: text = '. '.join(text.split('.')) res.append((text, 1)) return res def _label_corpus(self, corpus): res = [] for text in corpus: text = '. 
'.join(text.split('.')) remove_one = replace_sents(text, 1)[0] res.append((remove_one, -1)) return res def assess_pv(self, text): if len(sent_tokenize(text)) <= 1: return -1 pb = self.pv.evaluate_coherence(text)[0] if pb < self.pv.mean: return -1 elif self.pv.mean <= pb <= self.pv.mean + 2 * self.pv.std: return 1 else: return 1 def assess_ev(self, text): rank = self.ev.evaluate_coherence(text)[0] if rank < 0.2: return -1 elif 0.2 <= rank < 1: return 1 else: return 1 def assess_all(self): ev_right, pv_right, length = 0, 0, len(self.corpus) cnt = 0 for text, label in self.corpus: ev_res, pv_res = None, None cnt += 1 try: ev_res = self.assess_ev(text) pv_res = self.assess_pv(text) except Exception: print(text) else: print('{}/{}'.format(cnt, length)) if ev_res == label: ev_right += 1 if pv_res == label: pv_right += 1 return ev_right / length, pv_right / length if __name__ == '__main__': cur_dir = os.path.abspath(os.path.dirname(__file__)) pv = pk_load(os.path.join(cur_dir, 'pickles', 'pv.pkl')) ev = pk_load(os.path.join(cur_dir, 'pickles', 'ev.pkl')) with open(os.path.join(cur_dir, 'corpus', 'test.txt')) as f: testtxt = f.read().split('////') assess = Assessment(testtxt[:2], pv, ev) print(assess.assess_all()) <file_sep>/coheoka/entity_grid.py # -*- coding: utf-8 -*- ''' Build entity grid using StanfordCoreNLP Reference: <NAME>., & <NAME>. (2008). Modeling local coherence: An entity-based approach. Computational Linguistics, 34(1), 1-34. ''' from __future__ import print_function, division from collections import defaultdict from functools import reduce from pprint import pprint import doctest import os import pandas as pd from corenlp import StanfordCoreNLP class CoreNLP(object): '''Connect CoreNLP server''' _NLP = StanfordCoreNLP(os.environ.get('CORENLP_URL') or 'http://localhost:9000') _LOCAL_DEMO_PROP = { 'annotators': 'tokenize, ssplit, pos, lemma, ner, depparse, openie, coref', "openie.resolve_coref": "true", 'outputFormat': 'json' } _ONLINE_DEMO_PROP = { "annotators": "tokenize,ssplit,pos,ner,depparse,openie,coref", "coref.md.type": "dep", "coref.mode": "statistical", 'outputFormat': 'json' } @staticmethod def annotate(text): '''Get result from CoreNLP via JSON''' try: return CoreNLP.nlp().annotate(text, properties=CoreNLP._ONLINE_DEMO_PROP) except UnicodeError: pprint(text) @staticmethod def nlp(): '''Return CoreNLP Server''' return CoreNLP._NLP class Constants(object): '''Some constants''' REMOVE_ABBR = {'Inc.', 'Inc', 'Corp.', 'Corp'} _NOUNS = {'NN', 'NNS', 'NNP', 'NNPS', 'PRP'} # S O X _SUBJECTS = {'subj', 'nsubj', 'nsubjpass', 'csubj', 'csubjpass'} _OBJECTS = {'obj', 'iobj', 'dobj'} SUB, OBJ, OTHER, NOSHOW = 'S', 'O', 'X', '-' @staticmethod def noun_tags(): """Get noun POS tags""" return Constants._NOUNS @staticmethod def get_role(dep): """Indentify an entity's grammatical role""" if dep in Constants._SUBJECTS: return Constants.SUB elif dep in Constants._OBJECTS: return Constants.OBJ else: return Constants.OTHER class EntityGrid(object): ''' Entity grid >>> eg = EntityGrid('My friend is Bob. 
He loves playing basketball.') >>> 'friend' in eg.grid.columns and 'he' in eg.grid.columns True >>> 'he' not in eg.resolve_coreference().grid.columns True ''' def __init__(self, text): self.text = ' '.join([token for token in text.split(' ') if token not in Constants.REMOVE_ABBR]) self._data = CoreNLP.annotate(self.text) self._sentences = self._data['sentences'] # import pdb; pdb.set_trace() self._depens = [s['basicDependencies'] for s in self._sentences] self._entity_tokens = [ [t for t in s['tokens'] if t['pos'] in Constants.noun_tags()] for s in self._sentences ] self._noun2lemma = self._set_up_noun2lemma() self._grid = self._set_up_grid() @property def grid(self): """Entity grid""" return self._grid @property def nouns(self): """All nouns in text""" return self._noun2lemma.keys() @property def lemmas(self): """All lemmas in text""" return self._noun2lemma.values() def noun2lemma(self, noun): """Convert a noun to its lemma""" return self._noun2lemma[noun] if noun in self.nouns else None def _set_up_noun2lemma(self): noun2lemma = {} for token in self._entity_tokens: for ety in token: noun2lemma[ety['word']] = ety['lemma'] return noun2lemma def _set_up_grid(self): depens, entities, noun2lemma = self._depens, self._entity_tokens,\ self._noun2lemma assert len(depens) == len(entities) grid = defaultdict( lambda: [Constants.NOSHOW for i in range(len(depens))]) for i, (dep, ety) in enumerate(zip(depens, entities)): nouns = [e['word'] for e in ety] try: [d['dependentGloss'] for d in dep] except KeyError: pprint(dep) pprint(i) pprint(self.text) nouns_dp = [ d for d in dep if d['dependentGloss'] in nouns and d['dep'] != 'compound' ] for n_dp in nouns_dp: grid[noun2lemma[n_dp['dependentGloss']]][i] = \ Constants.get_role(n_dp['dep']) # yapf: disable return pd.DataFrame.from_dict(grid) def _map_phrase_to_entity(self, phrase): '''e.g. 
my friend => friend, friend in grid my friend is Bob => friend, friend and Bob in grid, choose former ''' nouns = [w for w in phrase.split(' ') if w in self.nouns] lemmas = [self.noun2lemma(w) for w in nouns if self.noun2lemma(w) in self.grid.columns] # pprint(lemmas) return lemmas[0] if lemmas != [] else None def _add_column(self, _c1, _c2): '''Add grid[c2] to grid[c1]''' assert len(self.grid[_c1]) == len(self.grid[_c2]) assert _c1 != _c2 col1, col2 = self.grid[_c1], self.grid[_c2] for i, _col1 in enumerate(col1): if _col1 == Constants.NOSHOW: col1[i] = col2[i] self.grid.pop(_c2) return _c1 def _add_columns(self, _c1, *c): '''Add columns of grid to the first''' reduce(self._add_column, [_c1] + list(c)) def resolve_coreference(self): '''Resolve coreference by merging columns in grid''' is_rep = 'isRepresentativeMention' for chain in [chains for chains in self._data['corefs'].values() if len(chains) > 1]: core_entity, other_entities = None, [] for cor in chain: word = self._map_phrase_to_entity(cor['text']) if word is not None and word not in other_entities: if cor[is_rep]: core_entity = word elif word != core_entity: other_entities.append(word) else: pass if core_entity is not None and other_entities != []: self._add_columns(core_entity, *other_entities) return self if __name__ == '__main__': doctest.testmod() <file_sep>/requirements.txt nltk>=3.1 numpy>=1.10.0 pandas>=0.18.0 requests>=2.9.1 scikit-learn==0.16.1 scipy>=0.17.0 <file_sep>/coheoka/corenlp.py # -*- coding: utf-8 -*- ''' Stanford CoreNLP wrapper ''' import json import requests class StanfordCoreNLP(object): def __init__(self, server_url): if server_url[-1] == '/': server_url = server_url[:-1] self.server_url = server_url def annotate(self, text, properties=None): if not properties: properties = {} r = requests.post(self.server_url, params={ 'properties': str(properties) }, data=text) output = r.text try: output = json.loads(output, strict=False) except ValueError: pass return output <file_sep>/README.md # coheoka Python coherence evaluation tool using Stanford's CoreNLP. This repository is designed for entity-based coherence. ## Prerequisite It is highly recommended to run a CoreNLP server on your own if you want to test coherence in this repository. You can download Stanford CoreNLP latest version (3.9.2) at [here](http://stanfordnlp.github.io/CoreNLP/download.html) and run a local server (requiring Java 1.8+) by this way: ``` java -mx4g -cp "*" edu.stanford.nlp.pipeline.StanfordCoreNLPServer ``` Then there comes a demo at [`localhost:9000`](http://localhost:9000/), which visualizes StanfordCoreNLP's sophisticated annotations for English documents. Also, there is an online demo maintained by Stanford at [here](http://corenlp.run/). If you need to annotate lots of documents, you **must** set up a local server on your own. Or if you just want to test a few documents without downloading the CoreNLP tool, you may set an environment variable `CORENLP_URL` to use an existing server (e.g. `http://corenlp.run/` and don't forget the **`http`**). Also, if you are using Windows (actually, it is recommended to install pre-built binaries instead of building them by yourself whatever OS you choose), make sure you have installed any Python's scientific distribution such as [Anaconda](https://www.continuum.io/downloads) (if you want many scientific packages for future use) or [Miniconda](http://conda.pydata.org/miniconda.html) (if you don't want to spend too much disk space) which I strongly recommend. 
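For example, if you only want to reuse an existing server rather than running your own, one low-friction option is to set `CORENLP_URL` before importing anything from the package. A minimal sketch, assuming Python 2, the repository root on your path, and a reachable server (the URL below is only an example):

```python
import os

# entity_grid.py reads CORENLP_URL at import time, so set it first.
os.environ['CORENLP_URL'] = 'http://localhost:9000'

from coheoka.entity_grid import EntityGrid

# Same two-sentence example used in the EntityGrid doctest.
grid = EntityGrid('My friend is Bob. He loves playing basketball.')
print(grid.grid)  # pandas DataFrame: one row per sentence, one column per entity
```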
## Install The requirements are `nltk`, `numpy`, `pandas`, `requests`, `scipy` and `scikit-learn`. If you have installed Anaconda or Miniconda just ``` conda create -n coheoka --file requirements.txt ``` and activate it by typing `activate coheoka` on Windows or `source activate coheoka` on Linux. Check out [conda documentation](http://conda.pydata.org/docs/using/envs.html#create-an-environment) for more details. ## Reference 1. <NAME>., & <NAME>. (2008). Modeling local coherence: An entity-based approach. Computational Linguistics, 34(1), 1-34. 2. <NAME>., & <NAME>. (2005, July). Automatic evaluation of text coherence: Models and representations. In IJCAI (Vol. 5, pp. 1085-1090). <file_sep>/coheoka/entity_transition.py # -*- coding: utf-8 -*- ''' Entity transition based on entity grid Reference: <NAME>., & <NAME>. (2008). Modeling local coherence: An entity-based approach. Computational Linguistics, 34(1), 1-34. ''' from __future__ import print_function, division import doctest from itertools import product from pprint import pprint import pandas as pd from entity_grid import EntityGrid, Constants class EntityTransition(object): ''' Local entity transition >>> eg = EntityGrid('I like apple juice. He also likes it.') >>> et = EntityTransition(eg.resolve_coreference()) >>> et.transition_table.shape[1] <= 4 True ''' def __init__(self, eg, n=2): self._n = n self._grid = eg.grid self._transition_table = self._column_transitions(n) @property def n(self): '''Return transition order n''' return self._n @property def grid(self): '''Return entity grid''' return self._grid @property def transition_table(self): '''Return transition table''' return self._transition_table def make_new_transition_table(self, another_n=3): '''Generate a new transition table''' self._transition_table = self._column_transitions(another_n) self._n = another_n return self def all_prob(self): '''Calculate a feature vector using all transitions''' seq = [Constants.SUB, Constants.OBJ, Constants.OTHER, Constants.NOSHOW] probs = {} for pro in product(seq, repeat=self.n): probs[''.join(pro)] = self.prob(pro) return probs def prob(self, tran): '''Calculate probability of a transition''' import operator as op assert len(tran) == self.n tbl = self.transition_table freq, total = 0, op.mul(*tbl.shape) for _col in tbl.columns: col = tbl[_col] freq += col.tolist().count(tuple(tran)) return freq / total def _column_transition(self, col, n): column = self.grid[col].tolist() if len(column) < n: # this is a trick to handle the case # where transition length is greater than the # number of sentences column_tran = [ tuple(column + [Constants.NOSHOW] * (n - len(column))) ] else: column_tran, tran_len = [], len(column) - n + 1 for i in range(tran_len): column_tran.append(tuple(column[i:i + n])) return column_tran def _column_transitions(self, n): transition_table = {} for col in self.grid.columns: transition_table[col] = self._column_transition(col, n) return pd.DataFrame.from_dict(transition_table) class TransitionMatrix(object): ''' Transition matrix >>> tm = TransitionMatrix(['I like apple juice. 
He also likes it.']) >>> 'SS' == tm.tran_matrix.columns[0] True >>> 'SO' == tm.tran_matrix.columns[1] True ''' def __init__(self, corpus, n=2, coref=True): self._corpus = corpus self._n = n self._tran_list = self._make_tran_list(coref, n) self._tran_matrix = self._make_tran_matrix() @property def corpus(self): '''Retrun the corpus''' return self._corpus @property def n(self): '''Return the order n of transitions''' return self._n @property def tran_list(self): '''Return transition list''' return self._tran_list @property def tran_matrix(self): '''Return sorted transition matrix ordered by S, O, X, -''' mat = self._tran_matrix seq = [Constants.SUB, Constants.OBJ, Constants.OTHER, Constants.NOSHOW] return mat[sorted(mat.columns, key=lambda x: [seq.index(c) for c in x])] @property def all_transitions(self): '''Return all transition produce by {S, O, X, N}^n''' seq = [Constants.SUB, Constants.OBJ, Constants.OTHER, Constants.NOSHOW] return [''.join(t) for t in product(seq, repeat=self.n)] def _make_tran_list(self, coref, n): tran_list = [] new_corpus = [] for i, doc in enumerate(self.corpus): try: if coref: eg = EntityGrid(doc).resolve_coreference() else: eg = EntityGrid(doc) tran_list.append(EntityTransition(eg, n)) new_corpus.append(doc) except (UnicodeError, TypeError) as e: print(doc) print('Error detected at {}: {}'.format(i, e)) self._corpus = new_corpus return tran_list def _make_tran_matrix(self): mat = {} for tran in self.all_transitions: mat[tran] = [t.all_prob()[tran] for t in self.tran_list] return pd.DataFrame.from_dict(mat) def test_et(text, n=2): pprint(text) eg = EntityGrid(text) et = EntityTransition(eg, n) pprint(et.transition_table) pprint(et.all_prob()) def test_tm(*test, **kw): tm = TransitionMatrix(test, kw['n']) pprint(tm.tran_matrix) if __name__ == '__main__': doctest.testmod() <file_sep>/coheoka/__init__.py # -*- coding: utf-8 -*- from entity_grid import EntityGrid, Constants from entity_transition import EntityTransition from evaluator import Evaluator from corenlp import StanfordCoreNLP
a0bf1580b510e0cb1154e8c4e2947aff23424e1f
[ "Markdown", "Text", "Python" ]
10
Markdown
kigawas/coheoka
e181d77f84665a4e24f28cd7d392a33b05db189d
4dc4a07d47cc71af82e947aeeb7c3105c088412a
refs/heads/master
<file_sep>package gameLogic; public class Cell implements Comparable<Cell> { private int row; private int column; /** * Cell Constructor. * * @param row_ - int ,row of cell. * @param column_ - - int , column of cell. */ public Cell(int row_, int column_) { row = row_; column = column_; } /** * returns the cell column. */ public int getColumn() { return column; } /** * returns the cell row. */ public int getRow() { return row; } /** * Update this cell to be : row +p's row , col +p's col, and return reference to this . * * @param p - point to add from . * @return reference to this cell after update(as described) */ Cell CellPlusCell(Cell p) { return new Cell(getRow() + p.getRow(), getColumn() + p.getColumn()); } Cell CellMinusCell(Cell p) { return new Cell(getRow() - p.getRow(), getColumn() - p.getColumn()); } /** * Copy constructor of Cell. * * @param p - cell to be copy. */ Cell(Cell p) { this.row = p.row; this.column = p.column; } /** * applied to print the cell. */ @Override public String toString() { return "(" + this.row + "," + this.column + ")"; } /** * Comparing two cells row by row , column by column . * * @param p - other cell . * @return true if and only if this row = to p's and same for column . */ @Override public int compareTo(Cell p) { if (this.row == p.row && this.column == p.column) { return 0; } else { return 1; } } } <file_sep>package gui.gameWin; import gameLogic.*; import gameLogic.Cell; import javafx.fxml.FXML; import javafx.fxml.Initializable; import javafx.scene.Scene; import javafx.scene.control.Label; import javafx.scene.layout.GridPane; import gui.graphicboard.GraphicBoard; import javafx.scene.layout.Pane; import javafx.scene.paint.Color; import javafx.scene.paint.CycleMethod; import javafx.scene.paint.LinearGradient; import javafx.scene.paint.Stop; import javafx.scene.shape.Ellipse; import javafx.stage.Stage; import java.io.IOException; import java.net.URL; import java.nio.file.Files; import java.nio.file.Paths; import java.util.*; import java.util.List; public class GameController implements Initializable, Display { @FXML private GridPane boardContainer_; @FXML private Pane currPlayerImg; @FXML private Pane firstPlayerImg; @FXML private Pane secPlayerImg; private Pane[] playersImg; @FXML private Label firstPlayerScore; @FXML private Label secPlayerScore; @FXML private Label firstPlayerName; @FXML private Label secPlayerName; @FXML public javafx.scene.control.Label statusLabel; private GraphicBoard graphicBoard; private Color[] playersColor; private String[] playersName; private int boardSize; private Game game; private int[] scores; private LinearGradient[] PlayersLG; private Scene menuScene; @Override public void initialize(URL location, ResourceBundle resources) { boardContainer_.getChildren().remove(graphicBoard); statusLabel.setText(""); readSettings(); playersImg = new Pane[2]; playersImg[0] = firstPlayerImg; playersImg[1] = secPlayerImg; PlayersLG = new LinearGradient[2]; Stop[] stops = new Stop[]{new Stop(0, playersColor[0]), new Stop(1, Color.BLACK)}; PlayersLG[0] = new LinearGradient(0, 0, 2, 1, true, CycleMethod.REFLECT, stops); stops = new Stop[]{new Stop(0, playersColor[1]), new Stop(1, Color.BLACK)}; PlayersLG[1] = new LinearGradient(0, 0, 2, 1, true, CycleMethod.REFLECT, stops); Board board = new Board(boardSize, boardSize, new ArrayList<>(), new ArrayList<>()); game = new Game(boardSize, boardSize, this, this, board); this.scores = new int[2]; scores[0] = scores[1] = 0; // Initial scores : for (int i = 1; i <= boardSize; i++) { for (int j = 1; j 
<= boardSize; j++) { if (board.getCellValue(i, j) == TypesOf.Color.black) { scores[0]++; } else if (board.getCellValue(i, j) == TypesOf.Color.white) { scores[1]++; } } } int graphicBoardLength = 400; // Creating the graphic board graphicBoard = new GraphicBoard(playersColor); // Sets it's size graphicBoard.setPrefWidth(graphicBoardLength); graphicBoard.setPrefHeight(graphicBoardLength); boardContainer_.getChildren().add(0, graphicBoard); boardContainer_.setOnMouseClicked(event -> { int height = (int) boardContainer_.getPrefHeight(); int width = (int) boardContainer_.getPrefWidth(); int cellHeight = height / board.getRows(); int cellWidth = width / board.getColumns(); int squreSide = cellHeight >= cellWidth ? cellWidth : cellHeight; squreSide -= 1; Integer x = (int) Math.floor(event.getSceneX() / squreSide) + 1; Integer y = (int) Math.floor(event.getSceneY() / squreSide) + 1; Cell c = new Cell(y, x); // status get's reset statusLabel.setText(""); // makes a move game.makeMove(c); }); boardContainer_.widthProperty().addListener((observable, oldValue, newValue) -> { double boardNewWidth = newValue.doubleValue() - 120; graphicBoard.setPrefWidth(boardNewWidth); boardContainer_.setPrefWidth(boardNewWidth); graphicBoard.draw(board, game.getCurrPlayerValidMoves()); notification(); }); boardContainer_.heightProperty().addListener((observable, oldValue, newValue) -> { double boardNewWidth = newValue.doubleValue() - 120; graphicBoard.setPrefHeight(boardNewWidth); boardContainer_.setPrefHeight(boardNewWidth); graphicBoard.draw(board, game.getCurrPlayerValidMoves()); notification(); }); } /** * General controllers update .(according to the current player) */ private void notification() { int currPlayer = game.getCurrPlayerIndex(); Ellipse currPlayerDisk = new Ellipse(40, 40, 20, 10); currPlayerDisk.setFill(PlayersLG[currPlayer]); currPlayerImg.getChildren().clear(); currPlayerImg.getChildren().add(0, currPlayerDisk); Ellipse firstPlayerDisk = new Ellipse(40, 40, 20, 10); firstPlayerDisk.setFill(PlayersLG[0]); playersImg[currPlayer].getChildren().clear(); playersImg[currPlayer].getChildren().add(0, firstPlayerDisk); Ellipse secPlayerDisk = new Ellipse(40, 40, 20, 10); secPlayerDisk.setFill(PlayersLG[1]); playersImg[1].getChildren().clear(); playersImg[1].getChildren().add(0, secPlayerDisk); firstPlayerScore.setText(String.valueOf(scores[0])); secPlayerScore.setText(String.valueOf(scores[1])); } /** * Reading the settings from the file , thus extracting the data from it. 
*/ private void readSettings() { try { playersColor = new Color[2]; playersName = new String[2]; List<String> settings = Files.readAllLines(Paths.get("settings.txt")); playersName[0] = settings.get(0); playersColor[0] = Color.valueOf(settings.get(1)); playersName[1] = settings.get(2); firstPlayerName.setText(playersName[0] + ":"); secPlayerName.setText(playersName[1] + ":"); playersColor[1] = Color.valueOf(settings.get(3)); boardSize = Integer.valueOf(settings.get(4)); } catch (IOException e) { e.printStackTrace(); } } /*********************************************************************************************************** **************************************DISPLAY INTERFACE METHODS******************************************** ***********************************************************************************************************/ @Override public void show(Board board, List<gameLogic.Path> moves, int currPlayer, boolean passTurn, int blacks, int whites) { graphicBoard.draw(board, game.getCurrPlayerValidMoves()); scores[0] = blacks; scores[1] = whites; notification(); if (passTurn) { statusLabel.setText("No moves are possible for " + playersName[1 - currPlayer] + ", turn passed to " + playersName[currPlayer]); } } @Override public void showError(TypesOf.Error errorType) { if (errorType == TypesOf.Error.notValidMove) { statusLabel.setText("Not Valid Move , please click on one of the marked cells only !"); } } @Override public void showEndGameStatus(TypesOf.GameStatus gameStatus) { if (gameStatus == TypesOf.GameStatus.blackWon) { statusLabel.setText(playersName[0] + " won !"); } else if (gameStatus == TypesOf.GameStatus.whiteWon) { statusLabel.setText(playersName[1] + " won !"); } else if (gameStatus == TypesOf.GameStatus.tie) { statusLabel.setText("It's a tie , try again !"); } // No more pressing is allowed ! boardContainer_.setOnMouseClicked(event -> { }); } /** * Returns to the main menu. */ @FXML public void Menu() { ((Stage) boardContainer_.getScene().getWindow()).setScene (menuScene); } //Saving the menu a load up not load menu more than once run per execution public void setMenuScene(Scene menuScene_) { this.menuScene = menuScene_; } } <file_sep>package gameLogic; import java.util.List; public class HumanPlayer extends Player { HumanPlayer(PlayerInput inputHandler, Counter discsCounter, TypesOf.Color color) { super(); this.inputHandler = inputHandler; this.discsCounter = discsCounter; this.color = color; } int getMenuSelection() { return inputHandler.getMenuSelection(); } @Override public Cell chooseAndReturnMove(List<Path> availableMovePaths) { return inputHandler.getLandingPoint(); } } <file_sep>package gameLogic; import java.util.List; public interface GameLogic { /** * Return valid moves , each move with it's path - it's trajectories to "eat" on the board . * * @param board - the board to check the player in the given color's moves. * @param color - of the player . * @return all of the valid moves available for the player which color was given . */ List<Path> validMovePaths(Board board, TypesOf.Color color); /** * Returns the current Game Status which could be : noValidMoves , whitwWon , blackWon , passTurn(the player cannot * play but the other one can play ) and tie . * * @param currPlayerColor . * @param blacks - how many blacks are there on the board . * @param whites - how many whites are there on the board . * @return the curr game status as described . 
*/ TypesOf.GameStatus currGameStatus(Board board, boolean currPlayerhasMoves, TypesOf.Color currPlayerColor, int blacks, int whites); } <file_sep>package gui.ConfigWin; import java.io.*; import javafx.fxml.FXML; import javafx.fxml.Initializable; import javafx.scene.Scene; import javafx.scene.control.ColorPicker; import javafx.scene.control.ComboBox; import javafx.scene.control.Label; import javafx.scene.control.TextField; import javafx.scene.layout.GridPane; import javafx.scene.paint.Color; import javafx.stage.Stage; import java.net.URL; import java.nio.charset.Charset; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; import java.util.Collections; import java.util.List; import java.util.ResourceBundle; public class ConfigWinController extends GridPane implements Initializable { @FXML private ColorPicker firstColorCheckbox; @FXML private ColorPicker secColorCheckbox; @FXML private ComboBox boardSizeCheckbox; @FXML private TextField firstNameField; @FXML private TextField secondNameField; @FXML private Label firstTimeErrorLabel; private String firstName = "Bob"; private String secName = "Alice"; private Scene menuScene; @Override public void initialize(URL location, ResourceBundle resources) { loadSettings(); } /** * Saves the chosen settings in the file of settings. */ public void saveSettings() { // Extracting data from the controllers. Color firstPlayerColor = firstColorCheckbox.getValue(); Color secondPlayerColor = secColorCheckbox.getValue(); Integer n = (Integer) boardSizeCheckbox.getValue(); String firstNameStr = firstNameField.getText(); if (firstNameStr.equals("")) { firstNameStr = this.firstName; } else { this.firstName = firstNameStr; } String secondNameStr = secondNameField.getText(); if (secondNameStr.equals("")) { secondNameStr = secName; } else { this.secName = secondNameStr; } // Constructing the coded data as string . String data = firstNameStr + "\n" + firstPlayerColor + "\n" + secondNameStr + "\n" + secondPlayerColor + "\n" + n; // Writing. Path file = Paths.get("settings.txt"); try { Files.write(file, Collections.singleton(data), Charset.forName("UTF-8")); } catch (IOException e) { e.printStackTrace(); } this.firstTimeErrorLabel.setText(""); exitSettingsWindow(); setNamesToDisplay(); } /** * Exits the windows if cancel / apply is pressed. */ public void exitSettingsWindow() { ((Stage) firstColorCheckbox.getScene().getWindow()).setScene (menuScene); } public void setSettingsFileRequired(String error) { firstTimeErrorLabel.setText(error); } @FXML public void firstPlayerColorChanged() { playerColorChanged(0); } @FXML public void secPlayerColorChanged() { playerColorChanged(1); } /** * Checks if the players colors are the same , and doesn't allow it. 
* * @param player */ private void playerColorChanged(int player) { if (firstColorCheckbox.getValue().equals(secColorCheckbox.getValue())) { Color defaultColor = Color.valueOf("Black"); if (defaultColor.equals(firstColorCheckbox.getValue())) defaultColor = Color.valueOf("White"); if (player == 0) firstColorCheckbox.setValue(defaultColor); else secColorCheckbox.setValue(defaultColor); } } private void loadSettings() { try { Path path = Paths.get("settings.txt"); if (Files.exists(path)) { List<String> settings = Files.readAllLines(path); firstColorCheckbox.setValue(Color.valueOf(settings.get(1))); secColorCheckbox.setValue(Color.valueOf(settings.get(3))); boardSizeCheckbox.setValue(Integer.valueOf(settings.get(4))); this.firstName = settings.get(0); this.secName = settings.get(2); setNamesToDisplay(); } } catch (IOException e) { e.printStackTrace(); } } /** * Sets the names to the corresponding fields on the screen. */ private void setNamesToDisplay() { firstNameField.setPromptText("Enter first player name here (current is: " + firstName + ")"); secondNameField.setPromptText("Enter second player name here (current is: " + secName + ")"); firstNameField.setText(""); secondNameField.setText(""); } public void setMenuScene(Scene menuScene_) { this.menuScene = menuScene_; } } <file_sep>package gameLogic; public class Attack { private Cell curr; private Path path; private int index; Attack(Path path) { this.path = path; this.curr = path.getLanding(); this.index = 0; } /** * indicate if we can call getNext function , aka there is a next cell . * @return true if and only if there is a next cell . */ boolean hasNext() { if (index < path.numberOfEatingDirections()) { if ((curr.CellMinusCell(path.getDirection(index))).compareTo(path.getStopCell(index)) != 0) { return true; } else { index++; curr = path.getLanding(); if (index < path.numberOfEatingDirections()) { curr = curr.CellPlusCell(path.getDirection(index)); } return hasNext(); } } else { return false; } } /** * @return the next disc position which need to be flip. */ Cell getNext() { curr = curr.CellPlusCell(path.getDirection(index)); return curr.CellMinusCell(path.getDirection(index)); } } <file_sep>package gameLogic; public class TypesOf { public enum Color { empty, black, white } ; public enum Error { outOfBounds, notIntegers, notValidMove } ; public enum GameStatus { whiteWon, blackWon, noOneWon, tie, passTurn } ; }
f7c26118595a82e817c9de0fcb2296b01ecd293b
[ "Java" ]
7
Java
asheryi/ReversiGui
56ad06fd1dbc37f8ccc17c27cf79f036b372c8bf
4c8f79aff5df9897902dae7fc9bae0cb7dd45e39
refs/heads/main
<file_sep>//Create variables here var dog, happyDog; var database, foodS, foodStock; var DogIMG, HappyDogIMG; function preload(){ //load images here DogIMG = loadImage("dogImg.png"); HappyDogIMG = loadImage("dogImg1.png"); } function setup() { database = firebase.database(); foodStock = database.ref('Food'); foodStock.on("value", readStock); createCanvas(500, 500); dog = createSprite(250, 350, 10, 10); dog.addImage(DogIMG); dog.scale = 0.4; } function draw() { background (50, 109, 255); if (keyWentDown(UP_ARROW)){ writeStock(foodS); dog.addImage(HappyDogIMG); } if (keyWentUp(UP_ARROW)){ dog.addImage(DogIMG); } if (foodS === 0){ foodS = 50; } drawSprites(); //add styles here fill("yellow"); textSize(20) text("Note:Press Up_Arrow Key To Feed Milk",80,50); text("Food Remaining: " + foodS, 150, 150); } function readStock(data){ foodS=data.val(); } function writeStock(x){ if (x<=0){ x=0; }else { x=x-1 } database.ref('/').update({Food:x}) }
055b988f45bd3620a64ca9a40773b19eef6b2f7e
[ "JavaScript" ]
1
JavaScript
ankita539/virtual1
1134b6b46c8ffcdfd816d06c63df1c089c5fd50d
07a2767089a9c0a8ce2e92f3ccf662f4c2b9c0b9
refs/heads/dev
<repo_name>sdavid09/HandBot<file_sep>/src/user/level/level.js const { voice_xp, max_lvl } = require('../../../conf/config.json'); class XPModifiers { static voiceXPModifier(number_of_users) { // increase 1% xp modifier per user in same channel let xp = voice_xp * ( 1 + ( number_of_users / 100 )) return xp; } } class Level { constructor(xp=0, next_rank_xp=0, xp_to_next_rank=0) { this.xp = xp; this.level = 1; this.increment = 0; // amount of xp per level this.xp_to_next_rank = xp_to_next_rank; this.next_rank_xp = next_rank_xp; // if values passed in setup level if (this.xp_to_next_rank && this.xp) { this.calculateLevel(); } } getLevel() { return this.level; // return user level } calculateLevel(){ // setup user level calcuate based on xp to next rank this.increment = Math.round((this.xp_to_next_rank / max_lvl)) // if (this.increment && this.xp) { // calculate level this.level = Math.abs(Math.floor(((this.next_rank_xp - this.xp) / (this.increment)) - ((max_lvl)))) } } levelUp(id) { // increase level } } module.exports = { Level, XPModifiers };<file_sep>/test/src/extra/giphy.test.js // const assert = require('chai').assert; // const giphy = require('../../../src/extra/giphy'); // describe('GIPHY API', function() { // describe('GIF Welcome URL', function() { // it('Check if URL is string', async function () { // let gif_url = await giphy.gifMessage("welcome" ,'pg-13'); // assert.typeOf(gif_url, 'string', 'URL is string'); // }) // }) // }) <file_sep>/src/utils/command.js // class Command { // constructor(name="", description="", permission = "") { // this.name = name; // this.description = description; // this.permission ="" // } // } // module.exports = { // Command // };<file_sep>/test/src/user/level/level.test.js const expect= require('chai').expect; const { XPModifiers, Level} = require('../../../../src/user/level/level'); describe('XP', function() { describe('Check User XP Value', function() { it('Check if XP is a number', async function () { let number_of_users = 5; expect(XPModifiers.voiceXPModifier(number_of_users)).to.be.an('number'); }) }) }) describe('Level', function() { describe('Check User Level', function() { it('Check if User Level Calculated Correctly 1', async function () { // assuming max_level from config is 100 let level = new Level(231, 5000, 5000); expect(level.level).to.be.an('number') && expect(level.level).to.equal(5); }) it('Check if User Level Calculated Correctly 2', async function () { // assuming max_level from config is 100 let level = new Level(4999.9, 5000, 5000); expect(level.level).to.be.an('number') && expect(level.level).to.equal(100); }) it('Check if User Level Calculated Correctly 3', async function () { // assuming max_level from config is 100 let level = new Level(45000, 50000, 25000); expect(level.level).to.be.an('number') && expect(level.level).to.equal(80); }) }) }) <file_sep>/README.md # The Hand Bot The Hand Bot is a discord bot that adds ranks, currency, levels, and games. The Hand Bot focused on the Medieval Age for ranks. ## Setup Currently needs roles created in discord server beforehand. The bot needs to be ranked higher than rest permissions. Also set "Display role members separately from online members" to true to show your roles in your sever. 
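The bot also reads two JSON files from `conf/` that are not checked in: `conf/token.json`, which holds your Discord bot `token`, and `conf/config.json`. The key names below are taken from the `require` calls in the source; every value shown is a placeholder you must replace with your own settings:

```json
{
  "server_id": "<your-guild-id>",
  "prefix": "!",
  "bot_channel": "bot-commands",
  "message_xp": 5,
  "voice_xp": 10,
  "max_lvl": 100,
  "daily_xp_bonus": 50,
  "daily_money_bonus": 25
}
```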
## Installation Requirements: *node *npm ```bash npm install ``` ## Test ```bash npm run test --exit ``` ## Run ```bash node .\index.js chcon -Rt svirt_sandbox_file_t <path> podman run -v /opt/HandBot/data:/opt/HandBot/data -d --name bot localhost/handbot ``` ## Contributing <NAME> <file_sep>/Dockerfile # base image where node is installed FROM node:latest RUN mkdir -p /opt/HandBot COPY . /opt/HandBot RUN cd /opt/HandBot \ && npm install WORKDIR /opt/HandBot CMD ["node", "index.js"] <file_sep>/src/games/threedice.js /* Passe-dix style game where user rolls 3 dice. If the total sum of all three dice * is greater than 11, user get double their bet. Else they lose their bet. * */ class ThreeDice { constructor() { this.dice1; this.dice2; this.dice3; } rollDice() { this.dice1 = Math.floor(Math.random() * 6) + 1; this.dice2 = Math.floor(Math.random() * 6) + 1; this.dice3 = Math.floor(Math.random() * 6) + 1; return this.dice1 + this.dice2 + this.dice3; } play(bet=0) { let sum = this.rollDice(); if(isNaN(bet) || bet <=0 || bet % 1 != 0) { return false; } else if (sum >= 11) { return bet * 2; } else { return -bet; } } } module.exports = { ThreeDice };<file_sep>/src/commands/money.js const {User} = require('../user/base/user'); const {getUserFromMention} = require('../utils/user_mention'); module.exports.run = async(client, message, args ) => { let user_to_search_id = message.author.id; if(args.length >= 1) { let match = getUserFromMention(args[0]); if(match) { user_to_search_id = match; } } let user = await new User(user_to_search_id).get(); message.channel.send(`User: ${user.name } Money: ${user.money}`); } module.exports.help = async(client, message, args ) => { let help = "```Money:\nDisplays User Money\nOptions:\n @<user>\n\tExample: !money @FormulaLight```" message.channel.send(help); } <file_sep>/src/db/db.js const sqlite3 = require('sqlite3').verbose(); class DB { constructor() { this.db_file = './data/BOT_DATA.db'; } run(sql, params=[]) { this.db = new sqlite3.Database(this.db_file) let get_db_promise = new Promise((resolve, reject)=> { this.db.run(sql, params, (err) => { if (err) return reject(err); resolve(); }); }); this.db.close(); return get_db_promise; } get (sql, params=[]) { this.db = new sqlite3.Database(this.db_file) let get_db_promise = new Promise((resolve, reject)=> { this.db.get( sql, params, ( err, rows ) => { if (err) return reject(err); resolve( rows ); }); }); this.db.close(); return get_db_promise; } } module.exports = { DB };<file_sep>/src/events/levelUp.js module.exports = (client) => { console.log("User Level Up"); } <file_sep>/src/events/voiceStateUpdate.js const { User } = require ('../user/base/user'); const { voice_xp } = require('../../conf/config.json'); module.exports = async (client, oldMember, newMember) => { let oldUserChannel = oldMember.voiceChannel; let newUserChannel = newMember.voiceChannel; if(oldUserChannel === undefined && newUserChannel !== undefined) { // console.log(`OldUserChannel : ${newMember.voiceChannelID}`) // User Joins a voice channel let users_in_channel = newUserChannel.members.keyArray().length; let user = await new User(newMember.id).get(); // Todo: calulate how long in channel for xp modifier user.addXP(voice_xp); let promotion = await user.checkForRankPromotion(); if(promotion) { client.emit('rankPromotion', client, user) // call rankPromotion event } user.save(); } else if(newUserChannel === undefined) { // User leaves a voice channel } } <file_sep>/index.js /* <NAME> Hand Bot Description: Bot to add moderation, xp, currency, to 
discord */ // Configs and Dependencies const Discord = require('discord.js'); const { token } = require('./conf/token.json'); const { server_id } = require('./conf/config.json'); const { User } = require('./src/user/base/user'); const { Rank } = require('./src/user/rank/rank'); const giphy = require('./src/extra/giphy'); const { ServerDBConnector } = require('./src/db/server_db'); const {DailyBonus} = require('./src/bonus/daily') const fs = require('fs'); let daily = new DailyBonus(); const client = new Discord.Client(); var servers = client.guilds; // get all servers /* On Bot Startup */ client.once('ready', async () => { console.log('The HandBot is ready to serve the kingdom!'); let server = getServerInfo(server_id); // returns guild let users = server.members; user_list = users.keyArray() setupServersTable(); setupUsersTable(users, user_list); daily.runDailyTasks(server); }); // Load all Additional Events in events folder fs.readdir('./src/events/' , (err, files)=>{ if(err) { return console.log(err) } files.forEach(file=>{ let event = require(`./src/events/${file}`); let event_name = file.split('.')[0]; client.on(event_name, event.bind(null, client)); }); }); client.commands = new Map(); // Load Commands fs.readdir('./src/commands/' , (err, files)=>{ if(err) { return console.log(err) } files.forEach(file=>{ let command = require(`./src/commands/${file}`); let command_name = file.split('.')[0]; client.commands.set(command_name, command) }); }); /* Helper Functions*/ function addUserToRole(member, user) { // takes a guild member and gives default role let server = getServerInfo(server_id); // returns guild let user_role = user.rank; let server_role = server.roles.find(role=>role.name === user.rank); if(server_role) { member.addRole(server_role).catch(console.error); } else { console.log(`${server_role} Role not setup on server!`); } } async function setupUsersTable(users, user_list, server) { for( let i of user_list ) { let member = users.get(i) let username = member.user.username ; let user_id = member.user.id; let user = await new User(user_id).get(); user.setName(username); user.server = server_id; user.save(); addUserToRole(member, user); } }; async function setupServersTable() { let server_db_connector = new ServerDBConnector(); for(let server of servers.keys()) { let server_info = getServerInfo(server); server_db_connector.save(server, server_info.name) } } function getServerInfo(server_id){ return servers.get(server_id); }; client.login(token); <file_sep>/src/commands/gamble.js const {ThreeDice} = require('../games/threedice') const{User} = require('../user/base/user'); module.exports.run = async(client, message, args ) => { if(args.length !== 1 ) { message.channel.send('Invalid Arguments check help !gamble --help'); return } let game = new ThreeDice() let user = await new User(message.author.id).get(); if(user.money >= args[0]) { let gamble = game.play(args[0]); if(gamble) { if ( gamble >= 1) { message.channel.send(`${user.name} You Won! 
You rolled |${game.dice1}| |${game.dice2}| |${game.dice3}| Total: ${game.dice1+game.dice2+game.dice3}`); user.updateMoney(gamble) user.save(); } else { message.channel.send(`${user.name} Sorry You Lost ${args[0]} :( You rolled |${game.dice1}| |${game.dice2}| |${game.dice3}| Total: ${game.dice1+game.dice2+game.dice3}`); user.updateMoney(gamble) user.save(); } } else { message.channel.send(`Invalid Arguments Type !gamble --help`); return } } else if ( user.money < args[0]) { message.channel.send(`Your balance is too low!`); } else { message.channel.send(`Invalid argument type`); } } module.exports.help = async(client, message, args ) => { let help = "```Gamble:\nRoll 3 dice and if sum is greater than 11 you win double your bet! Else you lose :(\nCheck your balance before! \nOptions:\n <amount>\nExample: !gamble 25```" message.channel.send(help); } <file_sep>/src/events/rankPromotion.js const{ server_id } = require('../../conf/config.json') module.exports = async (empty, client, user) => { let servers = client.guilds; let server = servers.get(server_id); let member = server.members.get(user.id) let server_role = server.roles.find(role=>role.name === user.rank); if(server_role) { member.addRole(server_role).catch(console.error); } } <file_sep>/src/events/presenceUpdate.js const {DailyBonus} = require('../bonus/daily'); const {User} = require('../user/base/user'); module.exports = async (client, oldMember, newMember) => { if(oldMember.presence.status !== newMember.presence.status){ if(newMember.presence.status === "online") { let user = await new User(newMember.id).get(); let daily_bonus = await new DailyBonus().giveDailyBonus(user); } } } <file_sep>/src/user/base/user.js const { Rank } = require ('../rank/rank'); const { Level, XPModifiers } = require ('../level/level'); const { Money } = require ('../../economy/money'); const { UserDBConnector } = require('../../db/user_db'); let db = new UserDBConnector(); class User { constructor(id) { this.id = id; this.name = ""; this.xp = 0; this.rank = "Peasant"; this.level = 1; this.money = 25; this.server = 0; this.rank_img = "Peasant.png"; } setName(name) { this.name = name; } updateMoney(amount) { this.money += amount; if(this.money < 0) { this.money = 0; } } checkLevel() { let rank = new Rank() let next_rank = rank.getNextRank(this.rank) let level = new Level(this.xp, rank.getRankXP(next_rank), rank.getXPToNextRank(this.rank)) return level.getLevel(); } async checkForRankPromotion() { let rankclass= new Rank(); let user_rank = rankclass.checkForRankPromotion(this.rank, this.xp) this.level = this.checkLevel(); if (user_rank) { this.rank = user_rank; this.money += rankclass.getRankBonus(user_rank); this.rank_img = await Rank.getRankImage(user_rank); this.level = 1; return true; // if user ranked up } return false } addXP(xp) { this.xp += xp; } async save() { await db.save(this) } async get() { let user = await db.get(this.id); if (user) { // if user already in db get data and create user object this.name = user.name; this.xp = user.xp; this.level = user.level; this.rank = user.rank; this.money = user.money; this.server = user.server; this.rank_img = await Rank.getRankImage(user.rank); } return this; } } module.exports = { User };<file_sep>/src/commands/xp.js const {User} = require('../user/base/user'); const {getUserFromMention} = require('../utils/user_mention'); module.exports.run = async(client, message, args ) => { let user_to_search_id = message.author.id; if(args.length >= 1) { let match = getUserFromMention(args[0]); if(match) { 
user_to_search_id = match; } } let user = await new User(user_to_search_id).get(); message.channel.send(`User: ${user.name } XP: ${user.xp}`); } module.exports.help = async(client, message, args ) => { let help = "```XP:\nCommand Displays User XP\nOptions:\n @<user>\n\tExample: !xp @FormulaLight```" message.channel.send(help); }<file_sep>/src/utils/server.js const {server_id} = require('../../conf/config.json'); function getServer(client) { let servers = client.guilds; // get all servers return servers.get(server_id); } <file_sep>/src/commands/help.js module.exports.run = async(client, message, args ) => { let commands = "```Commands:\n\ !help -- displays all commands\n\ !xp -- displays user xp\n\ !money -- display user money\n\ !stats -- displays all user stats\n\ !gamble <value> -- Roll 3 dice and if sum is greater than 11 you win double your bet!\n\ \n--------------------------\n\ Options: <cmd> [--help][-h] ```\ " message.channel.send(commands); } module.exports.help = async(client, message, args ) => { }<file_sep>/src/events/message.js const { User } = require ('../user/base/user'); const { message_xp, prefix } = require('../../conf/config.json'); const{ server_id, bot_channel } = require('../../conf/config.json') module.exports = async(client, message ) => { if (message.author.bot) return; let user = await new User(message.author.id).get(); if ((message.content.startsWith(`${prefix}`)) && (message.channel.name === bot_channel)) { let full_command = message.content.split(" ") let cmd = full_command[0].replace(`${prefix}`, ""); // get the command from let command = client.commands.get(cmd); if(command) { // check user permissions let args = [] if(full_command.length > 1) { full_command.shift() // remove first element which is command args = full_command; } if(args[0] === "--help" || args[0] === "-h") { command.help(client, message, args); } else { command.run(client, message, args); } } } else if((message.content.startsWith(`${prefix}`)) && (message.channel.name !== bot_channel)) { return; } else { user.addXP(message_xp); // gives xp to user if message sent in chat let promotion = await user.checkForRankPromotion(); if(promotion) { client.emit('rankPromotion', client, user) // call rankPromotion event } user.save(); } }<file_sep>/test/src/user/base/user.test.js const assert = require('chai').assert; const expect = require('chai').expect; const { User } = require('../../../../src/user/base/user'); const { UserDBConnector } = require('../../../../src/db/user_db'); let db = new UserDBConnector(); describe('User', function() { describe('Check if User in Database', function() { it('Function should Create User Object', async function() { user_id = '171782598798999552'; let user = await new User(user_id).get(); expect(user.xp).to.be.an('number'); }) }) describe('Check Add Xp', async function() { user_id = '1234567'; let user = await new User(user_id); it('Test User Xp = 0', async function() { expect(user.xp).to.equal(0) && expect(user.level).to.equal(1); }) it('Test Add Xp to User', async function() { user.addXP(2500); expect(user.xp).to.equal(2500) && expect(user.rank).to.equal("Peasant") && // expect(user.level).to.equal(50) && expect(user.money).to.equal(25); }) it('Check For user Promotion', async function() { user.addXP(2500); expect(user.xp).to.equal(5000) && // expect(user.rank).to.equal("Merchant") && expect(user.level).to.equal(1); // expect(user.money).to.equal(75) ; }) }) })<file_sep>/test/src/user/rank/rank.test.js const expect= require('chai').expect; const { Rank } = 
require('../../../../src/user/rank/rank'); let test_rank = "Lord" let rank = new Rank(); describe('Rank Config Values', function() { it('Make Sure Rank Values are not null', async function () { let all_ranks = rank.getAllRanks(); expect(all_ranks).to.be.an('array').that.is.not.empty; }) it('Check if Rank Exists', async function () { let all_ranks = rank.getAllRanks(); expect(all_ranks.indexOf(test_rank)).to.not.equal(-1) }) it('Get Rank XP', async function () { let rank_xp = rank.getRankXP(test_rank); expect(rank_xp).to.be.an('number'); }) it('Check if Can Be Promoted', async function () { let current_xp = 5000; let current_rank = 'Peasant' let promote = rank.checkForRankPromotion(current_rank, current_xp); expect(promote).to.be.not.false; }) it('Get Rank', async function () { let current_rank = 'Peasant' let search_rank = rank.getRank(current_rank); expect(search_rank).to.be.an('object').that.is.not.empty; }) it('Get Default Rank', async function () { let search_rank = rank.getRankDefault(); let default_rank = "Peasant"; expect(search_rank).to.be.an('string').that.is.not.empty && expect(search_rank).to.equal(default_rank); }) it('Get next Rank 1', async function () { let default_rank = "Peasant"; let next_rank = "Merchant" let search_rank = rank.getNextRank(default_rank); expect(search_rank).to.be.an('string').that.is.not.empty && expect(search_rank).to.equal(next_rank); }) it('Get next Rank 2', async function () { let default_rank = "King"; let next_rank = "King" let search_rank = rank.getNextRank(default_rank); expect(search_rank).to.be.an('undefined'); }) it('Get next Rank XP', async function () { let default_rank = "Peasant"; let next_rank = "Merchant" let next_rank_xp = rank.getXPToNextRank(default_rank); expect(next_rank_xp).to.be.an('number') && expect(next_rank_xp).to.equal(5000); }) })<file_sep>/src/bonus/daily.js const {daily_money_bonus, daily_xp_bonus} = require('../../conf/config.json'); const {DailyBonusDBConnector} = require('../db/daily_bonus'); const {User} = require('../user/base/user') const cron = require('cron'); let db = new DailyBonusDBConnector(); class DailyBonus { runDailyTasks(server) { //'0 8 * * *' for Every Day at 8:00 am const job = cron.job('0 8 * * *', async () => { console.log(`${new Date()} Running Daily Bonus Job`); await db.deleteAllDailyBonusRecords(); await this.checkAllUsersForBonus(server); }); job.start() } async giveDailyBonus(user) { let user_bonus = await this.checkIfAlreadyLoggedInForDay(user.id); if(user_bonus === undefined) { await db.addUser(user) let date = new Date(); console.log(`${date} Daily Bonus Added: ${user.name}`); user.addXP(daily_xp_bonus); user.updateMoney(daily_money_bonus); user.save(); } } async checkAllUsersForBonus(server){ let users = server.members; user_list = users.keyArray() for( let i of user_list ) { let member = users.get(i) if (!member.user.bot) { if(member.presence.status === "online") { let user = await new User(member.user.id).get(); await this.giveDailyBonus(user); } } } } async checkIfAlreadyLoggedInForDay(id) { let user = await db.get(id) return user; } } module.exports = { DailyBonus };<file_sep>/src/economy/money.js let ranks = require('../../conf/config.json'); class Money { constructor(value=0) { this.value = 0; } getUserMoney() { // return user money } } module.exports = { Money };
aafae52c38ff7ae876095818756055d49fece3e3
[ "Markdown", "JavaScript", "Dockerfile" ]
24
Markdown
sdavid09/HandBot
edcfa674fe5b0ee2b285511496ad2aaa3999c58c
1669838a8b6b07389985d33b21fe6259bccbec4e
refs/heads/master
<file_sep>import { Component, OnInit } from '@angular/core'; import { HttpClient } from '@angular/common/http'; import { Planet } from '../model/Planet'; @Component({ selector: 'app-shortestpath', templateUrl: './shortestpath.component.html', styleUrls: ['./shortestpath.component.css'] }) export class ShortestpathComponent implements OnInit { planets; startPlanet; endPlanet; planetHops = new Array<Planet>(); submitFlag = false; hopsSize = new Array<number>(); submitCount; constructor(public httpClient: HttpClient) { } ngOnInit() { this.createGalaxy(); this.getAllPlanets(); } createGalaxy() { this.httpClient.post('http://localhost:8081/creategalaxy', {}).subscribe(); } getAllPlanets() { this.httpClient.get('http://localhost:8081/getAllPlanets') .subscribe(data => { this.planets = data; }); } findShortestPath() { this.httpClient.get('http://localhost:8081/calculateshortestpath' + '/' + this.startPlanet + '/' + this.endPlanet) .subscribe(data => { this.capturePlanetHops(data); }); this.hopsSize = []; this.submitFlag = true; } captureSource(event) { this.startPlanet = event.target.value; } captureTarget(event) { this.endPlanet = event.target.value; } capturePlanetHops(planets) { this.planetHops = planets; for (let i = 0; i < this.planetHops.length; i++) { this.hopsSize.push(i); } } } <file_sep>export class Planet { name; }
82bae2179e20a4f1a03458c5bc6a44ae770a6751
[ "TypeScript" ]
2
TypeScript
SudarshanaReddy/ShortestPath-Angular
d607785c0e9fbf90d01066a347ad60fab76443dc
64e2404f688a02451fc95faf409863cc5d571089
refs/heads/master
<repo_name>r3ello/springBatch<file_sep>/src/main/java/com/rbello/ExampleSpringBatch/Transaction.java package com.rbello.ExampleSpringBatch; import lombok.AllArgsConstructor; import lombok.Builder; import lombok.Data; import lombok.Getter; import lombok.NoArgsConstructor; import lombok.RequiredArgsConstructor; import lombok.Setter; import lombok.ToString; @AllArgsConstructor @NoArgsConstructor @Getter @Setter @ToString @Builder public class Transaction { private int id; private double amount; private String description; private String type; } <file_sep>/README.md # Spring Batch Example using Spring Batch and Spring Boot to process transactions. ## First step: read Transaction from JSON using ``` @Bean public JsonItemReader<Transaction> transactionReader() { return new JsonItemReaderBuilder<Transaction>() .jsonObjectReader(new JacksonJsonObjectReader<>(Transaction.class)) .resource(new ClassPathResource("transaction.json")) .name("transactionJsonItemReader") .build(); } ``` ## Next step: change Change the sign of the amount, all DEBIT transactions will have a negative sign in the Database ``` public class TransactionItemProcessor implements ItemProcessor<Transaction, Transaction>{ @Override public Transaction process(Transaction t) throws Exception { return Transaction.builder() .description(t.getDescription()) .id(t.getId()) .type(t.getType()) .amount(t.getType().equals("CREDIT") ? t.getAmount() : -1*t.getAmount()) .build(); } } ``` ## Next step Write transaction item in Data Base using: ``` @Bean public JdbcBatchItemWriter<Transaction> transactionWriter(DataSource dataSource) { return new JdbcBatchItemWriterBuilder<Transaction>() .itemSqlParameterSourceProvider(new BeanPropertyItemSqlParameterSourceProvider<>()) .sql("INSERT INTO transaction (id, amount,type,description) VALUES (:id, :amount, :type, :description)") .dataSource(dataSource) .build(); } ``` # Dependencies ``` <dependency> <groupId>org.springframework.boot</groupId> <artifactId>spring-boot-starter-batch</artifactId> </dependency> ``` # User memory Data Base ``` spring.datasource.url=jdbc:h2:mem:device spring.datasource.driverClassName=org.h2.Driver spring.datasource.username=test spring.datasource.password=<PASSWORD> ``` disable automatic spring batch run ``` spring.batch.job.enabled=false ``` ## JSON Data Example ``` [ { "id": 1, "amount": 100.2, "type": "CREDIT", "description": "description1" }, { "id": 2, "amount": 542.05, "type": "CREDIT", "description": "description2" }, { "id": 3, "amount": 985.35, "type": "CREDIT", "description": "description3" }, { "id": 4, "amount":741.52, "type": "DEBIT", "description": "description4" },{ "id": 5, "amount":216.57, "type": "CREDIT", "description": "description5" },{ "id": 6, "amount": 100.05, "type": "DEBIT", "description": "description6" },{ "id":7, "amount": 58.25, "type": "DEBIT", "description": "description7" },{ "id": 8, "amount": 999.9, "type": "CREDIT", "description": "description8" } ] ``` <file_sep>/src/main/resources/application.properties spring.datasource.url=jdbc:h2:mem:device spring.datasource.driverClassName=org.h2.Driver spring.datasource.username=abc spring.datasource.password=abc spring.batch.job.enabled=false <file_sep>/src/main/resources/schema.sql CREATE TABLE transaction ( id INTEGER NOT NULL , type VARCHAR(128) NOT NULL, description VARCHAR(255) NOT NULL, amount DOUBLE , PRIMARY KEY (id) );<file_sep>/src/main/java/com/rbello/ExampleSpringBatch/BatchConfig.java package com.rbello.ExampleSpringBatch; import javax.sql.DataSource; import 
org.springframework.batch.core.Job; import org.springframework.batch.core.Step; import org.springframework.batch.core.configuration.annotation.JobBuilderFactory; import org.springframework.batch.core.configuration.annotation.StepBuilderFactory; import org.springframework.batch.core.launch.support.RunIdIncrementer; import org.springframework.batch.item.database.BeanPropertyItemSqlParameterSourceProvider; import org.springframework.batch.item.database.JdbcBatchItemWriter; import org.springframework.batch.item.database.builder.JdbcBatchItemWriterBuilder; import org.springframework.batch.item.json.JacksonJsonObjectReader; import org.springframework.batch.item.json.JsonItemReader; import org.springframework.batch.item.json.builder.JsonItemReaderBuilder; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; import org.springframework.core.io.ClassPathResource; @Configuration public class BatchConfig { @Autowired public JobBuilderFactory jobBuilderFactory; @Autowired public StepBuilderFactory stepBuilderFactory; @Autowired public DataSource dataSource; @Bean public Job processTransactionJob() { return jobBuilderFactory.get("processJob") .listener(listener()) .flow(processTransactionFromJSON()) .end() .build(); } @Bean public Step processTransactionFromJSON(){ System.out.println("BatchConfig.processTransactionFromJSON()"); return stepBuilderFactory.get("processTransactionFromJSON") .<Transaction, Transaction> chunk(2) .reader(transactionReader()) .processor(transactionProcessor()) .writer(transactionWriter(dataSource)) .build(); } @Bean public TransactionItemProcessor transactionProcessor() { return new TransactionItemProcessor(); } @Bean public JobListener listener() { return new JobListener(); } @Bean public JsonItemReader<Transaction> transactionReader() { System.out.println("BatchConfig.transactionReader()"); return new JsonItemReaderBuilder<Transaction>() .jsonObjectReader(new JacksonJsonObjectReader<>(Transaction.class)) .resource(new ClassPathResource("transaction.json")) .name("transactionJsonItemReader") .build(); } @Bean public JdbcBatchItemWriter<Transaction> transactionWriter(DataSource dataSource) { System.out.println("BatchConfig.transactionWriter()"); return new JdbcBatchItemWriterBuilder<Transaction>() .itemSqlParameterSourceProvider(new BeanPropertyItemSqlParameterSourceProvider<>()) .sql("INSERT INTO transaction (id, amount,type,description) VALUES (:id, :amount, :type, :description)") .dataSource(dataSource) .build(); } }
6fa67d0daa07b29ba00ab3265587ab4f9b3c11b3
[ "Java", "Markdown", "SQL", "INI" ]
5
Java
r3ello/springBatch
2b5a9b1d16142ce57e08ac33742838094b88dc3b
cfa4a42452b99096352e4fc6446770094f8f39a3
refs/heads/master
<repo_name>QA-InfoTech/jmeter-perf-plugin<file_sep>/README.md JMETER-PERFORMANCE-PLUGIN DESCRIPTION: QAIT Automatic Ultimate Thread Group plug-in is an enhancement of the Ultimate Thread Group that automatically distributes the users and configures the elements of the Test Plan accordingly. For a distributed load test run, calculating the Initial Delay, Startup Time, Hold Load For & Shutdown Time parameters is a tedious task, but with this plug-in all of this can be achieved easily. By providing the following field values, the aforementioned parameters are populated automatically, without introducing any human error: * Virtual User per Step * Steady State Duration * Ramp-up (in sec) * Ramp-down (in sec) * Number of Agents NOTE: For a non-distributed load test, the number of agents will be set to 1.
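A rough sketch of the kind of per-agent arithmetic the plug-in automates. The plug-in's actual formula is not given in this README, so the mapping below (including the output column names) is only an assumption used for illustration:

```
# Hypothetical illustration: split the per-step virtual users across load agents and
# fill in one Ultimate Thread Group row. Not the plug-in's real algorithm.
def thread_group_row(virtual_users_per_step, steady_state_s, ramp_up_s, ramp_down_s, agents=1):
    users_per_agent = virtual_users_per_step // agents   # each agent drives an equal share
    return {
        "start_threads_count": users_per_agent,
        "initial_delay_s": 0,          # first step assumed to start immediately
        "startup_time_s": ramp_up_s,
        "hold_load_for_s": steady_state_s,
        "shutdown_time_s": ramp_down_s,
    }

# e.g. 100 users per step, 300 s steady state, 60 s ramp-up/down, 4 agents
print(thread_group_row(100, 300, 60, 60, agents=4))
```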
0489eea4433ddd326794a5b71cbc08668db6bc3e
[ "Markdown" ]
1
Markdown
QA-InfoTech/jmeter-perf-plugin
cf0d8c01b254807580ddaaf3002eb8cb1b44a98c
5628846bb7da9910ba445d20e0c8b0b2813bcb9b
refs/heads/master
<file_sep>import os root = os.getcwd() if __name__ == '__main__': # If the program is run directly, root = root.rpartition('\\')[0] # go up one folder in root global pathRegions; pathRegions = root + "\\data\\regions\\" # comment string # in region data files, anything to the right of one of these is ingored until the next line. global cString; cString = '//' global hexChars; hexChars = '0123456789abcdef' global hexCombos; hexCombos = ['%s%s'%(hexChars[n1],hexChars[n2]) for n1 in range(16) for n2 in range(16)] if __name__ == '__main__': from region import room,region else: from engine.region import room,region def notEmpty(line): '''Determines whether or not a line is empty (composed solely of tabs and spaces.)''' return bool(line.replace('\t','').replace(' ','')) def getInstances(string,inst): '''returns a list of the locations of all occurances of "inst" in "string"''' pos = 0 instances = [] while True: nextPos = string[pos:].find(inst) if nextPos == -1: return instances instances.append(pos + nextPos) pos += (nextPos + 1) def getSection(lines): # gets the first section in 'lines' & returns 'lines' split around the found region try: # startLine = lines.index('{') # except: # If there's not a '{' line remaining return lines, [], None, [] # before = lines, return None for sectionName if startLine == 0: print("Error: invalid region data. '{' found without identifier on previous line.") raise ValueError sectionName = lines[startLine-1] # name of the section should be the line before the '{' linesBefore = lines[:startLine-1] # lines before the section recursion = 1 # number of '{' deep linesSection = [] # lines inside the section pos = startLine + 1 # line to start looking at while recursion > 0: newLine = lines[pos] if newLine == '{':recursion += 1 if newLine == '}':recursion -= 1 if recursion > 0 :linesSection.append(newLine) pos += 1 linesAfter = lines[pos:] return linesBefore, linesSection, sectionName, linesAfter def getStandaloneLinesAndSections(lines): # splits 'lines' into standalone lines and subregions (sections) done = False standaloneLines = [] # lines not in any section sections = {} # dict of {name:contents} while not done: linesBefore, linesSection, sectionName, linesAfter = getSection(lines) if sectionName == None: # no more sections done = True # loop done standaloneLines += linesBefore # add rest of lines to standalone lines else: standaloneLines += linesBefore sections.update([ [sectionName,linesSection] ]) lines = list(linesAfter) return standaloneLines,sections def getValuePairs(lines): # input ['variable:value',...] output {'variable':'value',...} valueDict = {} for line in lines: key, delimeter, value = line.partition(':') if ':' in line: if key in valueDict.keys(): valueDict[key]=value else: valueDict.update([[key,value]]) return valueDict def checkDictContents(dict_,contents): # checks if the dict_ contains contents [[key,type],...] 
keys = dict_.keys() for key, type_ in contents: if not (key in keys): return False,[key,type_] try: type_(dict_[key]) except: return False,[key,type_] return True,[] def getIsTileDataValid(tileData): if len(tileData) < 16: # must be at least 16 rows return False # further ones are ignored for line in tileData: if len(line) < 47: # Minimum lenth is 47 (16*2 hex chars, +15 spaces) return False # example line: "ff 00 00 00 00 00 00 00 00 00 00 00 00 ff 00 ff" for n in range(16): # Check each pair of hex values if not(line[3*n:3*n+2] in hexCombos): # for legitimacy return False # 'f3' 'f5' '00' good, 'g5' '--' etc bad return True def processTileData(tileData): tileDataValid = getIsTileDataValid(tileData) if not tileDataValid: return False # returning false tells previous function that tile data was invalid return [[tileData[nY][3*nX:3*nX+2] for nY in range(16)] for nX in range(16)] ############################################################################################## ## The following functions will need to be updated as regions and rooms become more complex ## ############################################################################################## def loadRegion(ID): regionPath = pathRegions + 'region_%s.txt'%ID # check to see if the region exists if not (os.path.exists(regionPath)): print("Error: requested region (%s) does not exist."%regionPath) return None opfl = open(regionPath,'r') regionData = opfl.read() opfl.close() # close file del opfl # clean up return processRegionData(regionData,regionPath) def createRoom(roomData,regionPath,roomX,roomY): roomStandaloneLines, roomSections = getStandaloneLinesAndSections(roomData) roomConfigs = getValuePairs(roomStandaloneLines) requiredConfigs = [ # Required config variables for a room ['name',str], # List will be expanded as rooms become more complex ['x' ,int], ['y' ,int], ['BG' ,str], ['MG' ,str], ['FG' ,str], ] configsComplete,invalidPair = checkDictContents(roomConfigs,requiredConfigs) if not configsComplete: print("Error: room found without complete configuration in region (%s)"%regionPath) print("Missing or invalid config variable '%s' in room '%s,%s' with type '%s'"%(invalidPair[0],roomX,roomY,invalidPair[1])) raise ValueError roomName = roomConfigs['name'] roomXC = int(roomConfigs['x']) # different variable from roomX, to check that the positions defined here roomYC = int(roomConfigs['y']) # and when the room was declared agree with each other. roomBG = roomConfigs['BG'] roomMG = roomConfigs['MG'] roomFG = roomConfigs['FG'] if not ((roomXC == roomX) and (roomYC == roomY)): # make sure coordinants agree with each other print("Error room '%s,%s' found in region '%s' with contradictory coordinant declaration."%(roomX,roomY,regionPath)) raise ValueError roomMGData = None # I haven't added mid-ground roomFGData = None # or fore-ground data yet. 
if not ('bg_data') in roomSections.keys(): # make sure bg_data has been declared in this room print("Error: room '%s,%s' found without section 'bg_data' in region '%s'"%(roomX,roomY,regionPath)) raise ValueError roomBGData = processTileData(roomSections['bg_data']) if roomBGData == False: print("Error: room (%s,%s) found with invalid bg_data in region (%s)"%(roomX,roomY,regionPath)) raise ValueError return room( roomName, roomXC, roomYC, roomBG, roomMG, roomFG, roomBGData, roomMGData, roomFGData, ) def processRegionData(regionData,regionPath): # remove comments & empty lines lines = regionData.replace('\t','').splitlines() # remove tabs, split lines lines = [line.partition(cString)[0] for line in lines] # remove comments lines = [line for line in lines if notEmpty(line)] # remove empty lines # separate lines into standalones (should be empty) and present regions. standaloneLines,sections = getStandaloneLinesAndSections(lines) if len(standaloneLines) > 0: print("Warning: region being loaded (%s) has declarative lines outside of any region."%regionPath) ################################################ ## get region data from 'region_data' section ## ################################################ if not ('region_data' in sections.keys()): # If we didn't find a section labeled 'region_data' print("Error: section 'region_data' not found in region (%s)"%regionPath) # Inform the user that the region data file is invalid raise ValueError # Exit out with an error. regionData = getValuePairs(sections['region_data']) requiredRegionDataKeys = [ ['name' ,str], # These are config values necessary to initiate the region object ['ID' ,int], # More to be added later, as region objects grow more complex. ['x_dim',int], ['y_dim',int], ['bg' ,str], ['mg' ,str], ['fg' ,str], ] regionDataComplete,invalidPair = checkDictContents(regionData,requiredRegionDataKeys) if not regionDataComplete: print("Error: region data for region (%s) incomplete."%regionPath) print("Missing or invalid config variable '%s' in section 'region_data' with type '%s'"%(invalidPair[0],invalidPair[1])) raise ValueError regionName = regionData['name'] # Get values from the regionData dictionary regionID = int(regionData['ID']) # These will be used when declaring the region object regionXDim = int(regionData['x_dim']) # regionYDim = int(regionData['y_dim']) # more to be added as regions become more complex regionBG = regionData['bg'] regionMG = regionData['mg'] regionFG = regionData['fg'] ####################### ## extract room data ## ####################### if not ('rooms' in sections.keys()): print("Error: section 'rooms' not found in region (%s)"%regionPath) raise ValueError roomStandaloneLines, roomSections = getStandaloneLinesAndSections(sections['rooms']) if len(roomStandaloneLines) > 0: # warn user if sray lines present in rooms data print("Warning: declarative lines found in the 'rooms' section of region (%s). They will be ignored."%regionPath) # declare array of rooms. By default each is None; if not declared in the region file, any room does not exist. 
rooms = [ [ None for nY in range(regionYDim) ] for nX in range(regionXDim) ] for location in roomSections.keys(): roomX,delimeter,roomY = location.partition(',') try: # Make sure declarative coordinants are valid integers roomX = int(roomX) # roomY = int(roomY) # except: # print("Error: room found with invalid declarative coordinants in region (%s)."%regionPath) print("Declarative coordinants are '%s' and should be of form 'x,y' (x and y integers)"%location) raise ValueError if not ((0 <= roomX < regionXDim) and (0 <= roomY < regionYDim)): # make sure coordinants print("Error: room found with invalid declarative coordinants in region (%s)."%regionPath) # are within allowed bounds print("Coordinants (%s) outside allowed range (0 <= x < x_dim), (0 <= y < y_dim)"%location) # set by region size raise ValueError rooms[roomX][roomY] = createRoom(roomSections[location],regionPath,roomX,roomY) return region( regionName, regionID , regionXDim, regionYDim, regionBG , regionMG , regionFG , rooms , ) <file_sep>import pygame, os root = os.getcwd() if __name__ == '__main__': # If program is being run directly root = root.rpartition('\\')[0] # Step up one folder in root global hexChars; hexChars = '0123456789abcdef' global tsX; tsX = 256 global tsY; tsY = 256 global basePaths basePaths = { 'bg':root+'\\data\\tilesets\\bg\\', 'mg':root+'\\data\\tilesets\\fg\\', 'fg':root+'\\data\\tilesets\\mg\\' } def loadTileset(ID,kind): kind = kind.lower() if not (kind in ['bg','mg','fg']): # 'kind' must be one of three kinds print("Error: input 'kind' must be 'bg' 'mg' or 'fg'") # 'bg' (background), 'mg' (midground), 'fg' (foreground) return None # otherwise, can't load tileset tilesetPath = basePaths[kind] + '%s_%s.png'%(kind,ID) if not (os.path.exists(tilesetPath)): # check if the requested set exists print("Error: desired tileset (%s) does not exist."%tilesetPath) # If it doesn't return None # we can't load it try: tilesetImage = pygame.image.load(tilesetPath) except: print("Error: could not load tileset (%s). It may be corrupt."%tilesetPath) return None width = tilesetImage.get_width() height = tilesetImage.get_height() if not ((width == tsX) and (height == tsY)): # If we have the wrong dimensions print("Error: tileset (%s) is not %i by %i pixels (required size.)"%(tilesetPath,tsX,tsY)) # then it's not a valid tileset return None # and we can't load it tileDict = processTileset(tilesetImage.copy()) del tilesetImage # delete image to save memory return tileDict # return the completed dict def processTileset(tilesetImage, scale=2): tilesetImageScaled = pygame.transform.scale(tilesetImage,[tsX*scale,tsY*scale]) tileX = (tsX//16) * scale # width of a tile tileY = (tsY//16) * scale # height of a tile tileDict = {} for x in range(16): for y in range (16): img = pygame.surface.Surface((tileX,tileY)) img.blit(tilesetImageScaled,(-x*tileX,-y*tileY)) tileDict.update([ [hexChars[x]+hexChars[y],img.copy()] ]) del img # delete individual images to save memory del tilesetImage # Delete used images del tilesetImageScaled # to save memory return tileDict # return the populated dict <file_sep> class room(object): def __init__( self , # properties of room. More to be added when rooms become more complex. 
name , # room name x , # x coordinant inside host region y , # y coordinant inside host region bg , # background tileset: 'default' assumes default value from host region mg , # midground tileset: works same as above fg , # foreground tileset: works same as above bgData, # background tile data mgData, # midground tile data fgData, # foreground tile data ): self.name = name self.x = x self.y = y self.bgTileset = bg self.mgTileset = mg self.fgTileset = fg self.bgData = bgData self.mgData = mgData self.fgData = fgData class region(object): def __init__( self, name , # name of region ID , # region ID xDim , # x dimension of region (measured in rooms) yDim , # y dimension of regino (measured in rooms) bg , # default background tileset (hex code) mg , # default midground tileset (hex code) fg , # default foreground tileset (hex code) rooms , # room data ): self.name = name self.ID = ID self.xDim = xDim self.yDim = yDim self.bg = bg self.mg = mg self.fg = fg self.rooms = rooms <file_sep>""" This file exists to package this directory as an importable module. below is a list of component files, and their purposes: __init__.py : make folder a module, inform reader loaderRegion.py : load & process region data from region files loaderTileset.py : load & process tileset data from tileset files region.py : contains classes for room & region objects """
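A minimal region file accepted by processRegionData() above, written as a Python string so it can be fed straight to the loader. Everything here is inferred from the parser's own checks and is a hypothetical example, not a file shipped with the repo: tabs are stripped but spaces are not (so lines start at column 0 or use tabs), '//' starts a comment, and each room's bg_data needs at least 16 rows of 16 hex pairs.

```
bg_row = " ".join(["00"] * 16)        # one tile row: 16 hex pairs, 47 characters
bg_rows = "\n".join([bg_row] * 16)    # bg_data must contain at least 16 such rows

example_region = (
    "region_data\n"
    "{\n"
    "name:Test Region\n"
    "ID:0\n"
    "x_dim:1\n"
    "y_dim:1\n"
    "bg:00\n"
    "mg:00\n"
    "fg:00\n"
    "}\n"
    "rooms\n"
    "{\n"
    "0,0\n"
    "{\n"
    "name:Start Room\n"
    "x:0\n"
    "y:0\n"
    "BG:default\n"
    "MG:default\n"
    "FG:default\n"
    "bg_data\n"
    "{\n" + bg_rows + "\n"
    "}\n"
    "}\n"
    "}\n"
)

# processRegionData(example_region, "<in-memory example>") should return a 1x1 region
# whose single room "Start Room" has an all-'00' background layer.
```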
05c660e1bc7c388cbfb068c4575d2a01be3fe96a
[ "Python" ]
4
Python
mandolout/Engine-dev
dd246cbe9087da82d57623e8ed15d25a34bf0402
341c590a925587db4863ae0f8a23c19194856abf
refs/heads/main
<file_sep>using Microsoft.AspNetCore.Mvc; using Microsoft.Extensions.Logging; using Newtonsoft.Json; using NRI.Models; using RestSharp; using System; using System.Collections.Generic; using System.Text; namespace NRI.Controllers { [Route("api/[controller]")] [ApiController] public class GITController : ControllerBase { private readonly ILogger<GITController> _logger; public GITController(ILogger<GITController> logger) { _logger = logger; } [HttpGet] [Route("GetUserRepository")] public IActionResult GetUserRepositaryAsync([FromQuery] QueryParameters parameters) { if (ModelState.IsValid) { try { var client = new RestClient("https://api.github.com/users/" + parameters.user + "/repos"); var request = new RestRequest(Method.GET); request.AddHeader("Content-Type", "application/vnd.github.v3+json"); request.AddHeader("Cache-Control", "no-cache"); request.AddHeader("Authorization", "Basic " + parameters.token); IRestResponse response = client.Execute(request); int StatusCode = (int)response.StatusCode; if (StatusCode == 200) { List<RepoList> data = JsonConvert.DeserializeObject<List<RepoList>>(response.Content); if (data == null) { return NotFound(); } StringBuilder sb = new StringBuilder(); sb.AppendLine("Repositories_Name"); for (int i = 0; i < data.Count; i++) { var repo = data[i]; sb.Append(repo.name + ','); sb.Append("\r\n"); } return File(Encoding.UTF8.GetBytes (sb.ToString()), "text/csv", "Reports.csv"); } else { return Ok(response.StatusDescription); } } catch (Exception) { return BadRequest(); } } return BadRequest(); } } } <file_sep># AccessGitRepo Access Git Repo using Rest API How to configure and run Clone code from Github: git clone https://github.com/ajaiswal3/AccessGitRepo Open solution NRI.sln in Visual Studio 2019 Build project which will restore all NuGet Packages Final Step Run Project
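A hypothetical client-side sketch of calling the GetUserRepository endpoint once the project is running. The host/port and the "user"/"token" query-string names are assumptions (the QueryParameters model is not shown in this dump), and the token must already be a Base64-encoded "username:personal_access_token" pair, since the controller forwards it as an HTTP Basic credential:

```
# Assumed local base URL and query parameter names; adjust to your launch settings.
import base64
import requests

user = "octocat"                                            # GitHub account to list
token = base64.b64encode(b"octocat:ghp_example").decode()   # placeholder credential

resp = requests.get(
    "http://localhost:5000/api/GIT/GetUserRepository",
    params={"user": user, "token": token},
    timeout=30,
)
resp.raise_for_status()

# The endpoint returns a CSV attachment with one repository name per line.
with open("Reports.csv", "wb") as fh:
    fh.write(resp.content)
```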
6cf547a5c3ce08f9af5ab02cdea03ebeb7bd8940
[ "C#", "Markdown" ]
2
C#
ajaiswal3/AccessGitRepo
c78c44ee3c9d9c84c40ffd8976881e060146aac1
4f48ce195513e65aea0a89eb75cfcbb0d06f1b3e
refs/heads/main
<repo_name>tanvirsiraj/shopping-cart-ex<file_sep>/app.js // console.log('I am in a separate file') function updateProduct(product,price, isIncreasing){ debugger const productInput = document.getElementById(product+'-number'); let productNumber = parseInt(productInput.value); if(isIncreasing == true){ productNumber = productNumber + 1; } else if(productNumber > 0){ productNumber = productNumber - 1; } productInput.value = productNumber; // update total const productTotal = document.getElementById(product+'-total'); productTotal.innerText = productNumber * price; calculation(); } function getInputNumber(product){ const productInput = document.getElementById(product+'-number'); const productNumber = parseInt(productInput.value); return productNumber; } function calculation(){ const phoneTotal = getInputNumber('phone') * 1219; const caseTotal = getInputNumber('case') * 59; const subTotal = phoneTotal + caseTotal; const taxTotal = subTotal / 10; const Total = subTotal + taxTotal; document.getElementById('sub-total').innerText = subTotal; document.getElementById('tax').innerText = taxTotal; document.getElementById('total').innerText = Total; } //handle phone increase decrease events document.getElementById('phone-plus').addEventListener('click', function(){ // console.log('phone plus clicked'); updateProduct('phone',1219, true); }); document.getElementById('phone-minus').addEventListener('click', function(){ // console.log('phone minus clicked'); updateProduct('phone',1219, false); }); //handle case increase decrease events document.getElementById('case-plus').addEventListener('click', function(){ // console.log('case plus clicked'); updateProduct('case',59, true); }); document.getElementById('case-minus').addEventListener('click', function(){ // console.log('case minus clicked'); updateProduct('case',59, false); });
8b149e2128314b6eee58318da771f564923339d8
[ "JavaScript" ]
1
JavaScript
tanvirsiraj/shopping-cart-ex
3955eea10ae5181a2d99391a5c9656cc402795c6
785e0808fb776f5c83d90d01762234ad5d6abada
refs/heads/master
<repo_name>nicksp/angular-ascii-warehouse<file_sep>/static/app/app.module.js import angular from 'angular'; // Config modules import constants from './app.constants'; import config from './app.config'; // Core modules import asciiShopAds from './ads/ads.module'; import asciiShopUtils from './utils/utils.module'; import asciiShopWarehouse from './warehouse/warehouse.module'; // Create and bootstrap application const requires = [ asciiShopUtils, asciiShopAds, asciiShopWarehouse ]; angular.module('asciiShop', requires); angular.module('asciiShop') .constant('AppSettings', constants) .config(['$locationProvider', config]); <file_sep>/static/app/utils/infinite-scroll/infinite-scroll.controller.js import angular from 'angular'; export default function infiniteScrollController($scope, $attrs, $window, Utils) { this.$postLink = function () { // Cache root elements const doc = $window.document; const docElement = doc.documentElement; // The height of the visible area of a document including // the height of the horizontal scrollbar (if present) const visibleHeight = docElement.offsetHeight; // Number of pixel from the bottom before we fetch next set of products const threshold = 200; angular.element($window).bind('scroll', Utils.throttle(() => { // Exit processing in case we're loading products at the moment or reached the end of catalogue if (this.wc.hasReachedEnd || this.wc.isLoading) { return false; } // Total height of all element's content, including non-visible content, // accounting for cases where html/body are set to height: 100% let scrollableHeight = (docElement && docElement.scrollHeight) || doc.body.scrollHeight; // How much the window is scrolled down let scrollTop = (docElement && docElement.scrollTop) || doc.body.scrollTop; /** * Check if we're almost at the bottom of products grid and load more products. * * Take the top scroll of the window * add the window's viewport height (visible window) * and check if that >= the height of the overall content (document). 
*/ if (scrollTop + visibleHeight >= scrollableHeight - threshold) { $scope.$apply(this.wc[$attrs.action]); } }, 100)); }; } <file_sep>/static/app/warehouse/content/status/warehouse-content-status.component.js const warehouseContentStatus = { templateUrl: 'app/warehouse/content/status/content-status.html', require: { wc: '^^warehouseContent' // this directive look for the controller on its parents } }; export default warehouseContentStatus; <file_sep>/static/app/app.constants.js const appSettings = { apiUrl: '/api/products', adsUrl: '/ad/' }; export default appSettings; <file_sep>/static/app/ads/ads.module.js import angular from 'angular'; import adsService from './ads.service'; export default angular .module('asciiShop.ads', []) .factory('adsService', ['$sce', 'Utils', 'AppSettings', adsService]) .name; <file_sep>/static/app/warehouse/warehouse-products.service.js export default function productsService($http, Utils, AppSettings) { // Public API const service = { getItems }; return service; //////////// // Get next set of products function getItems(sort, limit, skip) { const config = { transformResponse: (stream) => { return Utils.parseStreamingJSON(stream); } }; return $http.get(_getApiUrl(sort, limit, skip), config); } function _getApiUrl(sort, limit, skip) { return AppSettings.apiUrl + `?sort=${sort}&limit=${limit}&skip=${skip}`; } } <file_sep>/static/app/utils/timestamp.filter.js export default function timestamp() { return function (date) { // Map of durations in seconds const epochs = { day: 86400, hour: 3600, minute: 60 }; const getDuration = (seconds) => { for (let epoch of Object.keys(epochs)) { let interval = Math.floor(seconds / epochs[epoch]); if (interval >= 1) { return { interval: interval, epoch: epoch }; } } }; const fromNow = (date) => { let seconds = Math.floor((new Date() - new Date(date)) / 1000); let { interval, epoch } = getDuration(seconds); let suffix = interval === 1 ? '' : 's'; if (interval > 7 && epoch === 'day') { return date; } return `${interval} ${epoch}${suffix} ago`; }; return fromNow(date); }; } <file_sep>/static/app/utils/utils.service.js export default function Utils($timeout) { /** * Create an array with unique values by shuffling a list of numbers. */ this.getRandomArray = (size) => { let arr = [], i = 0; for ( ; i < size; i++) { arr[i] = i; } function shuffle(array) { let tmp, current, len = array.length; if (len) { while (--len) { current = Math.floor(Math.random() * (len + 1)); tmp = array[current]; array[current] = array[len]; array[len] = tmp; } } return array; } return shuffle(arr); }; /** * Get a random integer between min (included) and max (included). */ this.getRandomInt = (min, max) => { return Math.floor(Math.random() * (max - min + 1)) + min; }; /** * For parsing newline delimited JSON (Streaming JSON). */ this.parseStreamingJSON = (stream) => { let data = stream.split('\n'); // If the last element is empty, remove it if (!data[data.length - 1]) { data.pop(); } return data.map((item) => { return JSON.parse(item); }); }; /** * Throttle the amount of times function `fn` runs. 
*/ this.throttle = (fn, delay) => { let timer = null; return function () { let context = this, args = arguments; $timeout.cancel(timer); timer = $timeout(() => { fn.apply(context, args); }, delay); }; }; } <file_sep>/static/app/warehouse/product/warehouse-product.component.js const warehouseProduct = { bindings: { item: '=' }, templateUrl: 'app/warehouse/product/product.html' }; export default warehouseProduct; <file_sep>/static/app/app.config.js function onConfig($locationProvider) { $locationProvider.html5Mode(true); } export default onConfig; <file_sep>/static/assets/styles/main.scss @import "reset"; @import "vars"; @import "grid"; @import "common"; /* * App specific styles */ .b-root { min-width: 220px; max-width: 1680px; margin: 0 auto; text-align: left; } .box { background-color: $background--light; } .product-grid { overflow: hidden; padding-bottom: $half-space; .product-item { $shadowColor: rgba(0, 0, 0, .4); $shadowSize: 7px; position: relative; width: 320px; height: 200px; display: inline-block; vertical-align: top; margin: 24px 24px 0 4px; box-shadow: 0 0 $shadowSize $shadowColor; text-align: center; white-space: nowrap; overflow: hidden; &:hover { box-shadow: 0 0 $shadowSize*2 $shadowColor; } } } .product-item__meta { margin-top: 30px; } .product-item__face { padding-top: 16px; } .product-item__price { position: absolute; top: 50%; -webkit-transform: translateY(-50%); transform: translateY(-50%); width: 100%; color: $blue; font-size: $font-size--md; } .product-item__timestamp { position: absolute; bottom: 1.5em; left: 2em; color: $grayish; font-size: $font-size--xs; text-align: left; white-space: normal; } .sort-selector { margin: $half-space 0 0; } .app-status { text-align: center; padding: $font-size--xmd; background: rgba(0, 138, 222, .3); color: $font-color--dark; font-size: $font-size--md; line-height: 2; margin-bottom: $font-size--xmd; .status-loader { vertical-align: middle; margin-right: 6px; } p { margin-bottom: 0; } } <file_sep>/static/app/warehouse/header/warehouse-header.component.js import WarehouseHeaderCtrl from './warehouse-header.controller'; const warehouseHeader = { templateUrl: 'app/warehouse/header/header.html', controller: [WarehouseHeaderCtrl], controllerAs: 'warehouseHeader' }; export default warehouseHeader;
86695ba0f5bf060fff96233abe878e3f8dc87aa6
[ "SCSS", "JavaScript" ]
12
SCSS
nicksp/angular-ascii-warehouse
4326cf7bc3b55ce4136d3767d5acfea432efb328
774ba68788b8d67b700ded3ac0be76e7c28f8216
refs/heads/master
<file_sep>Backtracking search with the n queens problem. - 4_queens.py: hardcoded position values
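A minimal backtracking sketch of the n-queens search described above, included as a generic illustration; it is not the repository's 4_queens.py, which hardcodes its positions.

```
def solve_n_queens(n):
    solutions = []

    def safe(cols, col):
        # cols[r] is the column of the queen already placed on row r
        row = len(cols)
        return all(c != col and abs(c - col) != row - r for r, c in enumerate(cols))

    def place(cols):
        if len(cols) == n:                 # every row has a queen: record the solution
            solutions.append(tuple(cols))
            return
        for col in range(n):
            if safe(cols, col):            # prune columns attacked by earlier queens
                cols.append(col)
                place(cols)                # recurse into the next row
                cols.pop()                 # backtrack

    place([])
    return solutions

print(len(solve_n_queens(4)))   # 2 solutions for the 4-queens board
```

For n=4 this reports the two classic placements, which matches the hardcoded case the repo targets.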
33d4c74a7fa02b28e6e8e8155b413444fc6fbba3
[ "Text" ]
1
Text
kgashok/n_queens
bb2c35191b5da5215c37177412060857bf0a39e5
848f6cd041f33e1549594d23b507fbc1b1527c7d
refs/heads/master
<repo_name>wardasz/sem2<file_sep>/obrazy/lab7/zad1b-z klasami/obrazy/pole.h #pragma once class pole { public: pole(); pole(int x, int y); ~pole(); void ustawX(int); void ustawY(int); int dajX(); int dajY(); private: int X; int Y; }; <file_sep>/grafy/paraPunktow/paraPunktow2/Program.cs using System; using System.Collections.Generic; using System.IO; using System.Linq; namespace paraPunktow2 { class Program { static void Main(string[] args) { var s = new FileInfo(Directory.GetCurrentDirectory()); var s2 = s.Directory.Parent.Parent; String s3 = s2.ToString() + "\\dane.csv"; listy S = new listy(); punkt dodawany; using (var reader = new StreamReader(s3)) { while (!reader.EndOfStream) { var line = reader.ReadLine(); var values = line.Split(','); dodawany = new punkt(Convert.ToInt32(values[0]), Convert.ToInt32(values[1])); S.dodaj(dodawany); } } S.sortuj(); List<punkt> wynik = policz(S); punkt a = wynik.ElementAt(0); punkt b = wynik.ElementAt(1); double dystans = a.dystans(b); Console.WriteLine("Najbliższa para: "); a.napisz(); b.napisz(); Console.WriteLine("Dystan pomiędzy nimi równy jest " + Math.Sqrt(dystans)); Console.ReadKey(); } public static List<punkt> policz(listy S) { List<punkt> Sx = S.dajSx(); List<punkt> Sy = S.dajSy(); int ile = Sx.Count(); if (ile < 5) { punkt a = Sx.ElementAt(0); punkt b = Sx.ElementAt(1); double min = a.dystans(b); for(int x = 0; x<Sx.Count(); x++) { for(int y = x+1; y < Sx.Count(); y++) { double dys = Sx.ElementAt(x).dystans(Sx.ElementAt(y)); if (dys < min) { min = dys; a = Sx.ElementAt(x); b = Sx.ElementAt(y); } } } List<punkt> wynik = new List<punkt>(); wynik.Add(a); wynik.Add(b); return wynik; } else { listy S1 = new listy(); listy S2 = new listy(); int l; for (int x = 0; x < ile; x++) { punkt p = Sx.ElementAt(x); if (x < ile / 2) { S1.dodajX(p); p.ustawPrzed(true); } else { S2.dodajX(p); p.ustawPrzed(false); } } for (int x = 0; x < ile; x++) { punkt p = Sy.ElementAt(x); if (p.czyPrzed()==true) { S1.dodajY(p); } else { S2.dodajY(p); } } Sx = S1.dajSx(); ile = Sx.Count(); ile -= 1; punkt ostatni = Sx.ElementAt(ile); l = ostatni.dajX(); List<punkt> wynik1 = policz(S1); //najbliższe pary w podzbiorach List<punkt> wynik2 = policz(S2); double dystans1 = wynik1.ElementAt(0).dystans(wynik1.ElementAt(1)); double dystans2 = wynik2.ElementAt(0).dystans(wynik2.ElementAt(1)); double minDystans; //dystans między mniejszą parą List<punkt> minPara; //mniejsza para if (dystans1 < dystans2) { minDystans = dystans1; minPara = wynik1; } else { minDystans = dystans2; minPara = wynik2; } //tworzenie list punktów w pasie przy prostej listy S1znaczek = new listy(); listy S2znaczek = new listy(); List<punkt> tmp = S1.dajSy(); foreach(punkt p in tmp) { double odleglosc = p.dajX() - l; if (odleglosc < 0) odleglosc *= -1; if(odleglosc<= minDystans) { S1znaczek.dodaj(p); } } tmp = S2.dajSy(); foreach (punkt p in tmp) { double odleglosc = p.dajX() - l; if (odleglosc < 0) odleglosc *= -1; if (odleglosc <= minDystans) { S2znaczek.dodaj(p); } } List<punkt> tmp1 = S1znaczek.dajSy(); List<punkt> tmp2 = S2znaczek.dajSy(); foreach (punkt p in tmp1) { int licznik = 0; foreach(punkt P in tmp2) { if (P.dajY() >= p.dajY()) { licznik += 1; if (p.dystans(P) < minDystans) { minDystans = p.dystans(P); minPara = new List<punkt>(); minPara.Add(p); minPara.Add(P); } if (licznik >= 4) { break; } } } } foreach (punkt p in tmp2) { int licznik = 0; foreach (punkt P in tmp1) { if (P.dajY() >= p.dajY()) { licznik += 1; if (p.dystans(P) < minDystans) { minDystans = p.dystans(P); minPara = new List<punkt>(); 
minPara.Add(p); minPara.Add(P); } if (licznik >= 4) { break; } } } } return minPara; } } } } <file_sep>/grafy/sumy/sumy/skladnik.cs using System; using System.Collections.Generic; using System.Text; namespace sumy { class skladnik { private int wartosc; public skladnik(int a) { wartosc = a; } public int dajWartosc() { return wartosc; } public int porownaj(skladnik p) { if (wartosc > p.dajWartosc()) { return 1; } else { return -1; } } } } <file_sep>/obrazy/lab7/zad1b-z klasami/obrazy/pole.cpp #include "pole.h" #include<opencv2/highgui/highgui.hpp> #include<opencv2/imgproc/imgproc.hpp> #include<iomanip> #include<iostream> #include<conio.h> using namespace cv; using namespace std; pole::pole() { } pole::pole(int x, int y) { X = x; Y = y; } pole::~pole() { } void pole::ustawX(int x) { pole::X = x; } void pole::ustawY(int y) { pole::Y = y; } int pole::dajX() { return pole::X; } int pole::dajY() { return pole::Y; } <file_sep>/wzorce/singleton/singleton/logerMlody1.cs using System; using System.Collections.Generic; using System.Linq; using System.Text; using System.Threading.Tasks; namespace singleton { public class logerMlody1 : logerStary1 { new public void loguj() { Console.WriteLine("syn jeden pisze"); } } } <file_sep>/obrazy/lab7/zad1a-bez klas/obrazy/pole.cpp #include "pole.h" #include<opencv2/highgui/highgui.hpp> #include<opencv2/imgproc/imgproc.hpp> #include<iomanip> #include<iostream> #include<conio.h> using namespace cv; using namespace std; pole::pole() { } pole::pole(cv::Point p) { koordynaty = p; } pole::~pole() { } cv::Point pole::dajKoordynaty() { return koordynaty; } void pole::ustawKoordynaty(cv::Point p) { koordynaty=p; } <file_sep>/grafy/gałąź/grafy1/linia.cs using System; using System.Collections.Generic; using System.Text; namespace gałąź { class linia { private punkt p1; private punkt p2; public linia(punkt P1, punkt P2) { p1 = P1; p2 = P2; } public void napisz() { Console.WriteLine("Linia łącząca punkty (" + p1.dajX() + "," + p1.dajY() + ") oraz (" + p2.dajX() + "," + p2.dajY() + ")"); } } } <file_sep>/obrazy/lab12/zad/obrazy/main.cpp #include<opencv2/highgui/highgui.hpp> #include<opencv2/imgproc/imgproc.hpp> #include<iostream> #include<conio.h> #include<chrono> #include<iomanip> #include<ctime> #pragma warning(disable : 4996) //_CRT_SECURE_NO_WARNINGS using namespace cv; using namespace std; bool detekcja(Mat stara, Mat nowa) { Mat staraSzara; Mat nowaSzara; cvtColor(stara, staraSzara, CV_BGR2GRAY); cvtColor(nowa, nowaSzara, CV_BGR2GRAY); Mat robocza; absdiff(nowaSzara, staraSzara, robocza); CvSize rozmiar; rozmiar.width = (10 * 2) + 1; rozmiar.height = (10 * 2) + 1; GaussianBlur(robocza, robocza, rozmiar, 0); threshold(robocza, robocza, 80, 255, CV_THRESH_BINARY); vector<vector<Point>> kontury; vector<Vec4i> hier; findContours(robocza, kontury, hier, RETR_TREE, CHAIN_APPROX_SIMPLE); //findContours(robocza, kontury, hier, RETR_TREE, CV_RETR_EXTERNAL); if (kontury.size() > 0) { return true; } else { return false; } } int main() { typedef std::chrono::high_resolution_clock Time; typedef std::chrono::milliseconds ms; typedef std::chrono::duration<float> fsec; Mat stara; Mat nowa; Mat rys; Mat zapis; int numer = 1; bool nagrywanie=true; bool pierwsza=true; bool ruch = false; int tryb = 0; auto start = Time::now(); string nazwa; stringstream ss; VideoCapture cap; cap.open(0); namedWindow("window", CV_WINDOW_AUTOSIZE); double dWidth = cap.set(CV_CAP_PROP_FRAME_WIDTH, 640); double dHeight = cap.set(CV_CAP_PROP_FRAME_HEIGHT, 480); int fps = 25; CvSize rozmiar; rozmiar.width = 640; 
rozmiar.height = 480; VideoWriter twozony("D:\\klatki\\nowy1.avi", CV_FOURCC('D', 'I', 'V', 'X'), fps, rozmiar, true); while (1) { try { if (pierwsza==true) { cap >> stara; imshow("window", stara); pierwsza = false; }else{ cap >> nowa; nowa.copyTo(rys); nowa.copyTo(zapis); ruch = detekcja(stara, nowa); if (tryb == 0 && ruch == true) { tryb = 1; } if (tryb == 1 && ruch == false) { tryb = 2; start = Time::now(); } if (tryb == 2) { auto teraz = Time::now(); fsec roznica = teraz - start; ms czas = std::chrono::duration_cast<ms>(roznica); int czas2 = czas.count(); if (czas2 > 5000) { tryb = 3; } } if (nagrywanie == true) { if (tryb == 1 || tryb == 2) { circle(rys, Point(10, 10), 6, Scalar(0, 0, 255), -1, 8); time_t zegar = time(0); tm* czas = localtime(&zegar); char znaki[50]; sprintf(znaki, "%d-%02d-%02d %02d:%02d:%02d", (czas->tm_year + 1900), (czas->tm_mon + 1), (czas->tm_mday), (czas->tm_hour), (czas->tm_min), (czas->tm_sec)); String napis(znaki); putText(zapis, napis, Point(5, 15), FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(0, 0, 0), 1, CV_AA); twozony.write(zapis); } } imshow("window", rys); nowa.copyTo(stara); } } catch (Exception e) { cap.release(); twozony.release(); cvDestroyAllWindows(); break; } int znak = waitKey(15); switch (znak) { case 27: cap.release(); twozony.release(); cvDestroyAllWindows(); return 0; case 49: //1-spauzowanie nagrywania if (nagrywanie == true) { nagrywanie = false; } else { nagrywanie = true; } break; case 50: //2-przerwanie nagrania tryb = 3; break; case 51: //3-nowe nagranie numer++; twozony.release(); nazwa = "D:\\klatki\\nowy"; ss.str(std::string()); ss << numer; nazwa += string(ss.str()); nazwa += ".avi"; twozony = VideoWriter(nazwa, CV_FOURCC('D', 'I', 'V', 'X'), fps, rozmiar, true); tryb = 0; pierwsza = true; break; default: break; } } return 0; }<file_sep>/obrazy/lab5/zad2-Gaus/obrazy/main.cpp #include<opencv2/highgui/highgui.hpp> #include<opencv2/imgproc/imgproc.hpp> #include<cmath> #include<iostream> #include<conio.h> using namespace cv; using namespace std; int robocza1; int robocza2; int robocza3; int robocza4; int robocza5; int robocza6; int robocza7; int Gaus1; int Gaus2; int tresh1; int tresh2; int scale; int delta; void on_trackbar(int, void*) { Gaus1 = robocza1; Gaus2 = robocza2; tresh1 = robocza3; tresh2 = robocza4; scale = robocza5; delta = robocza6; } int main() { Mat image; Mat image2; Mat image3; Mat image4; Mat image5; Mat image6; Mat image7; Mat image8; Mat image9; Mat image10; Mat image11; Mat image12; namedWindow("window", CV_WINDOW_AUTOSIZE); namedWindow("window2", CV_WINDOW_AUTOSIZE); namedWindow("window3", CV_WINDOW_AUTOSIZE); namedWindow("window4", CV_WINDOW_AUTOSIZE); namedWindow("window5", CV_WINDOW_AUTOSIZE); namedWindow("window6", CV_WINDOW_AUTOSIZE); namedWindow("window7", CV_WINDOW_AUTOSIZE); namedWindow("Suwaki", 4); createTrackbar("Gaus-wysokość", "Suwaki", &robocza1, 20, on_trackbar); createTrackbar("Gaus-długość", "Suwaki", &robocza2, 20, on_trackbar); createTrackbar("Treshhold-dol", "Suwaki", &robocza3, 255, on_trackbar); createTrackbar("Treshhold-gora", "Suwaki", &robocza4, 255, on_trackbar); createTrackbar("Sobel-scale", "Suwaki", &robocza5, 20, on_trackbar); createTrackbar("Sobel-delta", "Suwaki", &robocza6, 20, on_trackbar); VideoCapture cap; cap.open(0); double dWidth = cap.set(CV_CAP_PROP_FRAME_WIDTH, 384); double dHeight = cap.set(CV_CAP_PROP_FRAME_HEIGHT, 288); while (1) { try { cap >> image; imshow("window", image); cvtColor(image, image2, CV_BGR2GRAY); CvSize rozmiar; rozmiar.width = (Gaus1*2)+1; 
rozmiar.height = (Gaus2 * 2) + 1; //image2 = zgausowane GaussianBlur(image2, image2, rozmiar, 0); //image 3,4-soble, 5,6-abs'y Sobel(image2, image3, CV_16S, 1, 0, 3, scale, delta, BORDER_DEFAULT); Sobel(image2, image4, CV_16S, 0, 1, 3, scale, delta, BORDER_DEFAULT); convertScaleAbs(image3, image5); convertScaleAbs(image4, image6); imshow("window3", image5); imshow("window4", image6); //image 7,8 - tresholdy threshold(image5, image7, tresh1, tresh2, CV_THRESH_BINARY); threshold(image6, image8, tresh1, tresh2, CV_THRESH_BINARY); imshow("window5", image7); imshow("window6", image8); //image9-suma addWeighted(image7, 1, image8, 2, 0.0, image9); imshow("window2", image9); image3.convertTo(image3, CV_32FC1); image4.convertTo(image4, CV_32FC1); image10.convertTo(image10, CV_32FC1); image11.convertTo(image11, CV_32FC1); //image 10 to magnituda, 11 to kąty cartToPolar(image3, image4, image10, image11, true); image9.copyTo(image12); cvtColor(image12, image12, CV_GRAY2BGR); Vec3b czern = (0, 0, 0); Vec3b kol1 = (255, 255, 255); Vec3b kol2 = (255, 0, 0); Vec3b kol3 = (0, 255, 0); Vec3b kol4 = (0, 0, 255); for (int i = 0; i < image9.rows; i++) { for (int j = 0; j < image9.cols; j++) { if (image12.at<Vec3b>(i, j) != czern && image11.at<float>(i, j) > 45 && image11.at<float>(i, j) <= 135) image12.at<Vec3b>(i, j) = kol1; if (image12.at<Vec3b>(i, j) != czern && image11.at<float>(i, j) > 135 && image11.at<float>(i, j) <= 225) image12.at<Vec3b>(i, j) = kol2; if (image12.at<Vec3b>(i, j) != czern && image11.at<float>(i, j) > 225 && image11.at<float>(i, j) <= 315) image12.at<Vec3b>(i, j) = kol3; if (image12.at<Vec3b>(i, j) != czern && image11.at<float>(i, j) > 315 && image11.at<float>(i, j) <= 360) image12.at<Vec3b>(i, j) = kol4; } } imshow("window7", image12); } catch (Exception e) { cap.release(); cvDestroyAllWindows(); break; } if (waitKey(15) == 27) { cap.release(); cvDestroyAllWindows(); break; } } return 0; }<file_sep>/wzorce/singleton/singleton/logerStary1.cs using System; using System.Collections.Generic; using System.Linq; using System.Text; using System.Threading.Tasks; namespace singleton { public class logerStary1 : logerBaza<logerStary1> { new public void loguj() { Console.WriteLine("ociec jeden pisze"); } } } <file_sep>/obrazy/lab9/zad2-film/obrazy/main.cpp #include<opencv2/highgui/highgui.hpp> #include<opencv2/imgproc/imgproc.hpp> #include<opencv2/video/tracking.hpp> #include<opencv2/videoio.hpp> #include<iostream> #include<conio.h> #include<ctype.h> using namespace cv; using namespace std; int robocza1; int robocza2; int robocza3; int ilosc; double jakosc; double dystans; bool zmiana; void on_trackbar(int, void*) { if (robocza1 > 10) { ilosc = robocza1; } else { ilosc = 10; } if (robocza2 = 0) { jakosc = (double)(robocza2/10); } else { jakosc = 0.1; } if (robocza3 > 10) { dystans = robocza3; } else { dystans = 10; } zmiana = true; } bool czyZaDaleko(Point2f p1, Point2f p2, int odleglosc) { return (abs(p1.x - p2.x) > odleglosc || abs(p1.y - p2.y) > odleglosc); } int main() { VideoCapture cap("D:\\klatki\\bike.avi"); Mat image; Mat image2; Mat staryGray; Mat nowyGray; vector<Point2f> rogi; vector<Point2f> sledzone; vector<Point2f> tmp; vector<uchar> status; vector<float> blendy; bool start = true; zmiana = false; namedWindow("Suwaki", 4); createTrackbar("maxCorners", "Suwaki", &robocza1, 100, on_trackbar); createTrackbar("qualityLevel,", "Suwaki", &robocza2, 10, on_trackbar); createTrackbar("minDistance", "Suwaki", &robocza3, 100, on_trackbar); namedWindow("window", CV_WINDOW_AUTOSIZE); 
namedWindow("window2", CV_WINDOW_AUTOSIZE); ilosc = 30; jakosc = 0.1; dystans = 10; while (1) { try { cap >> image; image.copyTo(image2); imshow("window", image); if (start == true) { cvtColor(image, staryGray, COLOR_BGR2GRAY); tmp.clear(); sledzone.clear(); goodFeaturesToTrack(staryGray, tmp, ilosc, jakosc, dystans); for (int i = 0; i < tmp.size(); ++i) { sledzone.push_back(tmp[i]); } start = false; }else{ cvtColor(image, nowyGray, COLOR_BGR2GRAY); if (sledzone.size() > 0) { calcOpticalFlowPyrLK(staryGray, nowyGray, sledzone, rogi, status, blendy); } int licznik = 0; for (int i = 0; i < status.size(); i++) { if (status[i] == false || czyZaDaleko(sledzone[licznik], rogi[licznik], 2) == false) { sledzone.erase(sledzone.begin() + licznik); rogi.erase(rogi.begin() + licznik); } else { licznik++; } } for (int i = 0; i < sledzone.size(); ++i) { circle(image2, sledzone[i], 3, Scalar(0, 0, 255), -1, 8); line(image2, sledzone[i], rogi[i], Scalar(0, 0, 255)); } if (sledzone.size() < 10 || zmiana == true) { nowyGray.copyTo(staryGray); tmp.clear(); sledzone.clear(); goodFeaturesToTrack(staryGray, tmp, ilosc, jakosc, dystans); for (int i = 0; i < tmp.size(); ++i) { sledzone.push_back(tmp[i]); } zmiana = false; } rogi.clear(); status.clear(); imshow("window2", image2); } } catch (Exception e) { cap.release(); cvDestroyAllWindows(); break; } if (waitKey(15) == 27) { cap.release(); cvDestroyAllWindows(); break; } } return 0; }<file_sep>/obrazy/lab7/zad1a-bez klas/obrazy/obiekt.h #pragma once class obiekt { public: obiekt(); obiekt(Point p); ~obiekt(); }; <file_sep>/grafy/gałąź/grafy1/kandydat.cs using System; using System.Collections.Generic; using System.Text; namespace gałąź { class kandydat { punkt p1; punkt p2; int x; int y; public kandydat(punkt a, punkt b) { p1 = a; p2 = b; x = Math.Min(a.dajX(), b.dajX()); y = Math.Min(a.dajY(), b.dajY()); } public int dajX() { return x; } public int dajY() { return y; } public punkt dajP() { return p1; } public punkt dajQ() { return p2; } public void napisz() { Console.WriteLine("Kandydat o <p,q> = " + x + "," + y); p1.napisz(); p2.napisz(); Console.WriteLine(); } public int dystans() { return x + y; } public int porownaj(kandydat a) { if (dystans() != a.dystans()) { if (dystans() > a.dystans()) { return -1; } else { return 1; } } else { if (x > a.dajX()) { return 1; } else { return -1; } } } } } <file_sep>/obrazy/lab2/zad4i5-zapisywanie klatek z kamery/obrazy/main.cpp #include<opencv2/highgui/highgui.hpp> #include<opencv2/imgproc/imgproc.hpp> #include<iostream> #include<conio.h> #include <chrono> #include <ctime> using namespace cv; using namespace std; int main() { typedef std::chrono::high_resolution_clock Time; typedef std::chrono::milliseconds ms; typedef std::chrono::duration<float> fsec; int ile = 0; int opuznienie = 0; printf("Ile klatek chcesz zapisać: "); cin >> ile; printf("Jakie duże ma być opóźnienie przed rozpoczęciem zapisu (w milisekundach): "); cin >> opuznienie; Mat image; VideoCapture cap; cap.open(0); auto start = Time::now(); double dWidth = cap.set(CV_CAP_PROP_FRAME_WIDTH, 640); double dHeight = cap.set(CV_CAP_PROP_FRAME_HEIGHT, 480); int numer = 0; while (1) { try { cap >> image; vector<int> compression_params; compression_params.push_back(CV_IMWRITE_JPEG_QUALITY); compression_params.push_back(100); auto teraz = Time::now(); fsec roznica = teraz - start; ms czas = std::chrono::duration_cast<ms>(roznica); int czas2 = czas.count(); cout << czas2 << "\n"; if (czas2 > opuznienie) { numer=numer+1; if (numer <= ile) { string sciezka = 
"D:\\klatki\\obrazek"; stringstream ss; ss << numer; sciezka += string(ss.str()); sciezka += ".jpg"; bool bSuccess = imwrite(sciezka, image, compression_params); cout << "Zapisuje obraz nr" << numer << "\n"; } else { cap.release(); break; } } } catch (Exception e) { cap.open(1); } if (waitKey(15) == 27) { cap.release(); break; } } return 0; }<file_sep>/obrazy/lab10/zad/obrazy/main.cpp #include<opencv2/highgui/highgui.hpp> #include<opencv2/imgproc/imgproc.hpp> #include<opencv2/objdetect/objdetect.hpp> #include<iostream> #include<conio.h> using namespace cv; using namespace std; int robocza1; int robocza2; int robocza3; double skala; int sasiedzi; int rozmiar; void on_trackbar(int, void*) { skala = (1.05+((double)robocza1/20)); sasiedzi = robocza2+3; rozmiar = robocza3+10; } int main() { namedWindow("Suwaki", 4); createTrackbar("scaleFactor", "Suwaki", &robocza1, 8, on_trackbar); createTrackbar("minNeighbors", "Suwaki", &robocza2, 3, on_trackbar); createTrackbar("minSize", "Suwaki", &robocza3, 20, on_trackbar); skala = 1.1; sasiedzi = 3; rozmiar = 30; Mat image; Mat image2; Mat image3; Mat pierwsza; vector<Rect> twarze; vector<Rect> oczy; CascadeClassifier kaskadaTwarzy; CascadeClassifier kaskadaOczu; int wersja =1; Point centrum; Rect Rec; Mat zapisana; Mat wstawiana; VideoCapture cap; cap.open(0); double dWidth = cap.set(CV_CAP_PROP_FRAME_WIDTH, 640); double dHeight = cap.set(CV_CAP_PROP_FRAME_HEIGHT, 480); namedWindow("window", CV_WINDOW_AUTOSIZE); namedWindow("window2", CV_WINDOW_AUTOSIZE); String twazeNazwa = "D:\\klatki\\haarcascade_frontalface_alt.xml"; String oczyNazwa = "D:\\klatki\\haarcascade_eye_tree_eyeglasses.xml"; if (!kaskadaTwarzy.load(twazeNazwa)) { return -1; }; if (!kaskadaOczu.load(oczyNazwa)) { return -1; }; while (1) { try { cap >> image; imshow("window", image); image.copyTo(image3); cvtColor(image, image2, CV_BGR2GRAY); equalizeHist(image2, image2); kaskadaTwarzy.detectMultiScale(image2, twarze, skala, sasiedzi, 0 | CV_HAAR_SCALE_IMAGE, Size(rozmiar, 30)); for (size_t i = 0; i < twarze.size(); i++) { Mat ROI = image2(twarze[i]); Mat Roi = image(twarze[i]); kaskadaOczu.detectMultiScale(ROI, oczy, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, Size(30, 30)); switch (wersja) { case 1: centrum = Point(twarze[i].x + twarze[i].width*0.5, twarze[i].y + twarze[i].height*0.5); ellipse(image3, centrum, Size(twarze[i].width*0.5, twarze[i].height*0.5), 0, 0, 360, Scalar(0, 0, 255), 4, 8, 0); for (size_t j = 0; j < oczy.size(); j++) { centrum = Point(twarze[i].x + oczy[j].x + oczy[j].width*0.5, twarze[i].y + oczy[j].y + oczy[j].height*0.5); int promien = cvRound((oczy[j].width + oczy[j].height)*0.25); circle(image3, centrum, promien, Scalar(255, 0, 0), 4, 8, 0); } break; case 2: if (oczy.size() > 1) { Point p1(twarze[i].x + oczy[0].x + oczy[0].width*0.5, twarze[i].y + oczy[0].y + oczy[0].height*0.5); Point p2(twarze[i].x + oczy[1].x + oczy[1].width*0.5, twarze[i].y + oczy[1].y + oczy[1].height*0.5); if (p1.x > p2.x) { p1.x = p1.x + 50; p1.y = p1.y + 50; p2.x = p2.x - 50; p2.y = p2.y - 50; } else { p1.x = p1.x - 50; p1.y = p1.y - 50; p2.x = p2.x + 50; p2.y = p2.y + 50; } rectangle(image3, p1, p2, Scalar(0, 0, 0), -1); } break; case 3: GaussianBlur(Roi, Roi, CvSize(55, 55), 0); Rec = Rect(twarze[i].x, twarze[i].y, twarze[i].width, twarze[i].height); Roi.copyTo(image3(Rec)); break; case 4: if (i == 0) { Roi.copyTo(zapisana); } else { resize(zapisana, wstawiana, CvSize(twarze[i].width, twarze[i].height), 0, 0, CV_INTER_LINEAR); Rec = Rect(twarze[i].x, twarze[i].y, twarze[i].width, 
twarze[i].height); wstawiana.copyTo(image3(Rec)); } break; default: break; } } imshow("window2", image3); } catch (Exception e) { //cap.release(); //cvDestroyAllWindows(); //break; } int znak = waitKey(15); switch (znak) { case 27: cap.release(); cvDestroyAllWindows(); return 0; case 48: wersja = 0; break; case 49: wersja = 1; break; case 50: wersja = 2; break; case 51: wersja = 3; break; case 52: wersja = 4; break; default: break; } } return 0; }<file_sep>/grafy/gałąź2/gałąź2/punkt.cs using System; using System.Collections.Generic; using System.Text; namespace gałąź2 { class punkt { private int x; private int y; public punkt(int a, int b) { x = a; y = b; } public int dajX() { return x; } public int dajY() { return y; } public int dystans() { return x + y; } public void napisz() { Console.WriteLine(x + "," + y); } } } <file_sep>/obrazy/lab13/zad/obrazy/main.cpp #include <opencv2/opencv.hpp> #define CVUI_IMPLEMENTATION #include "cvui.h" #define WINDOW_NAME "Panel Sterowania" int main(int argc, const char *argv[]) { cv::Mat frame = cv::Mat(90, 250, CV_8UC3); cvui::init(WINDOW_NAME); bool wybrano = false; cv::Mat zrodlo; cv::VideoCapture cap; cv::namedWindow("film", CV_WINDOW_AUTOSIZE); while (true) { frame = cv::Scalar(49, 52, 49); if (cvui::button(frame, 20, 10, "Przetwarzaj obraz z kamery")) { cap.open(0); double dWidth = cap.set(CV_CAP_PROP_FRAME_WIDTH, 640); double dHeight = cap.set(CV_CAP_PROP_FRAME_HEIGHT, 480); wybrano = true; } if (cvui::button(frame, 20, 50, "Przetwarzaj obraz z dysku")) { cap = cv::VideoCapture("D:\\klatki\\film.avi"); wybrano = true; } cvui::imshow(WINDOW_NAME, frame); if (wybrano == true) { cap >> zrodlo; cv::imshow("film", zrodlo); } // Check if ESC key was pressed if (cv::waitKey(20) == 27) { break; } } return 0; }<file_sep>/grafy/0jądro/grafy1/Program.cs using System; using System.Collections.Generic; using System.IO; namespace grafy1 { class Program { static void Main(string[] args) { List<int> x = new List<int>(); List<int> y = new List<int>(); var s = new FileInfo(Directory.GetCurrentDirectory()); var s2 = s.Directory.Parent.Parent; String sciezka = s2.ToString() + "\\dane.csv"; using (var reader = new StreamReader(sciezka)) { while (!reader.EndOfStream) { var line = reader.ReadLine(); var values = line.Split(','); x.Add(Convert.ToInt32(values[0])); y.Add(Convert.ToInt32(values[1])); } } int ile = x.Count; if (ile < 3) { Console.WriteLine("Za mało wieszchołków by stworzyć wielokąt."); Console.ReadKey(); return; } int dol=y[0]; int gora=y[0]; for(int i = 1; i<ile; i++) { if (y[i] > gora) { gora = y[i]; } if (y[i] < dol) { dol = y[i]; } } for (int i = 0; i < ile; i++) { int poprzedni = i - 1; if (poprzedni == -1) { poprzedni = ile - 1; } int nastepny = i + 1; if (nastepny == ile) { nastepny = 0; } if (y[i] == y[poprzedni]) { poprzedni--; } if (y[i] == y[nastepny]) { nastepny++; } if (poprzedni == -1) { poprzedni = ile - 1; } if (nastepny == ile) { nastepny = 0; } int det = x[poprzedni] * y[i] + y[poprzedni] * x[nastepny] + x[i] * y[nastepny] - x[poprzedni] * y[nastepny] - y[poprzedni] * x[i] - y[i] * x[nastepny]; if (det < 0) { if (y[i] < y[nastepny] && y[i] < y[poprzedni] && y[i] < gora) { gora = y[i]; } if (y[i] > y[nastepny] && y[i] > y[poprzedni] && y[i] > dol) { dol = y[i]; } } } if (gora > dol) { Console.WriteLine("Jądro istnieje, jego górna granica to " + gora + " a dolna wynosi " + dol); } else { if (gora == dol) { Console.WriteLine("Jądro istnieje, ale zawiera się w pasie o zerowej grubości. 
Znajduje się on na wysokości " + gora); } else { Console.WriteLine("Jądro nie istnieje"); } } Console.ReadKey(); } } } <file_sep>/grafy/kDrzewa/kDrzewa/Program.cs using System; using System.Collections.Generic; using System.IO; using System.Linq; namespace kDrzewa { class Program { static void Main(string[] args) { var s = new FileInfo(Directory.GetCurrentDirectory()); var s2 = s.Directory.Parent.Parent; String s3 = s2.ToString() + "\\dane.csv"; List<punkt> punktyX = new List<punkt>(); List<punkt> punktyY = new List<punkt>(); punkt dodawany; using (var reader = new StreamReader(s3)) { while (!reader.EndOfStream) { var line = reader.ReadLine(); var values = line.Split(','); dodawany = new punkt(Convert.ToInt32(values[0]), Convert.ToInt32(values[1])); punktyX.Add(dodawany); punktyY.Add(dodawany); } } foreach(punkt p in punktyX) { p.napisz(); } punktyX.Sort((a, b) => (a.porownajX(b))); punktyY.Sort((a, b) => (a.porownajY(b))); lisc kozen = buduj(punktyX, punktyY, 0); //kozen.napisz(); int x1; int x2; int y1; int y2; Console.WriteLine("Podaj dolną granicę zakresu na osi X"); x1 = Convert.ToInt32(Console.ReadLine()); Console.WriteLine("Podaj górną granicę zakresu na osi X"); x2 = Convert.ToInt32(Console.ReadLine()); Console.WriteLine("Podaj dolną granicę zakresu na osi Y"); y1 = Convert.ToInt32(Console.ReadLine()); Console.WriteLine("Podaj górną granicę zakresu na osi Y"); y2 = Convert.ToInt32(Console.ReadLine()); zapytanie(kozen, x1, x2, y1, y2); Console.ReadKey(); } static lisc buduj(List<punkt> punktyX, List<punkt> punktyY, int d) { if (punktyX.Count == 1) { lisc tmp = new lisc(d, 1, punktyX.ElementAt(0)); return tmp; } else { if(d%2 == 1) { //nieparzyste List<punkt> pol1X = new List<punkt>(); List<punkt> pol1Y = new List<punkt>(); List<punkt> pol2X = new List<punkt>(); List<punkt> pol2Y = new List<punkt>(); int ile = punktyY.Count / 2; int licznik = 0; int poziom; foreach(punkt p in punktyY) { if (licznik < ile) { pol1Y.Add(p); p.czyPierwsza(true); } else { pol2Y.Add(p); p.czyPierwsza(false); } licznik++; } foreach(punkt p in punktyX) { if (p.czyPierwsza() == true) { pol1X.Add(p); } else { pol2X.Add(p); } } poziom = pol1Y.Last().dajY(); /* Console.WriteLine("Pierwsza polowa"); foreach (punkt p in pol1) { p.napisz(); } Console.WriteLine("Druga polowa"); foreach (punkt p in pol2) { p.napisz(); } Console.WriteLine("Pozioma linia na " + poziom); */ lisc tmp = new lisc(d, 3, poziom); tmp.podepnijNaLewo(buduj(pol1X, pol1Y, d + 1)); tmp.podepnijNaPrawo(buduj(pol2X, pol2Y, d + 1)); return tmp; } else { //parzyste List<punkt> pol1X = new List<punkt>(); List<punkt> pol1Y = new List<punkt>(); List<punkt> pol2X = new List<punkt>(); List<punkt> pol2Y = new List<punkt>(); int ile = punktyX.Count / 2; int licznik = 0; int pion; foreach (punkt p in punktyX) { if (licznik < ile) { pol1X.Add(p); p.czyPierwsza(true); } else { pol2X.Add(p); p.czyPierwsza(false); } licznik++; } foreach (punkt p in punktyY) { if (p.czyPierwsza() == true) { pol1Y.Add(p); } else { pol2Y.Add(p); } } pion = pol1X.Last().dajX(); /* Console.WriteLine("Pierwsza polowa"); foreach (punkt p in pol1X) { p.napisz(); } Console.WriteLine("Druga polowa"); foreach (punkt p in pol2X) { p.napisz(); } Console.WriteLine("Pionowa linia na " + pion); */ lisc tmp = new lisc(d, 2, pion); tmp.podepnijNaLewo(buduj(pol1X, pol1Y, d + 1)); tmp.podepnijNaPrawo(buduj(pol2X, pol2Y, d + 1)); return tmp; } } } static void zapytanie(lisc kozen, int x1, int x2, int y1, int y2) { int typ = kozen.dajTyp(); switch (typ) { case 1: int x = kozen.dajPunkt().dajX(); int 
y = kozen.dajPunkt().dajY(); if (x >= x1 && x <= x2 && y >= y1 && y <= y2) Console.WriteLine("Punkt (" + x + "," + y + ") leży w obszarze zapytania."); break; case 2: int pion = kozen.dajPoziom(); if (pion >= x1) zapytanie(kozen.dajLewego(), x1, x2, y1, y2); if (pion <= x2) zapytanie(kozen.dajPrawego(), x1, x2, y1, y2); break; case 3: int poziom = kozen.dajPoziom(); if (poziom >= y1) zapytanie(kozen.dajLewego(), x1, x2, y1, y2); if (poziom <= y2) zapytanie(kozen.dajPrawego(), x1, x2, y1, y2); break; } } } } <file_sep>/obrazy/lab6/zad2-czasowe zmiany/obrazy/main.cpp #include<opencv2/highgui/highgui.hpp> #include<opencv2/imgproc/imgproc.hpp> #include <chrono> #include <ctime> #include<iostream> #include<conio.h> using namespace cv; using namespace std; int hg1=75; int hd1=58; int sg1=106; int sd1=27; int vg1=255; int vd1=0; int hg2=178; int hd2=166; int sg2=224; int sd2=116; int vg2=238; int vd2=122; int hg3=119; int hd3=103; int sg3=192; int sd3=108; int vg3=182; int vd3=77; int hg; int hd; int sg; int sd; int vg; int vd; int main() { Mat image; Mat image2; Mat image3; Mat image4; Mat image5; Mat image6; Mat image7; VideoCapture cap; RNG rng(12345); cap.open(0); namedWindow("window", CV_WINDOW_AUTOSIZE); namedWindow("window2", CV_WINDOW_AUTOSIZE); namedWindow("window3", CV_WINDOW_AUTOSIZE); namedWindow("window4", CV_WINDOW_AUTOSIZE); namedWindow("window5", CV_WINDOW_AUTOSIZE); namedWindow("window6", CV_WINDOW_AUTOSIZE); double dWidth = cap.set(CV_CAP_PROP_FRAME_WIDTH, 384); double dHeight = cap.set(CV_CAP_PROP_FRAME_HEIGHT, 288); int licznik = 1; typedef std::chrono::high_resolution_clock Time; typedef std::chrono::milliseconds ms; typedef std::chrono::duration<float> fsec; auto start = Time::now(); while (1) { try { auto teraz = Time::now(); fsec roznica = teraz - start; ms czas = std::chrono::duration_cast<ms>(roznica); int czas2 = czas.count(); if (czas2 > 5000) { licznik=licznik+1; if (licznik > 3) licznik = 1; start = Time::now(); } switch (licznik) { case 1: hd = hd1; hg = hg1; sd = sd1; sg = sg1; vd = vd1; vg = vg1; break; case 2: hd = hd2; hg = hg2; sd = sd2; sg = sg2; vd = vd2; vg = vg2; break; case 3: hd = hd3; hg = hg3; sd = sd3; sg = sg3; vd = vd3; vg = vg3; break; } cap >> image; imshow("window", image); cvtColor(image, image2, CV_BGR2HSV); //ustalanie wartości inRange(image2, Scalar(hd, sd, vd), Scalar(hg, sg, vg), image3); imshow("window2", image3); Mat element1 = getStructuringElement(cv::MORPH_CROSS, cv::Size(2 * 3 + 1, 2 * 3 + 1), cv::Point(3, 3)); Mat element2 = getStructuringElement(cv::MORPH_CROSS, cv::Size(2 * 8 + 1, 2 * 8 + 1), cv::Point(8, 8)); erode(image3, image4, element1); erode(image4, image4, element1); imshow("window3", image4); dilate(image4, image5, element2); dilate(image5, image5, element2); imshow("window4", image5); vector<vector<Point>> kontury; vector<Vec4i> hier; findContours(image5, kontury, hier, RETR_TREE, CHAIN_APPROX_SIMPLE); vector<Moments> mu(kontury.size()); for (int i = 0; i < kontury.size(); i++) { mu[i] = moments(kontury[i], false); } vector<Point2f> mc(kontury.size()); for (int i = 0; i < kontury.size(); i++) { mc[i] = Point2f(mu[i].m10 / mu[i].m00, mu[i].m01 / mu[i].m00); } Mat drawing = Mat::zeros(image.size(), CV_8UC3); for (int i = 0; i < kontury.size(); i++) { Scalar color = Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255)); drawContours(drawing, kontury, i, color, 2, 8, hier, 0, Point()); circle(drawing, mc[i], 4, color, -1, 8, 0); } imshow("window5", drawing); image.copyTo(image7); Moments m = moments(image5, 
true); Point p(m.m10 / m.m00, m.m01 / m.m00); circle(image7, p, 3, Scalar(0, 0, 255)); String x = to_string(m.m10 / m.m00); String y = to_string(m.m01 / m.m00); String text = "Wsp: " + x + "," + y; putText(image7, text, p, FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(200, 200, 250), 1, CV_AA); imshow("window6", image7); } catch (Exception e) { cap.release(); cvDestroyAllWindows(); break; } if (waitKey(15) == 27) { cap.release(); cvDestroyAllWindows(); break; } } return 0; }<file_sep>/wzorce/singleton/singletonTesty/UnitTest1.cs using System; using Microsoft.VisualStudio.TestTools.UnitTesting; namespace singletonTesty { [TestClass] public class UnitTest1 { [TestMethod] public void dwojeDzieci() { var log1 = singleton.logerMlody1.dajLogera; var log2 = singleton.logerMlody2.dajLogera; Assert.AreSame(log1, log2); } [TestMethod] public void synZOjcem() { var log1 = singleton.logerMlody1.dajLogera; var log2 = singleton.logerStary1.dajLogera; Assert.AreSame(log1, log2); } [TestMethod] public void synZWujem() { var log1 = singleton.logerMlody1.dajLogera; var log2 = singleton.logerStary2.dajLogera; Assert.AreNotSame(log1, log2); } [TestMethod] public void starzyBracia() { var log1 = singleton.logerStary1.dajLogera; var log2 = singleton.logerStary2.dajLogera; Assert.AreNotSame(log1, log2); } } } <file_sep>/obrazy/lab8/zad2-kamera hue/obrazy/main.cpp #include<opencv2/highgui/highgui.hpp> #include<opencv2/imgproc/imgproc.hpp> #include<opencv2/video/background_segm.hpp> #include<iomanip> #include<iostream> #include<conio.h> using namespace cv; using namespace std; int can1; int can2; int robocza1; int robocza2; void on_trackbar(int, void*) { can1 = robocza1; can2 = robocza2; } int main() { Mat image; // obraz Ÿródłowy Mat image2; // maska MoG2 Mat image3; Mat image4; Ptr<BackgroundSubtractor> mog; //MOG2 Background subtractor VideoCapture cap; cap.open(0); namedWindow("window", CV_WINDOW_AUTOSIZE); namedWindow("window2", CV_WINDOW_AUTOSIZE); namedWindow("window3", CV_WINDOW_AUTOSIZE); namedWindow("Suwaki", 4); createTrackbar("Canny1", "Suwaki", &robocza1, 200, on_trackbar); createTrackbar("Canny2", "Suwaki", &robocza2, 200, on_trackbar); double dWidth = cap.set(CV_CAP_PROP_FRAME_WIDTH, 384); double dHeight = cap.set(CV_CAP_PROP_FRAME_HEIGHT, 288); while (1) { try { cap >> image; imshow("window", image); Canny(image, image2, can1, can2); imshow("window2", image2); image.copyTo(image3); vector<Vec4i> lines; HoughLinesP(image2, lines, 1, CV_PI / 180, 50, 50, 10); for (size_t i = 0; i < lines.size(); i++) { Vec4i l = lines[i]; line(image3, Point(l[0], l[1]), Point(l[2], l[3]), Scalar(0, 0, 255), 3, CV_AA); } imshow("window3", image3); } catch (Exception e) { cap.release(); cvDestroyAllWindows(); break; } if (waitKey(15) == 27) { cap.release(); cvDestroyAllWindows(); break; } } return 0; }<file_sep>/obrazy/lab3/zad4-znak wodny na filmie/obrazy/main.cpp #include<opencv2/highgui/highgui.hpp> #include<iostream> #include<conio.h> using namespace cv; using namespace std; int main() { Mat image; Mat image2; Mat image3; cv::VideoCapture cap("D:\\klatki\\film.avi"); namedWindow("window", CV_WINDOW_AUTOSIZE); namedWindow("window2", CV_WINDOW_AUTOSIZE); namedWindow("window3", CV_WINDOW_AUTOSIZE); double dWidth = cap.set(CV_CAP_PROP_FRAME_WIDTH, 640); double dHeight = cap.set(CV_CAP_PROP_FRAME_HEIGHT, 480); image2 = imread("D:\\klatki\\logo.bmp", CV_LOAD_IMAGE_COLOR); while (1) { try { cap >> image; cap >> image3; imshow("window", image); imshow("window2", image2); Rect Rec(0, 0, 80, 64); Mat Roi = image(Rec); 
addWeighted(Roi, 1, image2, 2, 0.0, Roi); Rect WhereRec(0, 0, Roi.cols, Roi.rows); Roi.copyTo(image3(WhereRec)); imshow("window3", image3); } catch (Exception e) { cap.release(); cvDestroyAllWindows(); break; } if (waitKey(15) == 27) { cap.release(); cvDestroyAllWindows(); break; } } return 0; }<file_sep>/grafy/gałąź/grafy1/punkt.cs using System; using System.Collections.Generic; using System.Text; namespace gałąź { class punkt { private int x; private int y; public punkt(int a, int b) { x = a; y = b; } public int dajX() { return x; } public int dajY() { return y; } public void napisz() { Console.WriteLine(x + "," + y); } public int porownaj1(punkt a) { if (x != a.dajX()) { if (x > a.dajX()) { return -1; } else { return 1; } } else { if (x > a.dajY()) { return 1; } else { return -1; } } } public int dystans() { return x + y; } public int porownaj2(punkt a) { if (dystans() != a.dystans()) { if (dystans() > a.dystans()) { return -1; } else { return 1; } } else { if (x > a.dajX()) { return 1; } else { return -1; } } } public bool tenSam(punkt a) { bool wynik = false; if (x == a.dajX() && y == a.dajY()) wynik = true; return wynik; } } } <file_sep>/obrazy/lab7/zad1a-bez klas/obrazy/obiekt.cpp #include "obiekt.h" #include<opencv2/highgui/highgui.hpp> #include<opencv2/imgproc/imgproc.hpp> #include<iomanip> #include<iostream> #include<conio.h> using namespace cv; using namespace std; class obiekt { private: Point wspolzendne; public: obiekt::obiekt(Point p) { wspolzendne = p; } obiekt::~obiekt() { } } <file_sep>/seminarium/part2/part2/Program.cs using System; using System.Collections.Generic; using System.IO; using System.Linq; using System.Text; using System.Threading.Tasks; namespace part2 { class Program { //public static String adresTekst = "D:\\studia\\magisterka\\sem2\\seminarium\\oczyszczony.txt"; //public static String adresDane = "D:\\studia\\magisterka\\sem2\\seminarium\\rejestr.txt"; public static String adresTekst = "D:\\studia\\sem2\\seminarium\\oczyszczony.txt"; public static String adresDane = "D:\\studia\\sem2\\seminarium\\rejestr.txt"; public static String tekst = ""; public static List<litera> litery = new List<litera>(); static void Main(string[] args) { litery.Add(new litera('a')); litery.Add(new litera('ą')); litery.Add(new litera('b')); litery.Add(new litera('c')); litery.Add(new litera('ć')); litery.Add(new litera('d')); litery.Add(new litera('e')); litery.Add(new litera('ę')); litery.Add(new litera('f')); litery.Add(new litera('g')); litery.Add(new litera('h')); litery.Add(new litera('i')); litery.Add(new litera('j')); litery.Add(new litera('k')); litery.Add(new litera('l')); litery.Add(new litera('ł')); litery.Add(new litera('m')); litery.Add(new litera('n')); litery.Add(new litera('ń')); litery.Add(new litera('o')); litery.Add(new litera('ó')); litery.Add(new litera('p')); litery.Add(new litera('r')); litery.Add(new litera('s')); litery.Add(new litera('ś')); litery.Add(new litera('t')); litery.Add(new litera('u')); litery.Add(new litera('w')); litery.Add(new litera('y')); litery.Add(new litera('z')); litery.Add(new litera('ż')); litery.Add(new litera('ź')); Console.WriteLine("Witaj w dziwnym huffmanowskim programiku made by wardasz"); while (true) { menu(); int wybor = Convert.ToInt32(Console.ReadLine()); Console.Clear(); switch (wybor) { case 1: wypiszTekst(); break; case 2: wypiszLitery(); break; case 3: wczytajTekst(); break; case 4: policz(); break; case 5: zapisz(); break; case 6: wczytajTekst(); policz(); zapisz(); break; case 0: return; default: 
Console.WriteLine("Niezrozumiano polecenia"); break; } Console.WriteLine(); Console.WriteLine(); } } static void menu() { Console.WriteLine("Co chcesz zrobić? (wpisz cyfrę)"); Console.WriteLine("1-Wypisz wczytany tekst"); Console.WriteLine("2-Wypisz listę liter"); Console.WriteLine("3-Wczytaj tekst"); Console.WriteLine("4-Policz znaki w tekście"); Console.WriteLine("5-Zapisz znaki do pliku"); Console.WriteLine("6-Wombo combo"); Console.WriteLine("0-zamknij program"); } public static void wypiszTekst() { Console.WriteLine(tekst); } public static void wypiszLitery() { foreach (litera l in litery) { l.napisz(); } } public static void wczytajTekst() { tekst = System.IO.File.ReadAllText(adresTekst); } public static void policz() { foreach (char znak in tekst) { litera l = litery.Select(n => n).Where(x => x.dajLitere() == znak).FirstOrDefault(); if (l != null) { l.podbij(); } } litery.Sort((b, a) => (a.ileRazy().CompareTo(b.ileRazy()))); int suma = 0; foreach (litera l in litery) { suma = suma + l.ileRazy(); } foreach (litera l in litery) { l.policzProcent(suma); } } public static void zapisz() { Console.WriteLine("Podaj opis:"); string opis = Console.ReadLine(); foreach (litera l in litery) { opis = opis + " " + l.dajLitere() + "-" + l.jakiProcent(); } using (StreamWriter writer = new StreamWriter(adresDane, true)) { writer.WriteLine(opis); } } } } <file_sep>/obrazy/lab3/zad2-porównanie z poprzednią/obrazy/main.cpp #include<opencv2/highgui/highgui.hpp> #include<opencv2/imgproc/imgproc.hpp> #include<iostream> #include<conio.h> using namespace cv; using namespace std; int main() { Mat image; Mat image2; Mat image3; Mat poprzednia; cv::VideoCapture capture("D:\\klatki\\robot_no_loop.avi"); namedWindow("window", CV_WINDOW_AUTOSIZE); namedWindow("window2", CV_WINDOW_AUTOSIZE); namedWindow("window3", CV_WINDOW_AUTOSIZE); namedWindow("window4", CV_WINDOW_AUTOSIZE); bool poczatek = true; while (1) { try { capture >> image; cvtColor(image, image, CV_BGR2GRAY); imshow("window", image); if (poczatek == false) { imshow("window2", poprzednia); absdiff(image, poprzednia, image2); imshow("window3", image2); threshold(image2, image3, 100, 255, CV_THRESH_BINARY); imshow("window4", image3); poprzednia = image; } else { poprzednia = image; poczatek = false; } } catch (Exception e) { capture.release(); cvDestroyAllWindows(); break; } if (waitKey(15) == 27) { capture.release(); cvDestroyAllWindows(); break; } } return 0; }<file_sep>/obrazy/lab5/zad1-Canny/obrazy/main.cpp #include<opencv2/highgui/highgui.hpp> #include<opencv2/imgproc/imgproc.hpp> #include<iostream> #include<conio.h> using namespace cv; using namespace std; int main() { Mat image; Mat image2; Mat image3; Mat image4; VideoCapture cap; cap.open(0); namedWindow("window", CV_WINDOW_AUTOSIZE); namedWindow("window2", CV_WINDOW_AUTOSIZE); namedWindow("window3", CV_WINDOW_AUTOSIZE); namedWindow("window4", CV_WINDOW_AUTOSIZE); double dWidth = cap.set(CV_CAP_PROP_FRAME_WIDTH, 384); double dHeight = cap.set(CV_CAP_PROP_FRAME_HEIGHT, 288); while (1) { try { cap >> image; imshow("window", image); Canny(image, image2, 100, 200); imshow("window2", image2); Canny(image, image3, 10, 200); imshow("window3", image3); Canny(image, image4, 100, 20); imshow("window4", image4); } catch (Exception e) { cap.open(1); } if (waitKey(15) == 27) { cap.release(); cvDestroyAllWindows(); break; } } return 0; }<file_sep>/obrazy/lab7/zad1a-bez klas/obrazy/main.cpp #include<opencv2/highgui/highgui.hpp> #include<opencv2/imgproc/imgproc.hpp> #include<iomanip> #include<iostream> 
#include<conio.h> using namespace cv; using namespace std; int robocza1; int robocza2; int robocza3; int robocza4; int robocza5; int robocza6; int hgora; int hdol; int sgora; int sdol; int vgora; int vdol; void on_trackbar(int, void*) { hgora = robocza1; hdol = robocza2; sgora = robocza3; sdol = robocza4; vgora = robocza5; vdol = robocza6; } int main() { Mat image; Mat image2; Mat image3; Mat image4; Mat image5; Mat image6; Mat image7; VideoCapture cap; RNG rng(12345); namedWindow("Suwaki", 4); createTrackbar("Hgora", "Suwaki", &robocza1, 255, on_trackbar); createTrackbar("Hdol", "Suwaki", &robocza2, 255, on_trackbar); createTrackbar("Sgora", "Suwaki", &robocza3, 360, on_trackbar); createTrackbar("Sdul", "Suwaki", &robocza4, 360, on_trackbar); createTrackbar("Vgora", "Suwaki", &robocza5, 255, on_trackbar); createTrackbar("Vdol", "Suwaki", &robocza6, 255, on_trackbar); cap.open(0); namedWindow("window", CV_WINDOW_AUTOSIZE); namedWindow("window2", CV_WINDOW_AUTOSIZE); namedWindow("window3", CV_WINDOW_AUTOSIZE); namedWindow("window4", CV_WINDOW_AUTOSIZE); namedWindow("window5", CV_WINDOW_AUTOSIZE); namedWindow("window6", CV_WINDOW_AUTOSIZE); double dWidth = cap.set(CV_CAP_PROP_FRAME_WIDTH, 384); double dHeight = cap.set(CV_CAP_PROP_FRAME_HEIGHT, 288); while (1) { try { cap >> image; imshow("window", image); cvtColor(image, image2, CV_BGR2HSV); //ustalanie wartości inRange(image2, Scalar(hdol, sdol, vdol), Scalar(hgora, sgora, vgora), image3); imshow("window2", image3); Mat element1 = getStructuringElement(cv::MORPH_CROSS, cv::Size(2 * 3 + 1, 2 * 3 + 1), cv::Point(3, 3)); Mat element2 = getStructuringElement(cv::MORPH_CROSS, cv::Size(2 * 8 + 1, 2 * 8 + 1), cv::Point(8, 8)); erode(image3, image4, element1); erode(image4, image4, element1); imshow("window3", image4); dilate(image4, image5, element2); dilate(image5, image5, element2); imshow("window4", image5); image.copyTo(image7); vector<vector<Point>> kontury; vector<Vec4i> hier; findContours(image5, kontury, hier, RETR_TREE, CHAIN_APPROX_SIMPLE); vector<Moments> mu(kontury.size()); for (int i = 0; i < kontury.size(); i++) { mu[i] = moments(kontury[i], false); } vector<Point2f> mc(kontury.size()); for (int i = 0; i < kontury.size(); i++) { mc[i] = Point2f(mu[i].m10 / mu[i].m00, mu[i].m01 / mu[i].m00); } Mat drawing = Mat::zeros(image.size(), CV_8UC3); for (int i = 0; i < kontury.size(); i++) { Scalar color = Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255)); drawContours(drawing, kontury, i, color, 2, 8, hier, 0, Point()); circle(drawing, mc[i], 4, color, -1, 8, 0); } imshow("window5", drawing); //tu już jest ogarnięte rysowanie //tylko trzeba jeszcze te punkty pozamykać w obiektach klas for (int i = 0; i < kontury.size(); i++) { Point p(mu[i].m10 / mu[i].m00, mu[i].m01 / mu[i].m00); circle(image7, p, 3, Scalar(0, 0, 255)); String x = to_string(mu[i].m10 / mu[i].m00); String y = to_string(mu[i].m01 / mu[i].m00); String text = "Wsp: " + x + "," + y; putText(image7, text, p, FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(200, 200, 250), 1, CV_AA); } imshow("window6", image7); } catch (Exception e) { cap.release(); cvDestroyAllWindows(); break; } if (waitKey(15) == 27) { cap.release(); cvDestroyAllWindows(); break; } } return 0; }<file_sep>/grafy/gałąź2/gałąź2/Program.cs using System; using System.Collections.Generic; using System.IO; namespace gałąź2 { class Program { public static List<podzbior> policzone; static void Main(string[] args) { policzone = new List<podzbior>(); var s = new FileInfo(Directory.GetCurrentDirectory()); 
var s2 = s.Directory.Parent.Parent; String sciezka = s2.ToString() + "\\dane.csv"; List<punkt> punkty = new List<punkt>(); punkt tmp; using (var reader = new StreamReader(sciezka)) { while (!reader.EndOfStream) { var line = reader.ReadLine(); var values = line.Split(','); tmp = new punkt(Convert.ToInt32(values[0]), Convert.ToInt32(values[1])); punkty.Add(tmp); } } List<string> maski = new List<string>(); for (int x = 1; x <= (Math.Pow(2, punkty.Count) - 1); x++) { string maska = Convert.ToString(x, 2); while (maska.Length < punkty.Count) { maska = "0" + maska; } maski.Add(maska); } for(int x = 1; x < punkty.Count; x++) { foreach (string maska in maski) { int suma = 0; foreach(char c in maska) { if (c == '1') { suma++; } } if (suma == x) { List<punkt> nowa = new List<punkt>(); for (int i = 0; i < punkty.Count; i++) { if (maska[i] == '1') { nowa.Add(punkty[i]); } } podzbior nowy = new podzbior(nowa); policzone.Add(nowy); } } } podzbior wynik = new podzbior(punkty); wynik.ukozen(); List<linia> rozwiazanie = wynik.dajGalaz(); foreach(linia l in rozwiazanie) { l.napisz(); } Console.ReadKey(); } public static podzbior szukaj(List<punkt> punkty) { foreach(podzbior p in policzone) { if (p.czyTo(punkty)) return p; } return null; } } } <file_sep>/grafy/paraPunktow/paraPunktow2/listy.cs using System; using System.Collections.Generic; using System.Text; namespace paraPunktow2 { class listy { private List<punkt> Sx = new List<punkt>(); private List<punkt> Sy = new List<punkt>(); public listy() { } public List<punkt> dajSx() { return Sx; } public List<punkt> dajSy() { return Sy; } public void dodaj(punkt p) { Sx.Add(p); Sy.Add(p); } public void dodajX(punkt p) { Sx.Add(p); } public void dodajY(punkt p) { Sy.Add(p); } public void wypisz() { Console.WriteLine("Sx:"); foreach (punkt p in Sx) { p.napisz(); } Console.WriteLine("Sy:"); foreach (punkt p in Sy) { p.napisz(); } Console.WriteLine(); } public void sortuj() { Sx.Sort((a, b) => (a.porownajX(b))); Sy.Sort((a, b) => (a.porownajY(b))); } } } <file_sep>/grafy/kDrzewa/kDrzewa/punkt.cs using System; using System.Collections.Generic; using System.Text; namespace kDrzewa { class punkt { private int x; private int y; private bool pierwsza; public punkt(int a, int b) { x = a; y = b; } public int dajX() { return x; } public int dajY() { return y; } public bool czyPierwsza() { return pierwsza; } public void czyPierwsza(bool a) { pierwsza = a; } public int porownajX(punkt a) { if (x < a.dajX()) return -1; else if (x > a.dajX()) return 1; else if (y > a.dajY()) return 1; else return -1; } public int porownajY(punkt a) { if (y < a.dajY()) return -1; else if (y > a.dajY()) return 1; else if (x > a.dajX()) return 1; else return -1; } public void napisz() { Console.WriteLine(x + "," + y); } } } <file_sep>/obrazy/lab8/zad1-kamera mog/obrazy/main.cpp #include<opencv2/highgui/highgui.hpp> #include<opencv2/imgproc/imgproc.hpp> #include<opencv2/video/background_segm.hpp> #include<iomanip> #include<iostream> #include<conio.h> using namespace cv; using namespace std; int robocza1; int robocza2; int history; int nmixtures; Ptr<BackgroundSubtractorMOG2> mog; //MOG2 Background subtractor void on_trackbar1(int, void*) { history = robocza1; mog->setHistory(history); } void on_trackbar2(int, void*) { nmixtures = robocza2; mog = createBackgroundSubtractorMOG2(history, 16, true); mog->setNMixtures(nmixtures); } int main() { Mat image; // obraz Ÿródłowy Mat image2; // maska MoG2 Mat image3; Mat image4; VideoCapture cap; cap.open(0); namedWindow("window", CV_WINDOW_AUTOSIZE); 
namedWindow("window2", CV_WINDOW_AUTOSIZE); namedWindow("window3", CV_WINDOW_AUTOSIZE); namedWindow("window4", CV_WINDOW_AUTOSIZE); namedWindow("Suwaki", 4); createTrackbar("History", "Suwaki", &robocza1, 200, on_trackbar1); createTrackbar("Nmixtures", "Suwaki", &robocza2, 200, on_trackbar2); double dWidth = cap.set(CV_CAP_PROP_FRAME_WIDTH, 384); double dHeight = cap.set(CV_CAP_PROP_FRAME_HEIGHT, 288); mog = createBackgroundSubtractorMOG2(history, 16, true); while (1) { try { cap >> image; imshow("window", image); mog->apply(image, image2); imshow("window2", image2); mog->getBackgroundImage(image3); imshow("window3", image3); vector<vector<Point>> kontury; vector<Vec4i> hier; findContours(image2, kontury, hier, RETR_TREE, CHAIN_APPROX_SIMPLE); vector<Moments> mu(kontury.size()); for (int i = 0; i < kontury.size(); i++) { mu[i] = moments(kontury[i], false); } vector<Point2f> mc(kontury.size()); for (int i = 0; i < kontury.size(); i++) { mc[i] = Point2f(mu[i].m10 / mu[i].m00, mu[i].m01 / mu[i].m00); } image.copyTo(image4); for (int i = 0; i < kontury.size(); i++) { Scalar color = Scalar(0, 0, 255); drawContours(image4, kontury, i, color, 2, 8, hier, 0, Point()); circle(image4, mc[i], 4, color, -1, 8, 0); } imshow("window4", image4); } catch (Exception e) { cap.release(); cvDestroyAllWindows(); break; } if (waitKey(15) == 27) { cap.release(); cvDestroyAllWindows(); break; } } return 0; }<file_sep>/grafy/wyznaczaniePrzecięć/wyznaczaniePrzecięć/Program.cs using System; using System.Collections.Generic; using System.IO; using System.Linq; using System.Text; using System.Threading.Tasks; namespace wyznaczaniePrzeciec { class Program { static void Main(string[] args) { List<zdarzenie> zdazenia = new List<zdarzenie>(); int ax; int ay; int bx; int by; //var s = new FileInfo(Directory.GetCurrentDirectory()); //var s2 = s.Directory.Parent.Parent; //String sciezka = s2.ToString() + "\\dane.csv"; String sciezka = "D:\\studia\\sem2\\grafy\\wyznaczaniePrzecięć\\wyznaczaniePrzecięć\\dane.csv"; using (var reader = new StreamReader(sciezka)) { while (!reader.EndOfStream) { var line = reader.ReadLine(); var values = line.Split(','); ax = Convert.ToInt32(values[0]); ay = Convert.ToInt32(values[1]); bx = Convert.ToInt32(values[2]); by = Convert.ToInt32(values[3]); zdarzenie tmp; if(ax == bx) { if (ay > by) { tmp = new zdarzenie(2, ax, ay, by); zdazenia.Add(tmp); } else { tmp = new zdarzenie(2, ax, by, ay); zdazenia.Add(tmp); } } else { if (ax > bx) { tmp = new zdarzenie(3, ax, ay); zdazenia.Add(tmp); tmp = new zdarzenie(1, bx, by); zdazenia.Add(tmp); } else { tmp = new zdarzenie(1, ax, ay); zdazenia.Add(tmp); tmp = new zdarzenie(3, bx, by); zdazenia.Add(tmp); } } } } //napiszListe(zdazenia); zdazenia.Sort((a, b) => (a.porownaj(b))); //napiszListe(zdazenia); zamiataj(zdazenia); Console.ReadKey(); } static void napiszListe(List<zdarzenie> lista) { foreach(zdarzenie z in lista) { int typ = z.dajTyp(); int x = z.dajX(); int y = z.dajY(); int y2 = z.dajY2(); switch (typ) { case 1: Console.WriteLine("Zdarzenie na X=" + x + " Początek odcinka na wysokosci " + y); break; case 2: Console.WriteLine("Zdarzenie na X=" + x + " Pion na wysokosci " + y + "-" + y2); break; case 3: Console.WriteLine("Zdarzenie na X=" + x + " Koniec odcinka na wysokosci " + y); break; } } Console.WriteLine(); } static void zamiataj(List<zdarzenie> lista) { List<int> poziomy = new List<int>(); foreach (zdarzenie z in lista) { int typ = z.dajTyp(); int x = z.dajX(); int y = z.dajY(); int y2 = z.dajY2(); switch (typ) { case 1: bool dodano = false; 
for (int i = 0; i < poziomy.Count(); i++) { if (y < poziomy.ElementAt(i)) { poziomy.Insert(i, y); dodano = true; break; } } if (dodano == false) { poziomy.Add(y); } break; case 2: foreach (int p in poziomy) { if(p<= y && p >= y2) { Console.WriteLine("Wykryto przecięcie w punkcie (" + x + "," + p + ")."); } } break; case 3: foreach (int p in poziomy) { if (p == y) { poziomy.Remove(p); break; } } break; } } } } } <file_sep>/grafy/gałąź/grafy1/Program.cs using System; using System.Collections.Generic; using System.IO; using System.Linq; namespace gałąź { class Program { public static List<linia> galaz; static void Main(string[] args) { var s = new FileInfo(Directory.GetCurrentDirectory()); var s2 = s.Directory.Parent.Parent; String sciezka = s2.ToString() + "\\dane.csv"; List<punkt> punkty = new List<punkt>(); List<punkt> skrajne = new List<punkt>(); List<kandydat> kandydaci = new List<kandydat>(); galaz = new List<linia>(); punkt tmp; using (var reader = new StreamReader(sciezka)) { while (!reader.EndOfStream) { var line = reader.ReadLine(); var values = line.Split(','); tmp = new punkt(Convert.ToInt32(values[0]), Convert.ToInt32(values[1])); punkty.Add(tmp); } } punkty.Sort((a, b) => (a.porownaj1(b))); while (punkty.Count > 1) { int miotla = 0; skrajne.Clear(); kandydaci.Clear(); foreach (punkt pu in punkty) { if (pu.dajY() >= miotla) { skrajne.Add(pu); miotla = pu.dajY(); } } if (skrajne.Count() == 1) { punkt jest = skrajne.ElementAt(0); punkt max = new punkt(0, 0); foreach (punkt pun in punkty) { if (pun.porownaj2(max) == -1 && pun.tenSam(jest) == false) { max = pun; } } skrajne.Add(max); } skrajne.Sort((a, b) => (a.porownaj1(b))); for (int i = 1; i < skrajne.Count(); i++) { kandydaci.Add(new kandydat(skrajne.ElementAt(i - 1), skrajne.ElementAt(i))); } kandydaci.Sort((a, b) => (a.porownaj(b))); kandydat wybrany = kandydaci.ElementAt(0); punkt p = wybrany.dajP(); punkt q = wybrany.dajQ(); punkty.Remove(p); punkty.Remove(q); punkt pq = new punkt(wybrany.dajX(), wybrany.dajY()); bool dodano = false; for (int i = 0; i < punkty.Count(); i++) { if (pq.porownaj1(punkty.ElementAt(i)) == -1) { punkty.Insert(i, pq); dodano = true; break; } } if (dodano == false) { punkty.Add(pq); } dodajLinie(p, pq); dodajLinie(q, pq); } punkt r = new punkt(0, 0); dodajLinie(punkty.ElementAt(0), r); foreach(linia l in galaz) { l.napisz(); } Console.ReadKey(); } public static void dodajLinie(punkt p1, punkt p2) { if (p1.dajX() == p2.dajX() && p1.dajY() == p2.dajY()) { return; } if (p1.dajX() == p2.dajX() || p1.dajY() == p2.dajY()) { galaz.Add(new linia(p1, p2)); return; } punkt tmp = new punkt(Math.Max(p1.dajX(), p2.dajX()), Math.Min(p1.dajY(), p2.dajY())); dodajLinie(p1, tmp); dodajLinie(p2, tmp); } } } <file_sep>/wzorce/singleton/singleton/Program.cs using System; using System.Collections.Generic; using System.Linq; using System.Text; using System.Threading.Tasks; namespace singleton { class Program { static void Main(string[] args) { var log1 = logerMlody1.dajLogera; var log2 = logerMlody2.dajLogera; log1.loguj(); log1.okresl(); log2.loguj(); log2.okresl(); Console.ReadKey(); } } } <file_sep>/grafy/wyznaczaniePrzecięć/wyznaczaniePrzecięć/zdarzenie.cs using System; using System.Collections.Generic; using System.Linq; using System.Text; using System.Threading.Tasks; namespace wyznaczaniePrzeciec { class zdarzenie { private int typ; //1-początek odcinka, 2-pionowy, 3-koniec odcinka private int kiedy; private int wysokosc; private int dol; public zdarzenie(int t, int x, int y) { typ = t; kiedy = x; wysokosc = y; dol 
= y; } public zdarzenie(int t, int x, int y1, int y2) { typ = t; kiedy = x; wysokosc = y1; dol = y2; } public int dajTyp() { return typ; } public int dajX() { return kiedy; } public int dajY() { return wysokosc; } public int dajY2() { return dol; } public int porownaj(zdarzenie a) { if (kiedy < a.dajX()) return -1; if (kiedy > a.dajX()) return 1; if (typ < a.dajTyp()) return -1; else return 1; } } } <file_sep>/grafy/kDrzewa/kDrzewa/lisc.cs using System; using System.Collections.Generic; using System.Text; namespace kDrzewa { class lisc { private int d; private int typ; //1-punkt, 2-pion, 3-poziom private int wspolzendna; private punkt punkcik; private lisc lewy; private lisc prawy; public lisc(int D, int T, int wspol) { d = D; typ = T; wspolzendna = wspol; lewy = null; prawy = null; } public lisc(int D, int T, punkt p) { d = D; typ = T; punkcik = p; lewy = null; prawy = null; } public int dajD() { return d; } public int dajTyp() { return typ; } public int dajPoziom() { return wspolzendna; } public punkt dajPunkt() { return punkcik; } public lisc dajLewego() { return lewy; } public lisc dajPrawego() { return prawy; } public void podepnijNaLewo(lisc a) { lewy = a; } public void podepnijNaPrawo(lisc a) { prawy = a; } public void napisz() { if (lewy != null) lewy.napisz(); Console.Write("Liść na głębokości " + d); switch (typ) { case 1: Console.WriteLine("; punkt o współżędnych (" + punkcik.dajX() + "," + punkcik.dajY() + ")"); break; case 2: Console.WriteLine("; linia pionowa o współżędnej " + wspolzendna); break; case 3: Console.WriteLine("; linia pozioma o współżędnej " + wspolzendna); break; } if (prawy != null) prawy.napisz(); } } } <file_sep>/obrazy/lab7/zad1a-bez klas/obrazy/pole.h #pragma once class pole { public: pole(); pole(cv::Point); ~pole(); cv::Point dajKoordynaty(); void ustawKoordynaty(cv::Point); private: cv::Point koordynaty; }; <file_sep>/grafy/paraPunktow/paraPunktow2/punkt.cs using System; using System.Collections.Generic; using System.Text; namespace paraPunktow2 { class punkt { private int x; private int y; bool przed; public punkt(int a, int b) { x = a; y = b; } public int dajX() { return x; } public int dajY() { return y; } public bool czyPrzed() { return przed; } public void ustawPrzed(bool a) { przed = a; } public void napisz() { Console.WriteLine(x + "," + y); } public int porownajX(punkt a) { if (x < a.dajX()) return -1; else if (x > a.dajX()) return 1; else if (y > a.dajY()) return 1; else return -1; } public int porownajY(punkt a) { if (y < a.dajY()) return -1; else if (y > a.dajY()) return 1; else if (x > a.dajX()) return 1; else return -1; } public double dystans(punkt a) { int X = x - a.dajX(); if (X < 0) X = X * -1; int Y = y - a.dajY(); if (Y < 0) Y = Y * -1; double dys = X * X + Y * Y; return dys; } } } <file_sep>/grafy/sumy/sumy/podzbior.cs using System; using System.Collections.Generic; using System.Text; namespace sumy { class podzbior { private List<skladnik> skladniki; public podzbior(List<skladnik> a) { skladniki = new List<skladnik>(a); } public podzbior(int a) { skladnik nowy = new skladnik(a); skladniki = new List<skladnik>(); skladniki.Add(nowy); } public List<skladnik> dajListe() { return skladniki; } public int dajSume() { int suma = 0; foreach(skladnik i in skladniki) { suma = suma + i.dajWartosc(); } return suma; } public void plusik(int a) { foreach(skladnik i in skladniki) { i.zwieksz(a); } } } } <file_sep>/obrazy/lab4/zad1-obraz z kamery/obrazy/main.cpp #include<opencv2/highgui/highgui.hpp> #include<opencv2/imgproc/imgproc.hpp> 
#include<iostream> #include<conio.h> using namespace cv; using namespace std; int main() { Mat image; Mat image2; Mat image3; Mat akumulator; Mat pierwsza; double alfa = 0.05; VideoCapture cap; cap.open(0); double dWidth = cap.set(CV_CAP_PROP_FRAME_WIDTH, 320); double dHeight = cap.set(CV_CAP_PROP_FRAME_HEIGHT, 240); namedWindow("window", CV_WINDOW_AUTOSIZE); namedWindow("window2", CV_WINDOW_AUTOSIZE); namedWindow("window3", CV_WINDOW_AUTOSIZE); namedWindow("window4", CV_WINDOW_AUTOSIZE); namedWindow("window5", CV_WINDOW_AUTOSIZE); bool czy = true; while (1) { try { cap >> image; cvtColor(image, image, CV_BGR2GRAY); imshow("window", image); if (czy == true) { image.copyTo(pierwsza); } imshow("window2", pierwsza); absdiff(image, pierwsza, image2); imshow("window3", image2); threshold(image2, image3, 30, 255, CV_THRESH_BINARY); imshow("window4", image3); if (czy == true) { czy = false; image3.copyTo(akumulator); } for (int i = 0; i < akumulator.rows; i++) { for (int j = 0; j < akumulator.cols; j++) { akumulator.at<uchar>(i, j) = (int)((1 - alfa)*akumulator.at<uchar>(i, j) + alfa * image3.at<uchar>(i, j)); } } imshow("window5", akumulator); } catch (Exception e) { cap.release(); cvDestroyAllWindows(); break; } if (waitKey(40) == 27) { cap.release(); cvDestroyAllWindows(); break; } } return 0; } <file_sep>/obrazy/lab11/zad/obrazy/main.cpp #include<opencv2/highgui/highgui.hpp> #include<opencv2/imgproc/imgproc.hpp> #include<opencv2/objdetect/objdetect.hpp> #include<opencv2/video/tracking.hpp> #include<opencv2/videoio.hpp> #include<opencv2/video/background_segm.hpp> #include<iostream> #include<conio.h> using namespace cv; using namespace std; bool czyZaDaleko(Point2f p1, Point2f p2, int odleglosc) { return (abs(p1.x - p2.x) > odleglosc || abs(p1.y - p2.y) > odleglosc); } int main() { Mat stara; Mat nowa; Mat nowaGray; Mat staraGray; Mat robocza; Mat rysunki; vector<Point2f> rogi; vector<Point2f> sledzone; vector<Point2f> tmp; vector<uchar> status; vector<float> blendy; Ptr<BackgroundSubtractorMOG2> mog; //MOG2 Background subtractor mog = createBackgroundSubtractorMOG2(500, 16, true); Mat front; Mat tlo; VideoCapture cap; cap.open(0); double dWidth = cap.set(CV_CAP_PROP_FRAME_WIDTH, 384); double dHeight = cap.set(CV_CAP_PROP_FRAME_HEIGHT, 288); bool pierwsza = true; int wersja = 1; bool detekcja = true; bool kontynuacja = true; namedWindow("window", CV_WINDOW_AUTOSIZE); namedWindow("window2", CV_WINDOW_AUTOSIZE); while (1) { try { if (kontynuacja == true) { if (detekcja == true) { if (wersja == 1) { //różnica jasności if (pierwsza == true) { cap >> stara; cvtColor(stara, staraGray, CV_BGR2GRAY); pierwsza = false; } else { cap >> nowa; nowa.copyTo(rysunki); imshow("window", nowa); cvtColor(nowa, nowaGray, CV_BGR2GRAY); absdiff(nowaGray, staraGray, robocza); CvSize rozmiar; rozmiar.width = (10 * 2) + 1; rozmiar.height = (10 * 2) + 1; GaussianBlur(robocza, robocza, rozmiar, 0); threshold(robocza, robocza, 80, 255, CV_THRESH_BINARY); vector<vector<Point>> kontury; vector<Vec4i> hier; findContours(robocza, kontury, hier, RETR_TREE, CHAIN_APPROX_SIMPLE); if (kontury.size() > 0) { int najwiekszy = 0; vector<Point> max = kontury[0]; for (int i = 0; i < kontury.size(); i++) { if (contourArea(kontury[i]) > contourArea(kontury[najwiekszy])) { najwiekszy = i; max = kontury[i]; } } drawContours(rysunki, kontury, najwiekszy, Scalar(240, 1, 1), 2, 8, hier, 0, Point()); rectangle(rysunki, boundingRect(max), Scalar(1, 240, 1), 2, 8, 0); vector<Point> otoczka; convexHull(max, otoczka, false); for (int i = 0; i < 
otoczka.size(); i++) { Point a = otoczka[i]; int j = i + 1; if (j == otoczka.size()) j = 0; Point b = otoczka[j]; line(rysunki, a, b, Scalar(1, 1, 240), 2, 8, 0); } Moments centrum; centrum = moments(max, false); Point p(centrum.m10 / centrum.m00, centrum.m01 / centrum.m00); String x = to_string(centrum.m10 / centrum.m00); String y = to_string(centrum.m01 / centrum.m00); String tekst = "Wsp: " + x; tekst = tekst + "," + y; putText(rysunki, tekst, p, FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(0, 0, 0), 1, CV_AA); circle(rysunki, p, 2, Scalar(0, 0, 0), 3); } imshow("window2", rysunki); nowa.copyTo(stara); nowaGray.copyTo(staraGray); } }else{ if (wersja == 2) { //optical flow if (pierwsza == true) { cap >> stara; cvtColor(stara, staraGray, COLOR_BGR2GRAY); tmp.clear(); sledzone.clear(); goodFeaturesToTrack(staraGray, tmp, 30, 0.1, 10); for (int i = 0; i < tmp.size(); ++i) { sledzone.push_back(tmp[i]); } pierwsza = false; } else { cap >> nowa; nowa.copyTo(rysunki); imshow("window", nowa); cvtColor(nowa, nowaGray, COLOR_BGR2GRAY); if (sledzone.size() > 0) { calcOpticalFlowPyrLK(staraGray, nowaGray, sledzone, rogi, status, blendy); } int licznik = 0; for (int i = 0; i < status.size(); i++) { if (status[i] == false || czyZaDaleko(sledzone[licznik], rogi[licznik], 2) == false) { sledzone.erase(sledzone.begin() + licznik); rogi.erase(rogi.begin() + licznik); } else { licznik++; } } for (int i = 0; i < sledzone.size(); ++i) { circle(rysunki, sledzone[i], 3, Scalar(0, 0, 255), -1, 8); line(rysunki, sledzone[i], rogi[i], Scalar(0, 0, 255)); } if (sledzone.size() < 10) { nowaGray.copyTo(staraGray); tmp.clear(); sledzone.clear(); goodFeaturesToTrack(staraGray, tmp, 30, 0.1, 10); for (int i = 0; i < tmp.size(); ++i) { sledzone.push_back(tmp[i]); } } rogi.clear(); status.clear(); imshow("window2", rysunki); } } else //gausiański mix { cap >> nowa; nowa.copyTo(rysunki); imshow("window", nowa); mog->apply(nowa, front); mog->getBackgroundImage(tlo); vector<vector<Point>> kontury2; vector<Vec4i> hier2; findContours(front, kontury2, hier2, RETR_TREE, CHAIN_APPROX_SIMPLE); vector<Moments> mu2(kontury2.size()); for (int i = 0; i < kontury2.size(); i++) { mu2[i] = moments(kontury2[i], false); } vector<Point2f> mc2(kontury2.size()); for (int i = 0; i < kontury2.size(); i++) { mc2[i] = Point2f(mu2[i].m10 / mu2[i].m00, mu2[i].m01 / mu2[i].m00); } for (int i = 0; i < kontury2.size(); i++) { Scalar color = Scalar(0, 0, 255); drawContours(rysunki, kontury2, i, color, 2, 8, hier2, 0, Point()); circle(rysunki, mc2[i], 4, color, -1, 8, 0); } imshow("window2", rysunki); } } }else { cap >> rysunki; imshow("window", rysunki); imshow("window2", rysunki); } } } catch (Exception e) { cap.release(); cvDestroyAllWindows(); break; } int znak = waitKey(15); switch (znak) { case 27: cap.release(); cvDestroyAllWindows(); return 0; case 49: wersja = 1; pierwsza = true; break; case 50: wersja = 2; pierwsza = true; break; case 51: wersja = 3; pierwsza = true; break; case 52: if (detekcja == true) { detekcja = false; }else { detekcja = true; } break; case 53: if (kontynuacja == true) { kontynuacja = false; } else { kontynuacja = true; } break; default: break; } } return 0; }<file_sep>/obrazy/projekt/obrazy/main.cpp #include<opencv2/highgui/highgui.hpp> #include<opencv2/imgproc/imgproc.hpp> #include<opencv2/objdetect/objdetect.hpp> #include<iostream> #include<conio.h> #define CVUI_IMPLEMENTATION #include "cvui.h" #define WINDOW_NAME "Panel Sterowania" using namespace cv; using namespace std; int robocza1; int robocza2; int robocza3; int 
robocza4; double skala; int sasiedzi; int rozmiarMin; int rozmiarMax; void on_trackbar(int, void*) { skala = (1.05 + ((double)robocza1 / 20)); sasiedzi = robocza2 + 3; rozmiarMin = robocza3 + 10; rozmiarMax = robocza4 + 150; } int main() { cv::Mat frame = cv::Mat(330, 250, CV_8UC3); cvui::init(WINDOW_NAME); bool wybrano = false; VideoCapture cap; cv::namedWindow("Suwaki", 4); cv::createTrackbar("scaleFactor", "Suwaki", &robocza1, 8, on_trackbar); cv::createTrackbar("minNeighbors", "Suwaki", &robocza2, 3, on_trackbar); cv::createTrackbar("minSize", "Suwaki", &robocza3, 50, on_trackbar); cv::createTrackbar("maxSize", "Suwaki", &robocza4, 150, on_trackbar); skala = 1.1; sasiedzi = 3; rozmiarMin = 10; rozmiarMax = 250; Mat zrodlo; Mat szara; Mat rysunki; Mat wynik; vector<Rect> twarze; CascadeClassifier kaskadaTwarzy; int numer = 1; int numerZrzutu = 1; int tryb = 0; //0-nic, 1-prostokąt, 2-blur, 3-podmiana twarzy bool zapis = false; int zrzuty = 0; string nazwa; stringstream ss; int fps = 15; CvSize rozmiar; rozmiar.width = 640; rozmiar.height = 480; VideoWriter twozony; Mat obszarObraz; Rect obszarKordy; Mat twaz; Point centrum; Mat zapisana; Mat wstawiana; Point pg; Point ld; string sciezkaOdczytu; string sciezkaZapisu; string sciezkaTwazy; string tmp; vector<int> compression_params; compression_params.push_back(CV_IMWRITE_JPEG_QUALITY); compression_params.push_back(100); cv::namedWindow("window", CV_WINDOW_AUTOSIZE); cv::namedWindow("window2", CV_WINDOW_AUTOSIZE); String twazeNazwa = "D:\\klatki\\haarcascade_frontalface_alt.xml"; if (!kaskadaTwarzy.load(twazeNazwa)) { return -1; }; while (1) { try { frame = cv::Scalar(49, 52, 49); if (cvui::button(frame, 20, 10, "Przetwarzaj obraz z kamery")) { cap.open(0); double dWidth = cap.set(CV_CAP_PROP_FRAME_WIDTH, 640); double dHeight = cap.set(CV_CAP_PROP_FRAME_HEIGHT, 480); wybrano = true; } if (cvui::button(frame, 20, 50, "Przetwarzaj obraz z dysku")) { cout << "Podaj nazwę pliku który chcesz otworzyć: "; cin >> tmp; sciezkaOdczytu = "D:\\klatki\\"; ss.str(std::string()); ss << tmp; sciezkaOdczytu += string(ss.str()); sciezkaOdczytu += ".avi"; cap = cv::VideoCapture(sciezkaOdczytu); wybrano = true; } if (cvui::button(frame, 20, 90, "Wylacz maskowanie")) { tryb = 0; } if (cvui::button(frame, 20, 130, "Dodaj prostokąt")) { tryb = 1; } if (cvui::button(frame, 20, 170, "Dodaj rozmycie")) { tryb = 2; } if (cvui::button(frame, 20, 210, "Podstaw obraz")) { tryb = 3; cout << "Podaj nazwę pliku który chcesz wstawić: "; cin >> tmp; sciezkaTwazy = "D:\\klatki\\"; ss.str(std::string()); ss << tmp; sciezkaTwazy += string(ss.str()); sciezkaTwazy += ".jpg"; twaz = imread(sciezkaTwazy, CV_LOAD_IMAGE_UNCHANGED); } if (cvui::button(frame, 20, 250, "Wlacz/wylacz nagrywanie")) { if (zapis == false) { zapis = true; sciezkaZapisu = "D:\\klatki\\nowy"; ss.str(std::string()); ss << numer; sciezkaZapisu += string(ss.str()); sciezkaZapisu += ".avi"; twozony = VideoWriter(sciezkaZapisu, CV_FOURCC('D', 'I', 'V', 'X'), fps, rozmiar, true); } else { zapis = false; numer++; twozony.release(); } } if (cvui::button(frame, 20, 290, "Zapisz twarze")) { cout << "Podaj liczbę klatek dla których chcesz zapisać zrzuty twarzy: "; cin >> zrzuty; } cvui::imshow(WINDOW_NAME, frame); if (wybrano == true) { cap >> zrodlo; resize(zrodlo, zrodlo, CvSize(640, 480), 0, 0, CV_INTER_LINEAR); zrodlo.copyTo(rysunki); zrodlo.copyTo(wynik); cvtColor(zrodlo, szara, CV_BGR2GRAY); equalizeHist(szara, szara); kaskadaTwarzy.detectMultiScale(szara, twarze, skala, sasiedzi, 0 | CV_HAAR_SCALE_IMAGE, 
Size(rozmiarMin, rozmiarMin), Size(rozmiarMax, rozmiarMax)); for (size_t i = 0; i < twarze.size(); i++) { obszarObraz = zrodlo(twarze[i]); obszarKordy = Rect(twarze[i].x, twarze[i].y, twarze[i].width, twarze[i].height); centrum = Point(twarze[i].x + twarze[i].width*0.5, twarze[i].y + twarze[i].height*0.5); ellipse(rysunki, centrum, Size(twarze[i].width*0.5, twarze[i].height*0.5), 0, 0, 360, Scalar(0, 0, 255), 4, 8, 0); switch (tryb) { case 1: //czarny prostokąt pg = Point(twarze[i].x, twarze[i].y+(twarze[i].height*0.2)); ld = Point(twarze[i].x+ twarze[i].width, twarze[i].y + (twarze[i].height * 0.6)); rectangle(wynik, pg, ld, Scalar(0, 0, 0), -1); break; case 2: //blur GaussianBlur(obszarObraz, obszarObraz, CvSize(55, 55), 0); obszarObraz.copyTo(wynik(obszarKordy)); break; case 3: //podstawienie twarzy twaz.copyTo(wstawiana); resize(wstawiana, wstawiana, CvSize(twarze[i].width, twarze[i].height), 0, 0, CV_INTER_LINEAR); wstawiana.copyTo(wynik(obszarKordy)); break; default: break; } if (zrzuty > 0) { //robienei zrzutów string sciezka = "D:\\klatki\\zrzut"; stringstream ss; ss << numerZrzutu; sciezka += string(ss.str()); sciezka += ".jpg"; bool bSuccess = imwrite(sciezka, wynik(obszarKordy), compression_params); numerZrzutu++; } } if (zapis == true) { circle(rysunki, Point(10, 10), 6, Scalar(0, 0, 255), -1, 8); twozony.write(wynik); } if (zrzuty > 0) { zrzuty--; } cv::imshow("window", rysunki); cv::imshow("window2", wynik); } } catch (Exception e) { //if (zapis == true) { // twozony.release(); //} //cap.release(); //cvDestroyAllWindows(); //break; } if (waitKey(15) == 27) { if (zapis == true) { twozony.release(); } cap.release(); cvDestroyAllWindows(); break; } } return 0; }<file_sep>/grafy/sumy/sumy/Program.cs using System; using System.Collections.Generic; using System.IO; namespace sumy { class Program { static void Main(string[] args) { List<podzbior> podzbiory = new List<podzbior>(); List<skladnik> skladniki = new List<skladnik>(); int limit; var s = new FileInfo(Directory.GetCurrentDirectory()); var s2 = s.Directory.Parent.Parent; String sciezka = s2.ToString() + "\\dane.csv"; bool pierwsza = true; skladnik tmp; using (var reader = new StreamReader(sciezka)) { while (!reader.EndOfStream) { var line = reader.ReadLine(); if (pierwsza == true) { limit = Convert.ToInt32(line); pierwsza = false; } else { tmp = new skladnik(Convert.ToInt32(line)); skladniki.Add(tmp); } } } int n = skladniki.Count; skladniki.Sort((a, b) => (a.porownaj(b))); foreach(skladnik i in skladniki) { Console.WriteLine(i.dajWartosc() + " " + i.GetHashCode()); } podzbior start = new podzbior(0); for(int i=0; i<n; i++) { podzbior a = new podzbior(podzbiory[i].dajListe()); podzbior b = new podzbior(podzbiory[i].dajListe()); b.plusik(skladniki[i + 1].dajWartosc()); //podzbior kolejny = } Console.ReadKey(); } } } <file_sep>/seminarium/part2/part2/litera.cs using System; using System.Collections.Generic; using System.Linq; using System.Text; using System.Threading.Tasks; namespace part2 { public class litera { private char znak; private int wystopienia; private double procent; public litera(char a) { znak = a; wystopienia = 0; procent = 0; } public char dajLitere() { return znak; } public int ileRazy() { return wystopienia; } public double jakiProcent() { return procent; } public void podbij() { wystopienia++; } public void zeruj() { wystopienia = 0; } public void policzProcent(int suma) { procent = (double)wystopienia / (double)suma; } public void napisz() { if(wystopienia>0) Console.WriteLine(znak + ": " + wystopienia + " 
- " + (procent*100) + "%"); } } } <file_sep>/wzorce/singleton/singleton/logerStary2.cs using System; using System.Collections.Generic; using System.Linq; using System.Text; using System.Threading.Tasks; namespace singleton { public class logerStary2 : logerBaza<logerStary2> { new public void loguj() { Console.WriteLine("ociec dwa pisze"); } } } <file_sep>/obrazy/lab2/zad1-negatyw/obrazy/main.cpp #include<opencv2/highgui/highgui.hpp> #include<opencv2/imgproc/imgproc.hpp> #include<iostream> #include<conio.h> using namespace cv; using namespace std; int main() { Mat image; Mat image2; Mat image3; VideoCapture cap; cap.open(0); namedWindow("window", CV_WINDOW_AUTOSIZE); double dWidth = cap.set(CV_CAP_PROP_FRAME_WIDTH, 640); double dHeight = cap.set(CV_CAP_PROP_FRAME_HEIGHT, 480); while (1) { try { cap >> image; imshow("window", image); cvtColor(image, image2, CV_BGR2GRAY); imshow("window2", image2); image3 = image2; for (int i = 0; i < image2.rows; i++) { for (int j = 0; j < image2.cols; j++) { image3.at<uchar>(i, j) = 255 - image3.at<uchar>(i, j); } } imshow("window3", image3); } catch (Exception e) { cap.open(1); } if (waitKey(15) == 27) { cap.release(); cvDestroyAllWindows(); break; } } return 0; }<file_sep>/grafy/gałąź2/gałąź2/podzbior.cs using System; using System.Collections.Generic; using System.Text; namespace gałąź2 { class podzbior { private List<punkt> punkty; private List<linia> galaz; private punkt kozen; private int koszt; public podzbior(List<punkt> lista) { punkty = new List<punkt>(lista); policzKozen(); koszt = 0; galaz = new List<linia>(); if (punkty.Count == 2) { dodajLinie(punkty[0], kozen); dodajLinie(punkty[1], kozen); foreach (linia l in galaz) { koszt = koszt + l.dlugosc(); } } if(punkty.Count > 2) { foreach(punkt p in punkty) { List<punkt> tmp = new List<punkt>(punkty); tmp.Remove(p); podzbior mniejszy = Program.szukaj(tmp); if (koszt == 0) { galaz = new List<linia>(mniejszy.dajGalaz()); punkt malyKozen = mniejszy.dajKozen(); dodajLinie(malyKozen, kozen); dodajLinie(p, kozen); foreach (linia l in galaz) { koszt = koszt + l.dlugosc(); } } else { List<linia> staraGalonz = new List<linia>(galaz); galaz = new List<linia>(mniejszy.dajGalaz()); punkt malyKozen = mniejszy.dajKozen(); dodajLinie(malyKozen, kozen); dodajLinie(p, kozen); int nowyKoszt = 0; foreach (linia l in galaz) { nowyKoszt = nowyKoszt + l.dlugosc(); } if (nowyKoszt < koszt) { koszt = nowyKoszt; } else { galaz = new List<linia>(staraGalonz); } } } } } public List<linia> dajGalaz() { return galaz; } public punkt dajKozen() { return kozen; } public void ukozen() { punkt zero = new punkt(0, 0); dodajLinie(zero, kozen); } public bool czyTo(List<punkt> lista) { if (lista.Count == punkty.Count) { foreach(punkt p in lista) { if (punkty.Contains(p) == false) return false; } } else { return false; } return true; } private void policzKozen() { int x = punkty[0].dajX(); int y = punkty[0].dajY(); foreach(punkt p in punkty) { if (p.dajX() < x) x = p.dajX(); if (p.dajY() < y) y = p.dajY(); } kozen = new punkt(x, y); } public void dodajLinie(punkt p1, punkt p2) { if (p1.dajX() == p2.dajX() && p1.dajY() == p2.dajY()) { return; } if (p1.dajX() == p2.dajX() || p1.dajY() == p2.dajY()) { galaz.Add(new linia(p1, p2)); return; } punkt tmp = new punkt(Math.Max(p1.dajX(), p2.dajX()), Math.Min(p1.dajY(), p2.dajY())); dodajLinie(p1, tmp); dodajLinie(p2, tmp); } } } <file_sep>/obrazy/lab3/zad1-porównanie z pierwszą/obrazy/main.cpp #include<opencv2/highgui/highgui.hpp> #include<opencv2/imgproc/imgproc.hpp> #include<iostream> 
#include<conio.h> using namespace cv; using namespace std; int main() { Mat image; Mat image2; Mat image3; Mat pierwsza; cv::VideoCapture capture("D:\\klatki\\robot_no_loop.avi"); namedWindow("window", CV_WINDOW_AUTOSIZE); namedWindow("window2", CV_WINDOW_AUTOSIZE); namedWindow("window3", CV_WINDOW_AUTOSIZE); namedWindow("window4", CV_WINDOW_AUTOSIZE); bool czy = true; while (1) { try { capture >> image; cvtColor(image, image, CV_BGR2GRAY); imshow("window", image); if (czy == true) { pierwsza = image; czy = false; } imshow("window2", pierwsza); absdiff(image, pierwsza, image2); imshow("window3", image2); threshold(image2, image3, 100, 255, CV_THRESH_BINARY); imshow("window4", image3); } catch (Exception e) { capture.release(); cvDestroyAllWindows(); break; } if (waitKey(15) == 27) { capture.release(); cvDestroyAllWindows(); break; } } return 0; }<file_sep>/grafy/gałąź2/gałąź2/linia.cs using System; using System.Collections.Generic; using System.Text; namespace gałąź2 { class linia { private punkt p1; private punkt p2; public linia(punkt P1, punkt P2) { p1 = P1; p2 = P2; } public int dlugosc() { int dysX = p1.dajX() - p2.dajX(); if (dysX < 0) dysX = dysX * -1; int dysY = p1.dajY() - p2.dajY(); if (dysY < 0) dysY = dysY * -1; return dysX + dysY; } public void napisz() { Console.WriteLine("Linia łącząca punkty (" + p1.dajX() + "," + p1.dajY() + ") oraz (" + p2.dajX() + "," + p2.dajY() + ")"); } } }
6f6cc5797cc59b6a1083becefa66d7552a9493ae
[ "C#", "C++" ]
51
C#
wardasz/sem2
cc9c1fe6375528c8f8ae9ea6ae73380959f40676
78247de61fc6ab3bae6bd279b62025b34bbdb11f
refs/heads/master
<file_sep># Forked from https://github.com/thingsSDK/wifiscanner

The purpose of this fork is to add an additional `rssi` field to the JSON output. Sample output will look like:

```
[
    {
        rssi: -123,
        ssid: 'wifi with-n0-s3cur1ty!',
        mac: '16:0d:7f:49:da:e1',
        channel: '1',
        security: ['None']
    },
    {
        rssi: -172,
        ssid: 'WEP enabled',
        mac: '16:0d:7f:49:da:e2',
        channel: '1',
        security: ['WEP']
    },
    {
        rssi: -187,
        ssid: 'WPA1 Enabled',
        mac: '16:0d:7f:49:da:e3',
        channel: '1',
        security: ['WPA']
    },
    {
        rssi: -234,
        ssid: 'WPA1+WPA2',
        mac: '16:0d:7f:49:da:e4',
        channel: '1',
        security: ['WPA', 'WPA2']
    },
    {
        rssi: -251,
        ssid: 'WPA2 Only',
        mac: '16:0d:7f:49:da:e5',
        channel: '1',
        security: ['WPA2']
    }
]
```
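As a quick illustration of consuming the new field — a minimal sketch that is not part of this fork, and which assumes the scan result has been captured to a file named `networks.json` — the strongest networks can be listed first by sorting on `rssi`:

```python
import json

# Load a previously captured scan result (the file name is an assumption for this example).
with open("networks.json") as f:
    networks = json.load(f)

# rssi is reported in dBm; values closer to zero mean a stronger signal,
# so a plain descending sort puts the strongest network first.
for n in sorted(networks, key=lambda net: net["rssi"], reverse=True):
    print(f"{n['rssi']:>5} dBm  {n['ssid']:<25} {', '.join(n['security'])}")
```

Since `rssi` is a negative dBm value, sorting in descending order ranks networks from strongest to weakest signal.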
69eb56fb83159376b7185114f617a1900cfc0bea
[ "Markdown" ]
1
Markdown
zafrani/zafrani-rpi-wifiscanner
d3b60f84130f3af2f2f9562aeac3a6fa7d002f06
dcba0e8f4bc6c320a95d21852609ccf96d4d4d4b
refs/heads/main
<repo_name>Aruna2005/java-test-sample<file_sep>/README.md # java-test-sample java-test-sample
f62e755e16db8983050b13b06ea6e7d9d15b4ef5
[ "Markdown" ]
1
Markdown
Aruna2005/java-test-sample
defe36963decc35d40db4118e2225c33ce61bf44
1ce77fd8f075ffeb575753edd63b66bf1b6e7d54
refs/heads/master
<file_sep>These are the LaTeX files for a capstone project: a pairs trading model implemented on a distributed Python platform. Download the .rar file if you want to run the project (it contains the code, data, and a video walkthrough).

This capstone project implements a distributed Python platform for testing quantitative trading models on financial instruments in a networked client/server setting. Normally we backtest locally against historical data to evaluate a trading strategy, but the backtested result tends to overstate what the strategy would actually achieve in real-time trading. We demonstrate this in the paper by showing that our quantitative trading model performs much worse in simulated trading than in the backtesting environment. We therefore built this platform not only to implement trading strategies and backtest them historically, but also to simulate trades under conditions close to the real market, providing an additional control before live trading.
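For readers unfamiliar with the underlying signal, a pairs trade is typically driven by the z-score of the spread between two related instruments. The sketch below is a generic illustration of that idea only — it is not taken from the project's code, and the hedge ratio, window length, and function name are illustrative assumptions:

```python
import numpy as np
import pandas as pd

def spread_zscore(price_a: pd.Series, price_b: pd.Series,
                  hedge_ratio: float, window: int = 60) -> pd.Series:
    """Rolling z-score of the log-price spread between two instruments."""
    spread = np.log(price_a) - hedge_ratio * np.log(price_b)
    mean = spread.rolling(window).mean()
    std = spread.rolling(window).std()
    return (spread - mean) / std

# A typical rule enters a position when |z| exceeds an upper threshold
# and exits when the z-score reverts toward zero.
```

Whether such a rule holds up in practice is exactly what the simulated-trading stage of the platform is meant to check, beyond what backtesting alone reports.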
a502b5116ee184f15aabe867a707c1a7451ae524
[ "Markdown" ]
1
Markdown
kiloppertry/Pairs-Trading-with-Machine-Learning-on-Distributed-Python-Platform
02152dbab938787a619cfdac2fefc399a2fc7bda
e7c1335cdb4369892ee7bfbb66228815d8f3d034
refs/heads/master
<file_sep>import glob from os.path import dirname, basename, join views_collection = glob.glob(join(dirname(__file__), "views_collection", "*.py")) for f in views_collection: import_script =\ """\ from .{0}.{1} import *\ """.format("views_collection", basename(f[:-3]).replace('/', '.')) # print(import_script) exec (import_script) <file_sep>from django.db import models from .AuthGroup import AuthGroup from django.contrib.auth.models import User class MessageLog(models.Model): user = models.ForeignKey(User, on_delete=models.CASCADE, default="") time = models.DateTimeField() type = models.CharField(max_length=25, default="") title = models.CharField(max_length=25) log = models.CharField(max_length=100) read = models.BooleanField(default=False) def __str__(self): return self.title class Meta: verbose_name_plural = 'MessageLog' <file_sep>#!/usr/bin/env bash export DEBUG_MODE=True echo '===DEBUG MODE====' echo $DEBUG_MODE echo '=================' sudo /etc/init.d/redis-server stop sudo /etc/init.d/redis-server start ./make-migrations-and-migrate sudo service postgresql stop sudo service postgresql start cd .. && python3 manage.py test cd script python3 ../backup_thread.py & python3 ../updateCache_thread.py & python3 ../manage.py runserver 172.17.0.2:8000 --insecure <file_sep>from django.shortcuts import render from ..models import * from datetime import datetime, timedelta, date import json from django.contrib.auth.models import User from django.db import connection from django.views import View import glob from os.path import dirname, basename, join handlers_collection = glob.glob(join(dirname(__file__), "handlers", "*.py")) for f in handlers_collection: import_script =\ """\ from .{0}.{1} import *\ """.format("handlers", basename(f[:-3]).replace('/', '.')) # print(import_script) exec (import_script) class Tables(View): def get(self, request): now = datetime.now() plant_table = PlantTableHandler() messageHandler = MessageCenterHandler(self.request) messageHandler.setNow(now) contextHandler = ContextHandler() contextHandler.join(plant_table) contextHandler.join(messageHandler) contextHandler.fillInContext() return render(self.request, 'template_dashboard/tables.html', contextHandler.getContext()) <file_sep># Generated by Django 3.0.8 on 2021-01-07 15:54 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('monitor_app', '0020_auto_20201111_1430'), ] operations = [ migrations.CreateModel( name='Connections', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('server_name', models.CharField(max_length=25)), ('number', models.IntegerField(blank=True, null=True)), ('time', models.DateTimeField()), ], options={ 'verbose_name_plural': 'connections', }, ), ] <file_sep>from django.shortcuts import render from datetime import datetime, timedelta, date import json from django.contrib.auth.models import User from .ModelDataHandler import ModelDataHandler from ...models import * class WarningStatusHander(ModelDataHandler): def getData(self): #Status data warning_count = TaskStatus.objects.get(task_name="WARNING COUNT") return { 'title': warning_count.task_name, 'status': warning_count.status } def getTitle(self): return 'warning_count_data' def updateStatusData(self, statusData): task = TaskStatus.objects.get(task_name="WARNING COUNT") task.status = statusData task.save() def create_fake_data(self, status): task = TaskStatus() task.task_name = "WARNING COUNT" task.status = status task.save() <file_sep>from 
django.shortcuts import render from ..models import * from django.views.decorators.csrf import csrf_exempt import json from datetime import datetime, timedelta import glob from os.path import dirname, basename, join handlers_collection = glob.glob(join(dirname(__file__), "handlers", "*.py")) for f in handlers_collection: import_script =\ """\ from .{0}.{1} import *\ """.format("handlers", basename(f[:-3]).replace('/', '.')) # print(import_script) exec (import_script) from django.views import View class UpdateLogMessageView(View): def post(self, request): msgUpdateHandler = UpdateMessageHandler(request) contextHandler = ContextHandler() contextHandler.join(msgUpdateHandler) contextHandler.fillInContext() return render(request, 'template_dashboard/update_message_log_code_piece.html', contextHandler.getContext()) <file_sep>from django.shortcuts import render from ...models import * from django.contrib.auth.tokens import default_token_generator from django.contrib.auth.models import User import sys import os from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText import smtplib from django.template.loader import render_to_string from django.utils.http import urlsafe_base64_encode from django.utils.encoding import force_bytes class MailSender(): def __init__(self): #Sending email self.content = MIMEMultipart() self.template = "" self.config = {} self.mail_sended = False def setSubject(self, subject): self.content["subject"] = subject def setSmtpAccount(self, smtp): self.content["from"] = smtp def setSendTo(self, to): self.content["to"] = to def setEmailTemplate(self, path): self.template = path def setConfig(self, config): self.config = config def sendMail(self, account, pwd): self.content.attach(MIMEText(render_to_string(self.template, self.config))) with smtplib.SMTP(host="smtp.gmail.com", port="587") as smtp: try: smtp.ehlo() smtp.starttls() smtp.login(account, pwd) smtp.send_message(self.content) print("Complete!") self.mail_sended = True except Exception as e: print("Error message: ", e) def has_sent(self): return self.mail_sended <file_sep>from django.db import models class TaskStatus(models.Model): task_name = models.CharField(max_length=25) status = models.CharField(max_length=25) def __str__(self): return str(self.task_name) class Meta: verbose_name_plural = 'TaskStatus' <file_sep>from django.shortcuts import render from datetime import datetime, timedelta, date import json from django.contrib.auth.models import User from django.views import View from django.views.decorators.clickjacking import xframe_options_sameorigin import glob from os.path import dirname, basename, join handlers_collection = glob.glob(join(dirname(__file__), "handlers", "*.py")) for f in handlers_collection: import_script =\ """\ from .{0}.{1} import *\ """.format("handlers", basename(f[:-3]).replace('/', '.')) # print(import_script) exec (import_script) class Reconstruction(View): @xframe_options_sameorigin def get(self, request): contextHandler = ContextHandler() return render(self.request, 'template_opensfm/reconstruction.html', contextHandler.getContext()) <file_sep># Generated by Django 3.0.8 on 2020-10-30 08:30 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('monitor_app', '0011_plantdata'), ] operations = [ migrations.RenameField( model_name='plantdata', old_name='price', new_name='growth_rate', ), migrations.RemoveField( model_name='plantdata', name='product', ), migrations.RemoveField( model_name='plantdata', name='time', ), 
migrations.AddField( model_name='plantdata', name='aruco_id', field=models.IntegerField(default=-1), ), migrations.AddField( model_name='plantdata', name='image_url', field=models.CharField(max_length=100, null=True), ), migrations.AddField( model_name='plantdata', name='seed_date', field=models.DateTimeField(null=True), ), migrations.AddField( model_name='plantdata', name='status', field=models.CharField(max_length=100, null=True), ), migrations.AddField( model_name='plantdata', name='type', field=models.CharField(max_length=25, null=True), ), ] <file_sep>from selenium import webdriver import unittest import time class NewVisitorTest(unittest.TestCase): def setUp(self): # before running test self.browser = webdriver.Firefox() # WARNING don't rely on implicitly_wait! it won't work for every use case # self.browser.implicitly_wait(10) def tearDown(self): # after running test self.browser.quit() def test_can_start_a_list_and_retrieve_it_later(self): # any test starts with 'test_' self.browser.get('https://plantmonitor.mooo.com') time.sleep(3) self.assertIn("SB Admin 2 - Dashboard", self.browser.title) # __name__ is __main__ : that's how a Python script checks if it's been executed from the command # line, rather than just imported by another script if __name__ == '__main__': unittest.main(warnings='ignore') <file_sep>from django.db import models class GrowthRate(models.Model): time = models.DateTimeField(null=True) plant_id = models.IntegerField(blank=True, null=True) rate = models.FloatField(null=True, blank=True, default=None) def __str__(self): return str(self.time)+"_"+str(self.plant_id) class Meta: verbose_name_plural = 'GrowthRate' <file_sep>import os import sys from datetime import datetime, timedelta sys.path.append("..") from secure_data.secure_data_loader import SecureDataLoader secure_data_loader = SecureDataLoader() if __name__ == '__main__': # Run dumping data script & Save .sql in backup directory command = 'pg_dump "host='+secure_data_loader.secure_data['POSTGRES_HOST']+' \ port='+secure_data_loader.secure_data['POSTGRES_PORT']+' \ dbname='+secure_data_loader.secure_data['POSTGRES_DB_NAME']+' \ user='+secure_data_loader.secure_data['POSTGRES_USER']+' \ password='+secure_data_loader.secure_data['POSTGRES_PASSWORD']+'" > '+secure_data_loader.secure_data['BACKUP_DIR']+'/backup.sql' os.system(command) # print(command) # os.system('ls -l') # Change working directory os.chdir(secure_data_loader.secure_data['BACKUP_DIR']) # print(os.getcwd()) # cwd: current working directory # Setup git config if os.path.isdir(".git") is False: os.system('git init') os.system('git config --global user.email "'+secure_data_loader.secure_data['GIT_EMAIL']+'"') os.system('git config --global user.name "'+secure_data_loader.secure_data['GIT_USER']+'"') # Git add . os.system('git add .') commit_time = (datetime.now()+ timedelta(hours=8)).strftime('%Y-%m-%d-%H-%M') command = 'git commit -m "'+commit_time+'"' os.system(command) # print(command) os.system("git log --all --oneline --decorate --graph") <file_sep># Plant Monitor A Django web app to visualize plant data and system status > To realize automatic planting in our farmland, I decided to create an application to get the data and system status, and visualize them as well. This application provides a convenient way to manage the farmland remotely. 
![image](https://github.com/ArthurWuTW/django_project/blob/master/readme_materials/1.png) ![image](https://github.com/ArthurWuTW/django_project/blob/master/readme_materials/5.png) ![image](https://github.com/ArthurWuTW/django_project/blob/master/readme_materials/4.png) ## Features - **[Python](https://www.python.org/)** with **[Django](https://www.djangoproject.com/)** framework - **[Postgresql](https://www.postgresql.org/)** database with **[Django](https://www.djangoproject.com/)** ORM - Chart visualization with **[Chart.js](https://www.chartjs.org/)** - Image serialization/deserialization with **[OpenCV](https://opencv.org/)** - Environment variables management with **[Docker](https://www.docker.com/)** in another repository - Email authentication for activating account and resetting forgot password as well - Configuration management - Database backup management - Field 3d reconstruction with **[OpenSfm](https://www.opensfm.org/)** - Frontend template with **[sb admin 2](https://github.com/StartBootstrap/startbootstrap-sb-admin-2)** - Cache with **[Redis](https://redis.io/)** - Web server gateway interface with **[Gunicorn](https://gunicorn.org/)** - Reverse proxy server with **[Nginx](https://nginx.org/en/)** - SSL certificate with **[Let's encrypt](https://letsencrypt.org/)** - Penetration testing using **[OWASP ZAP](https://owasp.org/)** - DNS hosting using **[FreeDNS](https://freedns.afraid.org/)** ## Django Project Structure ``` django_project/ ├── backup_git.py # backup .sql with git version control ├── backup_thread.py # the python script to backup .sql ├── data_directory │   ├── postgresql # persistent db directory │   └── README.MD # instructions to store the persistent db in this directory ├── data_image ├── django_project │   ├── asgi.py │   ├── settings.py │   ├── urls.py │   └── wsgi.py ├── manage.py ├── monitor_app │   ├── admin.py │   ├── apps.py │   ├── migrations │   ├── models_collection # one file contains only one class, and file name is the same as class name │   ├── models.py # import all the models in models_collection │   ├── static │   │   └── dashboard │   ├── system_check # run unit test before starting the app │   ├── templates │   │   └── template_dashboard │   ├── tests.py │   ├── urls.py # url settings │   ├── views_collection # one file contains only one class, and file name is the same as class name │   │   └── handlers # view handlers │   └── views.py # import all the views in views_collection directory ├── readme_materials ├── script │   ├── create-apps │   ├── create-superuser │   ├── make-migrations-and-migrate │   └── start-project-server # run web application ├── secure_data │   ├── secure_data_example.json # secure data configuration │   └── secure_data_loader.py └── test_script ``` ## Setup #### 1. Linux Environment See repository **[Django-docker-script](https://github.com/ArthurWuTW/django-docker-script)** and follow the instructions to install docker and create docker image. #### 2. Database Postgresql Database is saved as a persistent file(s) outside Docker. If you first setup the database, See **[README](https://github.com/ArthurWuTW/django-project/tree/master/data_directory)**, and if you have existed database, copy and paste into **[data_directory](https://github.com/ArthurWuTW/django-project/tree/master/data_directory)** #### 3. Configuration See directory **[secure_data](https://github.com/ArthurWuTW/django-project/tree/master/secure_data)** #### 4. 
Others - 3D Reconstruction App(OpenSfm) > We need to create another Docker image for OpenSfm, See **[docker-script-opensfm](https://github.com/ArthurWuTW/docker-script-opensfm)** for Docker and **[OpenSfm custom fork](https://github.com/ArthurWuTW/OpenSfM)** - Hardware in Farmland > DC motor, belt, DC power supply, L298N and Raspberry. Code run in raspberry pi is in **[HERE](https://github.com/ArthurWuTW/crawler-script)** - Local Backup Directory > Create an empty directory with git init for database backup management ## Run Server ```sh # start container cd <DOCKER_REPO_DIR>/docker ./project-start-container # enter container ./project-enter-container-shell # run app cd <DJANGO_PROJECT_DIR>/script ./start-project-server ``` ## Extension #### 1. Create a new Class-based View > In general, the code of Django View classes is written in file <strong><APP_DIR>/views.py</strong>, but as time goes by the code grows and becomes more and more complicated. In order to make the code clean, every class is written into a single file(.py) located in views_collection directory. The file name has to be the same as the name of view class. For example, there is a class named "ViewExample", and its file name must be ViewExample.py ```py from django.views import View class ViewsExample(View): def get(self, request): handler = Handler() contextHandler = ContextHandler() contextHandler.join(handler) contextHandler.fillInContext() return render(request, "XX.html", contextHandler.getContext()) def post(self, request): handler = Handler() contextHandler = ContextHandler() contextHandler.join(handler) contextHandler.fillInContext() return render(request, "XX.html", contextHandler.getContext()) ``` #### 2. Create a new Data Handler Data handler which wants to fill data into the Context has to inherit ModelDataHandler class and overwrite getData and getTitle methods. ```py # monitor_app/views_collection/handlers/ModelDataHandler.py import abc class ModelDataHandler(metaclass=abc.ABCMeta): @abc.abstractmethod def getData(self): return NotImplemented @abc.abstractmethod def getTitle(self): return NotImplemented ``` ContextHandler collects data handlers and generates a Context dictionary by their keys(title) and values(data). ```py # monitor_app/views_collection/handlers/ContextHandler.py class ContextHandler(): def __init__(self): self.data_handler_list = list() self.context = {} def join(self, dataHandler): self.data_handler_list.append(dataHandler) def fillInContext(self): for data in self.data_handler_list: self.context[data.getTitle()] = data.getData() def getContext(self): return self.context ``` #### 3. Create a new Class-based Model Every model class is written into a single file(.py) located in models_collection directory. The file name has to be the same as the name of model class. For example, there is a class named "ModelExample", and its file name must be ModelExample.py ```py from django.db import models class ModelExample(models.Model): name = models.CharField(max_length=25) status = models.CharField(max_length=25) def __str__(self): return str(self.name) class Meta: verbose_name_plural = 'Model' ``` <file_sep># data_directory This directory stores the persistent database files ## Prerequisites #### Move django_project directory to <USER_HOME>/Desktop ## Setup Persistent Database Files #### 1. Modify container settings and enter container shell Delete the line ```sh # file: run-container ... ... $DATABASE_OPTS \ ... ... 
``` in **[django-docker-script/docker/project/run-container](https://github.com/ArthurWuTW/django-docker-script/blob/master/docker/project/run-container)**, start container and then enter the container shell > We need to disable docker volume mapping otherwise database files will be replaced with empty directory(data_directory should have nothing at this moment) #### 2. Inside container, run the command below to copy the original Postgresql data files to this directory ```sh sudo rsync -av /var/lib/postgresql /<path>/django_project/data_directory ``` #### 3. exit container, stop container #### 4. restore the code in step 1 #### 5. start container again > At this moment, /<USER_HOME>/Desktop/django_project/data_directory/postgresql has been mapping to /var/lib/postgresql in /<DJANGO_DOCKER_REPO>/docker/project/run-container Finish ## Note(Redundant) > this section just saves some postesql information and old method which is never used Steps: 1. Moving the PostgreSQL Data Directory: default data_directory is "/var/lib/postgresql/9.5/main" ```sh sudo rsync -av /var/lib/postgresql /<path>/django-project/data_directory ``` 2. Pointing to the New Data Location: the data_diretory configuration is in "/etc/postgresql/9.5/main/postgresql.conf" ```sh sudo vim /etc/postgresql/9.5/main/postgresql.conf ``` postgresql.conf ``` ... data_directory = '<path>/django_project/data_directory/postgresql/9.5/main' ... ``` 3. Restarting PostgreSQL ``` sudo service postgresql restart ``` Or ``` sudo service postgresql stop && sudo service postgresql start ``` <file_sep># run AppConfig.ready() default_app_config = 'monitor_app.apps.MonitorAppConfig' <file_sep># Generated by Django 3.0.8 on 2020-11-11 14:30 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('monitor_app', '0019_taskstatus'), ] operations = [ migrations.DeleteModel( name='CpuTemperature', ), migrations.DeleteModel( name='TimePrice', ), ] <file_sep># Generated by Django 3.0.8 on 2020-10-29 17:26 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('monitor_app', '0008_auto_20201029_2343'), ] operations = [ migrations.CreateModel( name='AuthGroup', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('group', models.CharField(max_length=25)), ], options={ 'verbose_name_plural': 'AuthGroup', }, ), ] <file_sep>/* body color #4e73df */ /* card color #4e73df */ body{ background-color: #4e73df; } .card{ margin-left: -1rem; margin-right: -1rem; margin-top: 3rem; } .text-align-center{ text-align: center; } .row{ justify-content: center; } .custom-image{ max-width: 100%; height: 400px; } .google-font{ font-family: 'ABeeZee'; font-size: 22px; } .custom-textbox{ margin-top: 0.9rem; border-radius: 40px; border:1px solid #aaa; padding: 13px; font-size:10px; width: 300px; } .custom-btn{ background-color:#4e73df; padding: 10px; border-radius: 40px; font-size: 13px; width:300px; border: 1px solid The #aaa; } .error-text{ color: red; font-size: 15px; text-align: center; } <file_sep>from django.shortcuts import render from datetime import datetime, timedelta, date import json from django.contrib.auth.models import User from .ModelDataHandler import ModelDataHandler from ...models import * class PiCpuTempStatusHander(ModelDataHandler): def getData(self): #Status data pi_cpu_temperature = TaskStatus.objects.get(task_name="PI CPU TEMPERATURE") return { 'title': pi_cpu_temperature.task_name, 'status': pi_cpu_temperature.status } def 
getTitle(self): return 'pi_cpu_temperature_data' def updateStatusData(self, statusData): task = TaskStatus.objects.get(task_name="PI CPU TEMPERATURE") task.status = statusData task.save() def create_fake_data(self, status): task = TaskStatus() task.task_name = "PI CPU TEMPERATURE" task.status = status task.save() <file_sep>from django.shortcuts import render from ..models import * from datetime import datetime, timedelta, date import json from django.contrib.auth.models import User from django.db import connection from django.views import View from django.core.cache import cache import glob from os.path import dirname, basename, join handlers_collection = glob.glob(join(dirname(__file__), "handlers", "*.py")) for f in handlers_collection: import_script =\ """\ from .{0}.{1} import *\ """.format("handlers", basename(f[:-3]).replace('/', '.')) # print(import_script) exec (import_script) class SysLog(View): def get(self, request): now = datetime.now() threshold_timestamp = now - timedelta(hours=48) connHandler = ConnectionHandler() connHandler.setQueryServerName("Backup") connHandler.setTitle("connections_data") connHandler.setTimezoneShift(timedelta(hours=8)) connHandler.setThresholdTimestamp(threshold_timestamp) connHandler.useCacheData(cache) connHandler_private_server = ConnectionHandler() connHandler_private_server.setQueryServerName("PrivateServer") connHandler_private_server.setTitle("connections_data_private_server") connHandler_private_server.setTimezoneShift(timedelta(hours=8)) connHandler_private_server.setThresholdTimestamp(threshold_timestamp) connHandler_private_server.useCacheData(cache) connHandler_backup_cpu = ConnectionHandler() connHandler_backup_cpu.setQueryServerName("BackupCpuPercentage") connHandler_backup_cpu.setTitle("connections_data_backup_cpu") connHandler_backup_cpu.setTimezoneShift(timedelta(hours=8)) connHandler_backup_cpu.setThresholdTimestamp(threshold_timestamp) connHandler_backup_cpu.useCacheData(cache) connHandler_backup_mem = ConnectionHandler() connHandler_backup_mem.setQueryServerName("BackupMemPercentage") connHandler_backup_mem.setTitle("connections_data_backup_mem") connHandler_backup_mem.setTimezoneShift(timedelta(hours=8)) connHandler_backup_mem.setThresholdTimestamp(threshold_timestamp) connHandler_backup_mem.useCacheData(cache) connHandler_webserver_cpu = ConnectionHandler() connHandler_webserver_cpu.setQueryServerName("WebServerCpuPercentage") connHandler_webserver_cpu.setTitle("connections_data_webserver_cpu") connHandler_webserver_cpu.setTimezoneShift(timedelta(hours=8)) connHandler_webserver_cpu.setThresholdTimestamp(threshold_timestamp) connHandler_webserver_cpu.useCacheData(cache) connHandler_webserver_mem = ConnectionHandler() connHandler_webserver_mem.setQueryServerName("WebServerMemPercentage") connHandler_webserver_mem.setTitle("connections_data_webserver_mem") connHandler_webserver_mem.setTimezoneShift(timedelta(hours=8)) connHandler_webserver_mem.setThresholdTimestamp(threshold_timestamp) connHandler_webserver_mem.useCacheData(cache) messageHandler = MessageCenterHandler(self.request) messageHandler.setNow(now) contextHandler = ContextHandler() contextHandler.join(messageHandler) contextHandler.join(connHandler) contextHandler.join(connHandler_private_server) contextHandler.join(connHandler_backup_cpu) contextHandler.join(connHandler_backup_mem) contextHandler.join(connHandler_webserver_cpu) contextHandler.join(connHandler_webserver_mem) contextHandler.fillInContext() return render(self.request, 'template_dashboard/sysLog.html', 
contextHandler.getContext()) <file_sep># Generated by Django 3.0.8 on 2020-09-22 06:33 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('monitor_app', '0002_auto_20200921_2335'), ] operations = [ migrations.CreateModel( name='CpuTemperature', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('cpuTemperature', models.FloatField(blank=True, default=None, null=True)), ('time', models.DateTimeField()), ], options={ 'verbose_name_plural': 'Cpu Temperature', }, ), ] <file_sep>import datetime import threading import time from termcolor import colored import signal import os stop = False def signal_handler(signal, frame): global stop print_str = '[] Ctrl+C KeyboardInterupt' print(colored(print_str, 'yellow', attrs=['bold'])) stop = True def job(): count_hours = 0 while not stop: count_hours +=1 print_str = '[BACKUP LOG] Hours Count = '+str(count_hours) print(colored(print_str, 'yellow', attrs=['bold'])) time.sleep(60*60) if(count_hours == 3): # Do Backup print(colored('[BACKUP LOG] Backup process starts', 'yellow', attrs=['bold'])) os.system('python3 ../backup_git.py') # Reset Count count_hours = 0 if __name__ == '__main__': t = threading.Thread(target = job) t.start() print(colored('[BACKUP LOG] Thread process starts', 'yellow', attrs=['bold'])) signal.signal(signal.SIGINT, signal_handler) signal.pause() <file_sep>$(document).on("click", "#plant-img", function(){ var src = $(this).attr('src') // alert(src); $("#modal-img").attr('src',src); $("#joes").modal('show'); // return false to prevent the page goes to top return false; }) <file_sep>#!/usr/bin/env bash if [ $# -eq 0 ];then echo "-h to see the usage" exit 1 fi #Install getops dependencies # output 1 means yet installed dpkg-query -l util-linux >/dev/null 2>&1 if [ $? -eq 1 ]; then sudo apt-get update >/dev/null 2>&1 sudo apt-get install -y util-linux >/dev/null 2>&1 fi create_app() { cd .. python3 manage.py startapp $1 } while getopts "hn:" o; do case "$o" in h) echo "create-app" echo "" echo "usage: create-app [arguments]" echo "Arguments:" echo " -n [fname] Create django apps with directory named [fname] " echo " -h Print Help(this message) and exit" echo "" ;; n) echo "APP_NAME : ${OPTARG}" create_app ${OPTARG} ;; *) echo "-h to see the usage!" 
;; esac done <file_sep>from django.shortcuts import render from datetime import datetime, timedelta, date import json from django.contrib.auth.models import User from .ModelDataHandler import ModelDataHandler from ...models import * class TemperatureHandler(ModelDataHandler): def __init__(self): self.timezone_shift = None self.threshold_timestamp = None def setTimezoneShift(self, timedeltaObject): self.timezone_shift = timedeltaObject print(self.timezone_shift) def setThresholdTimestamp(self, datetimeObject): self.threshold_timestamp = datetimeObject print(self.threshold_timestamp) def getData(self): #override temps = Temperature.objects.filter(time__gte=(self.threshold_timestamp)) data = dict() data['timestamp_array'] = [(temp.time + self.timezone_shift).strftime('%m/%d %H:%M') for temp in temps] data['temp_array'] = [temp.temperature for temp in temps] return json.dumps(data) def getTitle(self): #override return 'temp_data' def insertData(self, temp): print("temp", temp) data = Temperature() data.temperature = temp data.time = datetime.now() data.save() return 'succeed' <file_sep>#!/usr/bin/env bash export DEBUG_MODE=False echo '===DEBUG MODE====' echo $DEBUG_MODE echo '=================' sudo /etc/init.d/redis-server stop sudo /etc/init.d/redis-server start ./make-migrations-and-migrate sudo service postgresql stop sudo service postgresql start cd .. && python3 manage.py test cd script python3 ../backup_thread.py & python3 ../updateCache_thread.py & gunicorn -c ../gunicorn_config/config.py django_project.wsgi <file_sep># Generated by Django 3.0.8 on 2020-09-28 00:05 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('monitor_app', '0004_timeprice'), ] operations = [ migrations.CreateModel( name='GrowthRate', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('plant_id', models.IntegerField(blank=True, null=True)), ('rate', models.FloatField(blank=True, default=None, null=True)), ], options={ 'verbose_name_plural': 'GrowthRate', }, ), ] <file_sep># secure_data > We have a data loader which loads untracked json file of secure data to prevent sensitive data exposure in repository such as secret key, stmp account/password etc. ## Setup #### 1. Create a json file named secure_data.json in this directory #### 2. 
Copy the text in secure_data_example.json and fill in custom parameters <file_sep>// Area Chart Example var ctx4 = document.getElementById("chart_backup_mem"); var chart4 = new Chart(ctx4, { type:'line', data: { labels: connections_data_backup_mem['timestamp_array'], datasets: [{ lineTension: 0, label: 'Number of Connections', data: connections_data_backup_mem['connections_array'], backgroundColor: [ 'rgba(255, 99, 132, 0.2)', ], borderColor: [ 'rgba(255,99,132,1)', ], borderWidth: 1 }] }, options:{ elements: { point: { radius: 0 } }, tooltips: {enabled: false}, hover: {mode: null}, maintainAspectRatio:false, legend:{ display:false }, title:{}, scales:{ xAxes:[ { ticks:{ fontColor:'#858796', padding:20, autoSkip: true, maxTicksLimit: 20, } } ], yAxes:[ { gridLines:{ color:'rgb(234, 236, 244)', zeroLineColor:'rgb(234, 236, 244)', drawBorder:false, drawTicks:false, borderDash:[2], zeroLineBorderDash:[2] }, ticks:{ fontColor:'#858796', padding:5, suggestedMin: 0, suggestedMax: 5 } } ] } } }); setInterval(function(){ $.ajax({ headers: {'X-CSRFToken': csrftoken}, type: "POST", url: update_connection_path, data: {'server_name': 'BackupMemPercentage'}, }).done(function(response){ chart4 = new Chart(ctx4, { type:'line', data: { labels: JSON.parse(response['data'])['timestamp_array'], datasets: [{ lineTension: 0, label: 'Number of Connections', data: JSON.parse(response['data'])['connections_array'], backgroundColor: [ 'rgba(255, 99, 132, 0.2)', ], borderColor: [ 'rgba(255,99,132,1)', ], borderWidth: 1 }] }, options:{ elements: { point: { radius: 0 } }, tooltips: {enabled: false}, hover: {mode: null}, maintainAspectRatio:false, legend:{ display:false }, title:{}, scales:{ xAxes:[ { ticks:{ fontColor:'#858796', padding:20, autoSkip: true, maxTicksLimit: 20, } } ], yAxes:[ { gridLines:{ color:'rgb(234, 236, 244)', zeroLineColor:'rgb(234, 236, 244)', drawBorder:false, drawTicks:false, borderDash:[2], zeroLineBorderDash:[2] }, ticks:{ fontColor:'#858796', padding:5, suggestedMin: 0, suggestedMax: 5 } } ] } } }); }) }, 60000); <file_sep>from django.db import models from django.contrib.auth.models import User from .AuthGroup import AuthGroup class Profile(models.Model): user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key=True) activated = models.BooleanField(default=False) permission = models.OneToOneField(AuthGroup, blank=True, null=True, on_delete=models.SET_NULL) def __str__(self): return str(self.user.email) class Meta: verbose_name_plural = 'Profile' <file_sep>from django.http import HttpResponse from ..models import * import glob from os.path import dirname, basename, join handlers_collection = glob.glob(join(dirname(__file__), "handlers", "*.py")) for f in handlers_collection: import_script =\ """\ from .{0}.{1} import *\ """.format("handlers", basename(f[:-3]).replace('/', '.')) # print(import_script) exec (import_script) from django.views import View from django.utils.decorators import method_decorator from django.views.decorators.csrf import csrf_exempt from secure_data.secure_data_loader import SecureDataLoader @method_decorator(csrf_exempt, name='dispatch') class UpdateCameraTask(View): def post(self, request): secure_data_loader = SecureDataLoader() received_data = json.loads(request.body.decode("utf-8")) print(received_data) if('raspberry_secret_key' in received_data and received_data['raspberry_secret_key'] == secure_data_loader.secure_data['RASPBERRY_SECRET_KEY']): taskHandler = CameraTaskStatusHander() taskHandler.updateStatusData(received_data['status']) return 
HttpResponse(status) else: return HttpResponse('wrong secret key') <file_sep>from datetime import datetime, date import json from ...models import * from termcolor import colored import base64 import numpy as np import cv2 class ImageHandler(): def __init__(self): self.raw_data = None self.image = None self.now = None self.received_data = None def setNow(self, now): self.now = now def receiveEncodedRawData(self, request): self.received_data = json.loads(request.body.decode("utf-8")) self.raw_data = self.received_data['image'] # encoding decoding processing self.raw_data = self.raw_data.encode("utf-8") # print(raw_data) def decodeRawDataToImage(self): imgString = base64.b64decode(self.raw_data) np_array = np.fromstring(imgString, np.uint8) # print(np_array) self.image = cv2.imdecode(np_array, cv2.IMREAD_COLOR) def updatePlantData(self): django_path = '../' # original location: django_project/script image_dir = 'monitor_app/static/data_image/' image_name = self.now.strftime("%Y_%m_%d_")+str(self.received_data['id'])+'.jpg' print(self.image) cv2.imwrite(django_path+image_dir+image_name, self.image) print(colored('[VIEW LOG] receiveImage - Image saved.', 'yellow', attrs=['bold'])) if(PlantData.objects.filter(image_url=image_name).exists() == False): plant_data = PlantData() plant_data.aruco_id = self.received_data['id'] plant_data.image_url = image_name plant_data.type = "N/A" plant_data.growth_rate = 0.0 plant_data.seed_date = datetime.strptime('2020-10-29', '%Y-%m-%d').date() plant_data.data_date = date.today() plant_data.status = "N/A" plant_data.save() print(colored('[VIEW LOG] receiveImage - PlantData saved.', 'yellow', attrs=['bold'])) def store3dContructImage(self): django_path = '../' # original location: django_project/script image_dir = 'monitor_app/static/data_3dConstruction_image/' image_name = self.now.strftime("%Y_%m_%d_")+str(self.received_data['id'])+'.jpg' cv2.imwrite(django_path+image_dir+image_name, self.image) print(colored('[VIEW LOG] store3dContructImage - Image saved.', 'yellow', attrs=['bold'])) <file_sep># Generated by Django 3.0.8 on 2020-11-09 13:20 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('monitor_app', '0017_auto_20201108_1745'), ] operations = [ migrations.AddField( model_name='messagelog', name='type', field=models.CharField(default='', max_length=25), ), ] <file_sep># Generated by Django 3.0.8 on 2020-09-28 00:11 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('monitor_app', '0005_growthrate'), ] operations = [ migrations.AddField( model_name='growthrate', name='time', field=models.DateTimeField(null=True), ), ] <file_sep>class FloatConverter: regex = '[\d\.\d]+' def to_python(self, value): return float(value) def to_url(self, value): return '{}'.format(value) <file_sep>from django.http import HttpResponse from django.views.decorators.csrf import csrf_exempt from ..models import * from datetime import datetime, date import json from termcolor import colored from django.utils.decorators import method_decorator import glob from os.path import dirname, basename, join handlers_collection = glob.glob(join(dirname(__file__), "handlers", "*.py")) for f in handlers_collection: import_script =\ """\ from .{0}.{1} import *\ """.format("handlers", basename(f[:-3]).replace('/', '.')) # print(import_script) exec (import_script) from django.views import View from secure_data.secure_data_loader import SecureDataLoader @method_decorator(csrf_exempt, name='dispatch') class 
Receive3dContructImage(View): def post(self, request): secure_data_loader = SecureDataLoader() received_data = json.loads(request.body.decode("utf-8")) if('raspberry_secret_key' in received_data and received_data['raspberry_secret_key'] == secure_data_loader.secure_data['RASPBERRY_SECRET_KEY']): now = datetime.now() imgHandler = ImageHandler() imgHandler.setNow(now) imgHandler.receiveEncodedRawData(request) imgHandler.decodeRawDataToImage() imgHandler.store3dContructImage() return HttpResponse('Succeed') else: return HttpResponse('wrong rasp secret key') <file_sep>from django.urls import path, include from . import views from . import converters from django.urls import register_converter register_converter(converters.FloatConverter, 'float') urlpatterns = [ path('', views.Dashboard.as_view(), name='dashboard'), path('reconstruction', views.Reconstruction.as_view(), name='reconstruction'), path('accounts/login/', # replace auth.accounts.login views.Login.as_view(), name='login'), path('accounts/logout/', views.Logout.as_view(), name='logout'), path('register', views.Register.as_view(), name='register'), path('forgot_password', views.ForgotPassword.as_view(), name='forgot_password'), path('temperature', views.TemperatureReceiver.as_view(), name='temperature'), path('humidity', views.HumidityReceiver.as_view(), name='humidity'), path('receiveImage', views.ReceiveImage.as_view(), name='receiveImage'), path('receive3dContructImage', views.Receive3dContructImage.as_view(), name='receive3dContructImage'), path('activate/<str:uid>/<str:token>', views.Activate.as_view(), name='activate'), path('updateLogMessage', views.UpdateLogMessageView.as_view(), name='updateLogMessage'), path('writeLogMessage', views.LogMessageCreatorView.as_view(), name='writeLogMessage'), path('updateCameraTask', views.UpdateCameraTask.as_view(), name='updateCameraTask'), path('updatePiCpuTemperature', views.UpdatePiCpuTemperature.as_view(), name='updatePiCpuTemperature'), path('updateWarningCount', views.UpdateWarningCount.as_view(), name='updateWarningCount'), path('updateWateringStatus', views.UpdateWateringStatus.as_view(), name='updateWateringStatus'), path('passDoReconsturctionHint', views.PassDoReconsturctionHint.as_view(), name='passDoReconsturctionHint'), path('tables', views.Tables.as_view(), name='tables'), path('sysLog', views.SysLog.as_view(), name='sysLog'), path('connectionsNumReceiver', views.ConnectionsNumReceiver.as_view(), name='connectionsNumReceiver'), path('updateConnectionsView', views.UpdateConnectionsView.as_view(), name='updateConnectionsView'), path('resetPassword/<str:username>/<str:token>', views.ResetPassword.as_view(), name='resetPassword'), path('testFunction', views.TestFunction.as_view(), name='testFunction'), path('updateCache', views.UpdateCache.as_view(), name='updateCache'), ] <file_sep>from django.shortcuts import render, redirect from django.contrib.auth.models import User from ..models import * from django.contrib.auth.tokens import default_token_generator from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode from django.utils.encoding import force_bytes, force_text from django.views import View import glob from os.path import dirname, basename, join handlers_collection = glob.glob(join(dirname(__file__), "handlers", "*.py")) for f in handlers_collection: import_script =\ """\ from .{0}.{1} import *\ """.format("handlers", basename(f[:-3]).replace('/', '.')) # print(import_script) exec (import_script) class Activate(View): def get(self, request, uid, token): 
authHandler = AuthenticationHandler() authHandler.activate(request, uid, token) contextHandler = ContextHandler() contextHandler.join(authHandler) contextHandler.fillInContext() return render(request, 'template_dashboard/message_template.html', contextHandler.getContext()) <file_sep># Generated by Django 3.0.8 on 2020-10-29 18:24 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('monitor_app', '0009_authgroup'), ] operations = [ migrations.RenameField( model_name='profile', old_name='activation', new_name='activated', ), migrations.AddField( model_name='profile', name='permission', field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='monitor_app.AuthGroup'), ), ] <file_sep>from django.test import TestCase from django.urls import resolve from django.http import HttpRequest from django.test import RequestFactory from django.contrib.auth.models import AnonymousUser import monitor_app.views from monitor_app.views_collection.handlers.PiCpuTempStatusHander import PiCpuTempStatusHander from monitor_app.views_collection.handlers.WateringStatusHander import WateringStatusHander from monitor_app.views_collection.handlers.CameraTaskStatusHander import CameraTaskStatusHander from monitor_app.views_collection.handlers.WarningStatusHander import WarningStatusHander from monitor_app.views_collection.handlers.TemperatureHandler import TemperatureHandler from monitor_app.views_collection.handlers.HumidityHandler import HumidityHandler # test url resolves to dashboard view class DashboardTest(TestCase): def setUp(self): piTempHandler = PiCpuTempStatusHander() piTempHandler.create_fake_data("1000c") wateringHandler = WateringStatusHander() wateringHandler.create_fake_data("DDone") cameraTaskHandler = CameraTaskStatusHander() cameraTaskHandler.create_fake_data("87%") warningHandler = WarningStatusHander() warningHandler.create_fake_data("1000") temperatureHandler = TemperatureHandler() temperatureHandler.insertData(778.3) temperatureHandler.insertData(778.2) humidityHandler = HumidityHandler() humidityHandler.insertData(777.3) humidityHandler.insertData(777.2) def test_root_url_resolves_to_dashboard_view(self): found = resolve('/') self.assertEqual(found.func.view_class, monitor_app.views_collection.Dashboard.Dashboard) def test_root_url_resolves_to_dashboard_view(self): found = resolve('/') self.assertEqual(found.func.view_class, monitor_app.views_collection.Dashboard.Dashboard) def test_piCpuTemperature(self): request = RequestFactory().get('/') request.user = AnonymousUser() view = monitor_app.views_collection.Dashboard.Dashboard.as_view() response = view(request) self.assertIn(b'<span>1000c</span></div>', response.content) def test_wateringStatus(self): request = RequestFactory().get('/') request.user = AnonymousUser() view = monitor_app.views_collection.Dashboard.Dashboard.as_view() response = view(request) self.assertIn(b'<span>DDone</span></div>', response.content) def test_cameraTask(self): request = RequestFactory().get('/') request.user = AnonymousUser() view = monitor_app.views_collection.Dashboard.Dashboard.as_view() response = view(request) self.assertIn(b'<div class="h5 mb-0 mr-3 font-weight-bold text-gray-800">87%</div>', response.content) def test_warningCount(self): request = RequestFactory().get('/') request.user = AnonymousUser() view = monitor_app.views_collection.Dashboard.Dashboard.as_view() response = view(request) self.assertIn(b'<div class="h5 mb-0 
font-weight-bold text-gray-800">1000</div>', response.content) def test_temperatureChart(self): request = RequestFactory().get('/') request.user = AnonymousUser() view = monitor_app.views_collection.Dashboard.Dashboard.as_view() response = view(request) self.assertIn(b'[778.3, 778.2]', response.content) def test_humidityChart(self): request = RequestFactory().get('/') request.user = AnonymousUser() view = monitor_app.views_collection.Dashboard.Dashboard.as_view() response = view(request) self.assertIn(b'[777.3, 777.2]', response.content) def test_notLoginRingButton(self): request = RequestFactory().get('/') request.user = AnonymousUser() view = monitor_app.views_collection.Dashboard.Dashboard.as_view() response = view(request) self.assertNotIn(b'id="messageCenter"', response.content) def test_loginRingButton(self): #see functional test pass <file_sep>from ..models import * from termcolor import colored class CheckAuthGroup(): def __init__(self): self.check_list = ["Viewer", "Author"] def system_check(self): for group in self.check_list: try: auth_group = AuthGroup.objects.get(group=group) except AuthGroup.DoesNotExist: print(str(group)+" is not in AuthGroup!") assert len(AuthGroup.objects.all()) == len(self.check_list) print(colored('[OK] CheckAuthGroup system check pass', 'green', attrs=['bold'])) <file_sep>from django.db import models class AuthGroup(models.Model): group = models.CharField(max_length=25) def __str__(self): return str(self.group) class Meta: verbose_name_plural = 'AuthGroup' <file_sep>from django.shortcuts import render from datetime import datetime, timedelta, date import json from django.contrib.auth.models import User from .ModelDataHandler import ModelDataHandler from ...models import * class HumidityHandler(ModelDataHandler): def __init__(self): self.timezone_shift = None self.threshold_timestamp = None def setTimezoneShift(self, timedeltaObject): self.timezone_shift = timedeltaObject print(self.timezone_shift) def setThresholdTimestamp(self, datetimeObject): self.threshold_timestamp = datetimeObject print(self.threshold_timestamp) def getData(self): #override humids = Humidity.objects.filter(time__gte=(self.threshold_timestamp)) data = dict() data['timestamp_array'] = [(humid.time + timedelta(hours=8)).strftime('%m/%d %H:%M') for humid in humids] data['humid_array'] = [humid.humidity for humid in humids] return json.dumps(data) def getTitle(self): #override return 'humid_data' def insertData(self, humid): print("humid", humid) data = Humidity() data.humidity = humid data.time = datetime.now() data.save() return 'succeed' <file_sep># Generated by Django 3.0.8 on 2020-11-06 14:33 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('monitor_app', '0013_plantdata_date'), ] operations = [ migrations.RemoveField( model_name='plantdata', name='date', ), migrations.AddField( model_name='plantdata', name='data_date', field=models.DateField(null=True), ), migrations.AlterField( model_name='plantdata', name='seed_date', field=models.DateField(null=True), ), ] <file_sep># Generated by Django 3.0.8 on 2020-09-21 22:17 from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Temperature', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('temperature', models.FloatField(blank=True, default=None, null=True)), ('time', models.DateTimeField()), ], options={ 'verbose_name_plural': 
'temperatures', }, ), ] <file_sep>// Area Chart Example var ctx = document.getElementById("chart_humid"); var chart = new Chart(ctx, { type:'line', data: { labels: humid_data['timestamp_array'], datasets: [{ label: 'Humidity', data: humid_data['humid_array'], backgroundColor: [ 'rgba(255, 99, 132, 0.2)', ], borderColor: [ 'rgba(255,99,132,1)', ], borderWidth: 1 }] }, options:{ maintainAspectRatio:false, legend:{ display:false }, title:{}, scales:{ xAxes:[ { ticks:{ fontColor:'#858796', padding:20, autoSkip: true, maxTicksLimit: 20, } } ], yAxes:[ { gridLines:{ color:'rgb(234, 236, 244)', zeroLineColor:'rgb(234, 236, 244)', drawBorder:false, drawTicks:false, borderDash:[2], zeroLineBorderDash:[2] }, ticks:{ fontColor:'#858796', padding:5, suggestedMin: 0, suggestedMax: 100 } } ] } } }); <file_sep>from django.shortcuts import render from datetime import datetime, timedelta, date import json from .ModelDataHandler import ModelDataHandler from ...models import * from django.shortcuts import render, redirect from django.contrib.auth.models import User from django.contrib.auth.tokens import default_token_generator from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode from django.utils.encoding import force_bytes, force_text from django.contrib.auth import authenticate, login as auth_login from django.contrib.auth import logout as auth_logout class AuthenticationHandler(ModelDataHandler): def __init__(self): self.status = None self.login_flag = False self.user_exits = False self.user_object_queryset_list = None self.repeat_password_is_same = None self.register_completed = False self.email_is_same = None def check_same_password(self, request): password = request.POST.get('password', '') repeat_password = request.POST.get('repeat_password', '') if(password == repeat_password): self.repeat_password_is_same = True else: self.repeat_password_is_same = False self.status = 'Repeat Password is not the same as Password!' 
def pwd_repeatPwd_is_same(self): return self.repeat_password_is_same def check_same_username(self, request): email = request.POST.get('email', '') if User.objects.filter(username=email).exists(): self.email_is_same = True else: self.email_is_same = False def has_username_exists(self): return self.email_is_same def createUserAndProfile(self, request): email = request.POST.get('email', '') password = request.POST.get('password', '') user = User.objects.create_user(username=email, email=email, password=password) user.save() profile = Profile() profile.user = user profile.activation = False profile.save() return user def resetPassword(self, request, uid, token): new_password = request.POST.get('password', '') user = None uid = force_text(urlsafe_base64_decode(uid)) try: user = User.objects.get(pk = uid) except User.DoesNotExist: user = None if user and default_token_generator.check_token(user, token): user.set_password(<PASSWORD>) user.save() def checkUidToken(self, uid, token): user = None uid = force_text(urlsafe_base64_decode(uid)) try: user = User.objects.get(pk=uid) except User.DoesNotExist: user = None ERROR("activate.py : User does not exist!") if user and default_token_generator.check_token(user, token): return True else: return False def activate(self, uid, token): user = None uid = force_text(urlsafe_base64_decode(uid)) try: user = User.objects.get(pk = uid) print(user) except User.DoesNotExist: user = None ERROR("activate.py : User does not exist!") if user and default_token_generator.check_token(user, token): profile = Profile.objects.get(user = user) profile.activated = True profile.save() self.status = "Your account has been successfully activated!" def login(self, request): username = request.POST.get('email', '') password = request.POST.get('password', '') user = authenticate(request, username=username, password=password) if user is not None: auth_login(request, user) self.login_flag = True else: self.login_flag = False self.status = 'Login Failed' def logout(self, request): auth_logout(request) self.login_flag = False def forgot_password(self, request): email = request.POST.get('email', '') print("email", email) self.user_object_queryset_list = User.objects.filter(email=email) print("associated_user", self.user_object_queryset_list) def getForgotPwdUserObjectList(self): return self.user_object_queryset_list def getData(self): return self.status def getTitle(self): return 'status_message' def has_loggedin(self): return self.login_flag def updateStatus(self, status): self.status = status <file_sep>from django.forms import ModelForm, TextInput, DateTimeInput, Textarea from django import forms from .models import * class TemperatureForm(ModelForm): class Meta: model = Temperature exclude = ['temperature', 'time'] <file_sep>import os import json secuere_data_path = os.path.dirname(os.path.abspath(__file__))+"/secure_data.json" # print(secuere_data_path) class SecureDataLoader(): def __init__(self): with open(secuere_data_path, "r") as file: self.secure_data = json.load(file) # print(self.secure_data) if __name__ == '__main__': scdLoader = SecureDataLoader() <file_sep># Generated by Django 3.0.8 on 2020-11-11 13:09 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('monitor_app', '0018_messagelog_type'), ] operations = [ migrations.CreateModel( name='TaskStatus', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('task_name', models.CharField(max_length=25)), ('status', 
models.CharField(max_length=25)), ], options={ 'verbose_name_plural': 'TaskStatus', }, ), ] <file_sep>from django.shortcuts import render from datetime import datetime, timedelta, date import json from django.contrib.auth.models import User from .ModelDataHandler import ModelDataHandler from ...models import * from django.db import connection class PlantTableHandler(ModelDataHandler): def getData(self): # WARNING # You should be very careful whenever you write raw SQL. # Every time you use it, you should properly escape any parameters that # the user can control by using params in order to protect against SQL injection attacks. # Please read more about SQL injection protection. plant_table_title_list = list() plant_table_row_list = list() plant_table_title_list.append("") with connection.cursor() as cursor: cursor.execute("select distinct data_date from monitor_app_plantdata order by data_date") dates = cursor.fetchall() for date in dates: plant_table_title_list.append(date[0].strftime('%m/%d')) print(plant_table_title_list) cursor.execute("select distinct aruco_id from monitor_app_plantdata order by aruco_id") aruco_ids = cursor.fetchall() for aruco_id in aruco_ids: cursor.execute("select image_url from monitor_app_plantdata where aruco_id=%s order by data_date", [aruco_id[0]]) image_urls = cursor.fetchall() image_urls = [url[0] for url in image_urls] image_urls.insert(0, str(aruco_id[0])) plant_table_row_list.append(image_urls) return json.dumps({ 'title': plant_table_title_list, 'data': plant_table_row_list}) def getTitle(self): return 'plant_table' <file_sep># Generated by Django 3.0.8 on 2020-09-26 15:03 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('monitor_app', '0003_cputemperature'), ] operations = [ migrations.CreateModel( name='TimePrice', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('time', models.DateTimeField()), ('price', models.FloatField(blank=True, default=None, null=True)), ('product', models.CharField(max_length=25)), ], options={ 'verbose_name_plural': 'TimePrice', }, ), ] <file_sep>from django.shortcuts import render from ..models import * from datetime import datetime, timedelta, date import json from django.contrib.auth.models import User from django.db import connection from django.views import View from django.http import HttpResponse import glob from os.path import dirname, basename, join handlers_collection = glob.glob(join(dirname(__file__), "handlers", "*.py")) for f in handlers_collection: import_script =\ """\ from .{0}.{1} import *\ """.format("handlers", basename(f[:-3]).replace('/', '.')) # print(import_script) exec (import_script) class ResetPassword(View): def get(self, request, username, token): print(username, token) authHandler = AuthenticationHandler() if authHandler.checkUidToken(username, token): self.token = token self.username = username return render(self.request, 'template_selfdone/reset_password.html', {}) else: return HttpResponse(status=500) def post(self, request, username, token): authHandler = AuthenticationHandler() authHandler.resetPassword(request, username, token) authHandler.updateStatus("Password has been reset, Please login again!") contextHandler = ContextHandler() contextHandler.join(authHandler) contextHandler.fillInContext() return render(request, 'template_dashboard/message_template.html', contextHandler.getContext()) <file_sep>let mountains = table_data['plants']; let path = table_data['path']; function 
generateTableHead(table, data) { let thead = table.createTHead(); let row = thead.insertRow(); for (let key of data) { let th = document.createElement("th"); let text = document.createTextNode(key); th.appendChild(text); row.appendChild(th); } } function generateTable(table, data) { for (let element of data) { // console.log(element) let row = table.insertRow(); for (key in element) { if(key=="Image"){ let cell = row.insertCell(); let a = document.createElement("a"); a.setAttribute("href", "#"); var img = document.createElement('img'); img.src = path+element[key]; img.style.width = "15%"; img.style.height = "15%"; img.setAttribute("onclick","function()"); img.setAttribute("id", "plant-img"); a.appendChild(img) cell.appendChild(a); // <img src={% static "2020jav_10_29_1.jpg" %} width="70%" height="70%"> } else{ let cell = row.insertCell(); let text = document.createTextNode(element[key]); cell.appendChild(text); } } } } let table = document.querySelector("#dataTable"); // querySelector("#id") let data = Object.keys(mountains[0]); generateTable(table, mountains); generateTableHead(table, data); <file_sep>from django.shortcuts import render from ..models import * from django.contrib.auth.tokens import default_token_generator from django.contrib.auth.models import User import sys import os from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText import smtplib from django.template.loader import render_to_string from django.utils.http import urlsafe_base64_encode from django.utils.encoding import force_bytes # path = "...." # directory - django_project (outest one) from secure_data.secure_data_loader import SecureDataLoader from django.views import View import glob from os.path import dirname, basename, join handlers_collection = glob.glob(join(dirname(__file__), "handlers", "*.py")) for f in handlers_collection: import_script =\ """\ from .{0}.{1} import *\ """.format("handlers", basename(f[:-3]).replace('/', '.')) # print(import_script) exec (import_script) class ForgotPassword(View): def post(self, request): secure_data_loader = SecureDataLoader() sender = MailSender() authHandler = AuthenticationHandler() authHandler.forgot_password(request) if authHandler.getForgotPwdUserObjectList().exists(): for user in authHandler.getForgotPwdUserObjectList(): sender.setSubject("Password Reset Requested") sender.setSmtpAccount(secure_data_loader.secure_data['SMTP_ACCOUNT']) sender.setSendTo(user.email) sender.setEmailTemplate("../templates/password_reset_email_template.txt") sender.setConfig({ "email":user.email, 'domain':secure_data_loader.secure_data['DOMAIN'], 'site_name': 'Website', "uid": urlsafe_base64_encode(force_bytes(user.pk)), "user": user, 'token': default_token_generator.make_token(user), 'protocol': 'https', }) sender.sendMail(secure_data_loader.secure_data['SMTP_ACCOUNT'], secure_data_loader.secure_data['SMTP_PASSWORD']) authHandler.updateStatus("Please click on the that has just been sent to your email account to change your password.") contextHandler = ContextHandler() contextHandler.join(authHandler) contextHandler.fillInContext() return render(request, 'template_dashboard/message_template.html', contextHandler.getContext()) else: authHandler.updateStatus("Please click on the that has just been sent to your email account to change your password.") contextHandler = ContextHandler() contextHandler.join(authHandler) contextHandler.fillInContext() return render(request, 'template_dashboard/message_template.html', contextHandler.getContext()) def get(self, 
request): if not request.user.is_authenticated: context = {} return render(request, 'template_dashboard/forgot_password.html', context) else: context = { 'status_message': "You have already logged in." } return render(request, 'template_dashboard/message_template.html', context) <file_sep>import abc class ModelDataHandler(metaclass=abc.ABCMeta): @abc.abstractmethod def getData(self): return NotImplemented @abc.abstractmethod def getTitle(self): return NotImplemented <file_sep>from django.shortcuts import render from datetime import datetime, timedelta, date import json from django.contrib.auth.models import User from .ModelDataHandler import ModelDataHandler from .MessageCenterHandler import MessageCenterHandler from ...models import * from django.views.decorators.csrf import csrf_exempt # Inherit MessageCenterHandler class UpdateMessageHandler(MessageCenterHandler): def getData(self): unread_log_msg_num = int(self.request.POST.get('unread_num')) # Description: When call this view from ajax, # Filter out all unread messages and update to "read", # And render the code piece to respond ajax, # And then replace new html messagelog_data = list() now = datetime.now() messagelog_list = MessageLog.objects.filter(time__lte=now).order_by('-time')[:unread_log_msg_num+5] for log in messagelog_list: time_delta = now - log.time.replace(tzinfo=None) messagelog_data.append({ 'delta_time': self.convertTimeDeltaToDayHourMinString(time_delta), 'title': log.title, 'log': log.log, 'type': log.type, }) MessageLog.objects.filter(time__lte=now).update(read=True) # print(messagelog_data) return messagelog_data <file_sep>from django.shortcuts import render from datetime import datetime, timedelta, date import json from django.contrib.auth.models import User from .ModelDataHandler import ModelDataHandler from ...models import * class ContextHandler(): def __init__(self): self.data_handler_list = list() self.context = {} def join(self, dataHandler): self.data_handler_list.append(dataHandler) def fillInContext(self): for data in self.data_handler_list: self.context[data.getTitle()] = data.getData() def clearContext(self): self.context = {} def getContext(self): return self.context <file_sep># WARNING before running the script below, make sure the db account has the permission # to create database (because in testing, django creates a db and destroy it after testing) # > python3 manage.py test # the command to allow account to create db # > sudo -u postgres psql # > ALTER USER <django-db-account> CREATEDB; import glob from os.path import dirname, basename, join tests_collection = glob.glob(join(dirname(__file__), "tests_collection", "*.py")) for f in tests_collection: import_script =\ """\ from .{0}.{1} import *\ """.format("tests_collection", basename(f[:-3]).replace('/', '.')) # print(import_script) exec (import_script) <file_sep>function clickCount(){ click_msgbell_count++; // console.log(click_msgbell_count); // remove red unread hint let msgCenterNav = document.querySelector("#alertsDropdown"); let unreadCountSpan = document.querySelector("#unread-red-message-count") if(unreadCountSpan != null){ msgCenterNav.removeChild(unreadCountSpan); } if(click_msgbell_count <= 1){ // show unread message board let h6 = document.createElement("h6"); h6.setAttribute("class", "dropdown-header"); let text = document.createTextNode("Alerts Center"); h6.appendChild(text); var msgCenter = document.querySelector("#messageCenter"); while(msgCenter.firstChild){ msgCenter.removeChild(msgCenter.firstChild); } msgCenter.appendChild(h6); 
for(let data of messagelog_data['messagelog_array']){ let a = document.createElement("a"); a.setAttribute("class", "dropdown-item d-flex align-items-center"); a.setAttribute("href", "#"); let div1 = document.createElement("div"); div1.setAttribute("class", "mr-3"); let div2 = document.createElement("div"); if(data['type']=="LOG"){ div2.setAttribute("class", "icon-circle bg-primary"); let i = document.createElement("i"); i.setAttribute("class", "fas fa-file-alt text-white"); div2.appendChild(i); } else if(data['type']=="WARNING"){ div2.setAttribute("class", "icon-circle bg-warning"); let i = document.createElement("i"); i.setAttribute("class", "fas fa-exclamation-triangle text-white"); div2.appendChild(i); } div1.appendChild(div2); a.appendChild(div1); let div3 = document.createElement("div"); let div4 = document.createElement("div"); div4.setAttribute("class", "small text-gray-500"); let text1 = document.createTextNode(data['delta_time']); div4.appendChild(text1); div3.appendChild(div4); if(data['read']){ let text2 = document.createTextNode(data['log']); div3.appendChild(text2); } else{ let span = document.createElement("span"); span.setAttribute("class", "font-weight-bold"); let text2 = document.createTextNode(data['log']); span.appendChild(text2); div3.appendChild(span); } a.appendChild(div3); msgCenter.appendChild(a); } // // ajax pass read value back to django // for(let data of messagelog_data['messagelog_array']){ // data['read'] = true; // } } else if(click_msgbell_count >1){ // console.log("unread_red_message_number") // console.log(messagelog_data['unread_red_message_number']) $.ajax({ headers: {'X-CSRFToken': csrftoken}, type: "POST", url: messagelog_data['update_message_path'], data: {'unread_num': messagelog_data['unread_red_message_number']}, }).done(function(respose){ $("#messageCenter").html(respose); }) } } <file_sep>from django.contrib import admin from .models import * import os import glob from os.path import dirname, basename, join models_collection = [file for file in os.listdir(join(dirname(__file__), "models_collection")) if file.endswith(".py")] for f in models_collection: import_script =\ """\ admin.site.register({0})\ """.format(f[:-3]) # print(import_script) exec (import_script) <file_sep>import datetime import threading import time from termcolor import colored import signal import os import json import requests import sys sys.path.append("..") from secure_data.secure_data_loader import SecureDataLoader secure_data_loader = SecureDataLoader() stop = False def signal_handler(signal, frame): global stop print_str = '[] Ctrl+C KeyboardInterupt' print(colored(print_str, 'blue', attrs=['bold'])) stop = True def job(): while not stop: # Do Update Cache print(colored('[Cache LOG] UpdateCache process starts', 'blue', attrs=['bold'])) data = { 'raspberry_secret_key': secure_data_loader.secure_data['RASPBERRY_SECRET_KEY'] } headers = {'content-type': 'application/json'} r = requests.post("https://plantmonitor.mooo.com/updateCache", data=json.dumps(data), headers=headers) print(colored('[Cache LOG] UpdateCache process finished', 'blue', attrs=['bold'])) time.sleep(60) if __name__ == '__main__': t = threading.Thread(target = job) t.start() print(colored('[Cache LOG] Thread process starts', 'blue', attrs=['bold'])) signal.signal(signal.SIGINT, signal_handler) signal.pause() <file_sep># Generated by Django 3.0.8 on 2020-09-21 23:35 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('monitor_app', '0001_initial'), ] operations = [ 
migrations.CreateModel( name='Humidity', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('humidity', models.FloatField(blank=True, default=None, null=True)), ('time', models.DateTimeField()), ], options={ 'verbose_name_plural': 'humidity', }, ), migrations.AlterModelOptions( name='temperature', options={'verbose_name_plural': 'temperature'}, ), ] <file_sep>from django.shortcuts import render from ..models import * from django.views.decorators.csrf import csrf_exempt import json from datetime import datetime, timedelta import glob from os.path import dirname, basename, join handlers_collection = glob.glob(join(dirname(__file__), "handlers", "*.py")) for f in handlers_collection: import_script =\ """\ from .{0}.{1} import *\ """.format("handlers", basename(f[:-3]).replace('/', '.')) # print(import_script) exec (import_script) from django.views import View from django.http import JsonResponse class UpdateConnectionsView(View): def post(self, request): server_name = request.POST.get('server_name') now = datetime.now() threshold_timestamp = now - timedelta(hours=48) connHandler = ConnectionHandler() connHandler.setQueryServerName(server_name) connHandler.setTitle("data") connHandler.setTimezoneShift(timedelta(hours=8)) connHandler.setThresholdTimestamp(threshold_timestamp) contextHandler = ContextHandler() contextHandler.join(connHandler) contextHandler.fillInContext() return JsonResponse(contextHandler.getContext()) <file_sep>from django.shortcuts import render, redirect from ..models import * from django.contrib.auth.tokens import default_token_generator from django.contrib.auth.models import User import sys import os from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText import smtplib from django.template.loader import render_to_string from django.utils.http import urlsafe_base64_encode from django.utils.encoding import force_bytes from django.views.decorators.csrf import csrf_exempt path = "...." 
# directory - django_project (outest one) from secure_data.secure_data_loader import SecureDataLoader from django.views import View import glob from os.path import dirname, basename, join handlers_collection = glob.glob(join(dirname(__file__), "handlers", "*.py")) for f in handlers_collection: import_script =\ """\ from .{0}.{1} import *\ """.format("handlers", basename(f[:-3]).replace('/', '.')) # print(import_script) exec (import_script) import requests class Register(View): @csrf_exempt def post(self, request): secure_data_loader = SecureDataLoader() authHandler = AuthenticationHandler() contextHandler = ContextHandler() contextHandler.join(authHandler) authHandler.check_same_password(request) if not authHandler.pwd_repeatPwd_is_same(): contextHandler.fillInContext() return render(request, 'template_dashboard/register.html', contextHandler.getContext()) authHandler.check_same_username(request) if not authHandler.has_username_exists(): ''' Begin reCAPTCHA validation ''' recaptcha_response = request.POST.get('g-recaptcha-response') url = 'https://www.google.com/recaptcha/api/siteverify' values = { 'secret': secure_data_loader.secure_data['RECAPTCHA_PRIVATE_KEY'], 'response': recaptcha_response } verify_rs = requests.get(url, params=values, verify=True) verify_rs = verify_rs.json() print(verify_rs) ''' End reCAPTCHA validation ''' ''' Account length check start ''' email_check_flag = True email_user = request.POST.get('email', '').split('@') # TODO use regular expression for substring in email_user: if len(substring)>20: email_check_flag = False if not email_user[0].isalnum(): email_check_flag = False print("email", email_user) ''' Account length check end ''' print("verify_rs['success']", verify_rs['success'], "email_check_flag", email_check_flag ) if verify_rs['success'] and email_check_flag: user = authHandler.createUserAndProfile(request) sender = MailSender() sender.setSubject("Activate Account") sender.setSmtpAccount(secure_data_loader.secure_data['SMTP_ACCOUNT']) sender.setSendTo(user.email) sender.setEmailTemplate("../templates/activation_email_template.txt") sender.setConfig({ "email":user.email, 'domain':secure_data_loader.secure_data['DOMAIN'], 'site_name': 'Website', "uid": urlsafe_base64_encode(force_bytes(user.pk)), "user": user, 'token': default_token_generator.make_token(user), 'protocol': 'https', }) sender.sendMail(secure_data_loader.secure_data['SMTP_ACCOUNT'], secure_data_loader.secure_data['SMTP_PASSWORD']) authHandler.updateStatus("Please click on the that has just been sent to your email account to verify your email and continue the registration process.") contextHandler.fillInContext() return render(request, 'template_dashboard/message_template.html', contextHandler.getContext()) else: print("reCAPTCHA failed") authHandler.updateStatus("reCAPTCHA failed") contextHandler.fillInContext() return render(request, 'template_dashboard/register.html', contextHandler.getContext()) else: authHandler.updateStatus("The email already exists!") contextHandler.fillInContext() return render(request, 'template_dashboard/register.html', contextHandler.getContext()) def get(self, request): context = { 'status_message': 'Create an Account' } return render(request, 'template_dashboard/register.html', context) <file_sep>from django.shortcuts import render, redirect from django.contrib.auth import authenticate, logout as auth_logout from ..models import * import glob from os.path import dirname, basename, join handlers_collection = glob.glob(join(dirname(__file__), "handlers", "*.py")) 
for f in handlers_collection: import_script =\ """\ from .{0}.{1} import *\ """.format("handlers", basename(f[:-3]).replace('/', '.')) # print(import_script) exec (import_script) from django.views import View class Logout(View): def get(self, request): authHandler = AuthenticationHandler() authHandler.logout(request) return redirect('/') <file_sep>from django.http import HttpResponse from ..models import * import glob from os.path import dirname, basename, join handlers_collection = glob.glob(join(dirname(__file__), "handlers", "*.py")) for f in handlers_collection: import_script =\ """\ from .{0}.{1} import *\ """.format("handlers", basename(f[:-3]).replace('/', '.')) # print(import_script) exec (import_script) from django.views import View from django.core.cache import cache class TestFunction(View): def post(self, request): return HttpResponse('123') def get(self, request): return HttpResponse('gggggg') <file_sep># Generated by Django 3.0.8 on 2020-11-08 09:37 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('monitor_app', '0015_messagelog'), ] operations = [ migrations.AddField( model_name='messagelog', name='read', field=models.BooleanField(default=False), ), migrations.AlterField( model_name='messagelog', name='group', field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), ] <file_sep>from django.http import HttpResponse from ..models import * import glob from os.path import dirname, basename, join handlers_collection = glob.glob(join(dirname(__file__), "handlers", "*.py")) for f in handlers_collection: import_script =\ """\ from .{0}.{1} import *\ """.format("handlers", basename(f[:-3]).replace('/', '.')) # print(import_script) exec (import_script) from django.views import View from django.core.cache import cache from django.utils.decorators import method_decorator from django.views.decorators.csrf import csrf_exempt from secure_data.secure_data_loader import SecureDataLoader import requests import os @method_decorator(csrf_exempt, name='dispatch') class UpdateCache(View): def post(self, request): secure_data_loader = SecureDataLoader() received_data = json.loads(request.body.decode("utf-8")) print(received_data) if('raspberry_secret_key' in received_data and received_data['raspberry_secret_key'] == secure_data_loader.secure_data['RASPBERRY_SECRET_KEY']): now = datetime.now() threshold_timestamp = now - timedelta(hours=48) connHandler = ConnectionHandler() connHandler.setQueryServerName("Backup") connHandler.setTitle("connections_data") connHandler.setTimezoneShift(timedelta(hours=8)) connHandler.setThresholdTimestamp(threshold_timestamp) connHandler.writeCacheData(cache) connHandler_private_server = ConnectionHandler() connHandler_private_server.setQueryServerName("PrivateServer") connHandler_private_server.setTitle("connections_data_private_server") connHandler_private_server.setTimezoneShift(timedelta(hours=8)) connHandler_private_server.setThresholdTimestamp(threshold_timestamp) connHandler_private_server.writeCacheData(cache) connHandler_backup_cpu = ConnectionHandler() connHandler_backup_cpu.setQueryServerName("BackupCpuPercentage") connHandler_backup_cpu.setTitle("connections_data_backup_cpu") connHandler_backup_cpu.setTimezoneShift(timedelta(hours=8)) connHandler_backup_cpu.setThresholdTimestamp(threshold_timestamp) 
connHandler_backup_cpu.writeCacheData(cache) connHandler_backup_mem = ConnectionHandler() connHandler_backup_mem.setQueryServerName("BackupMemPercentage") connHandler_backup_mem.setTitle("connections_data_backup_mem") connHandler_backup_mem.setTimezoneShift(timedelta(hours=8)) connHandler_backup_mem.setThresholdTimestamp(threshold_timestamp) connHandler_backup_mem.writeCacheData(cache) connHandler_webserver_cpu = ConnectionHandler() connHandler_webserver_cpu.setQueryServerName("WebServerCpuPercentage") connHandler_webserver_cpu.setTitle("connections_data_webserver_cpu") connHandler_webserver_cpu.setTimezoneShift(timedelta(hours=8)) connHandler_webserver_cpu.setThresholdTimestamp(threshold_timestamp) connHandler_webserver_cpu.writeCacheData(cache) connHandler_webserver_mem = ConnectionHandler() connHandler_webserver_mem.setQueryServerName("WebServerMemPercentage") connHandler_webserver_mem.setTitle("connections_data_webserver_mem") connHandler_webserver_mem.setTimezoneShift(timedelta(hours=8)) connHandler_webserver_mem.setThresholdTimestamp(threshold_timestamp) connHandler_webserver_mem.writeCacheData(cache) return HttpResponse('Conn Data Cache Updated!') else: return HttpResponse('wrong secret key') <file_sep>import datetime import schedule import threading import time import threading import time def job(): while True: print(time.ctime()) time.sleep(5) if __name__ == '__main__': t = threading.Thread(target = job) t.start() print("hey") <file_sep>from django.apps import AppConfig class MonitorAppConfig(AppConfig): name = 'monitor_app' def ready(self): #check AuthGroup from .system_check.check_model_AuthGroup import CheckAuthGroup check = CheckAuthGroup() check.system_check() <file_sep>from django.shortcuts import render, redirect from django.contrib.auth import authenticate, login as auth_login from ..models import * import glob from os.path import dirname, basename, join handlers_collection = glob.glob(join(dirname(__file__), "handlers", "*.py")) for f in handlers_collection: import_script =\ """\ from .{0}.{1} import *\ """.format("handlers", basename(f[:-3]).replace('/', '.')) # print(import_script) exec (import_script) from django.views import View class Login(View): def get(self, request): context = { 'status_message': 'Welcome Back' } return render(request, 'template_dashboard/login.html', context) def post(self, request): authHandler = AuthenticationHandler() print(authHandler.has_loggedin) authHandler.login(request) contextHandler = ContextHandler() contextHandler.join(authHandler) if(authHandler.has_loggedin()): return redirect('/') else: contextHandler.fillInContext() return render(request, 'template_dashboard/login.html', contextHandler.getContext()) <file_sep>from django.shortcuts import render from datetime import datetime, timedelta, date import json from django.contrib.auth.models import User from .ModelDataHandler import ModelDataHandler from ...models import * class ConnectionHandler(ModelDataHandler): def __init__(self): self.timezone_shift = None self.threshold_timestamp = None self.server_name = None self.title = "" self.useCacheFlag = False self.cache = None def setQueryServerName(self, server_name): self.server_name = server_name def setTimezoneShift(self, timedeltaObject): self.timezone_shift = timedeltaObject print(self.timezone_shift) def setThresholdTimestamp(self, datetimeObject): self.threshold_timestamp = datetimeObject print(self.threshold_timestamp) def getData(self): #override if self.useCacheFlag and self.cache.get(self.title): return 
self.cache.get(self.title) # dumped-json else: conns = Connections.objects.filter(time__gte=(self.threshold_timestamp), server_name=self.server_name).order_by('time') data = dict() data['timestamp_array'] = [(conn.time + self.timezone_shift).strftime('%m/%d %H:%M') for conn in conns] data['connections_array'] = [conn.number for conn in conns] return json.dumps(data) def setTitle(self, name): self.title = name def getTitle(self): #override return self.title def insertData(self, server_name, number): print("server_name", server_name) print("number", number) data = Connections() data.server_name = server_name data.number = number data.time = datetime.now() data.save() return 'succeed' def writeCacheData(self, cache): conns = Connections.objects.filter(time__gte=(self.threshold_timestamp), server_name=self.server_name).order_by('time') data = dict() data['timestamp_array'] = [(conn.time + self.timezone_shift).strftime('%m/%d %H:%M') for conn in conns] data['connections_array'] = [conn.number for conn in conns] cache.set(self.title, json.dumps(data)) def useCacheData(self, cache): self.useCacheFlag = True self.cache = cache <file_sep>command = '/usr/local/bin/gunicorn' pythonpath = '/home/user/Desktop/django_project/' bind = '0.0.0.0:8000' workers = 9 <file_sep>from django.db import models class Temperature(models.Model): temperature = models.FloatField(null=True, blank=True, default=None) time = models.DateTimeField() def __str__(self): return 'temperature' class Meta: verbose_name_plural = 'temperature' <file_sep>from django.shortcuts import render from datetime import datetime, timedelta, date import json from django.contrib.auth.models import User from .ModelDataHandler import ModelDataHandler from ...models import * class PlantDataHandler(ModelDataHandler): def getData(self): # override # WARNING # You should be very careful whenever you write raw SQL. # Every time you use it, you should properly escape any parameters that # the user can control by using params in order to protect against SQL injection attacks. # Please read more about SQL injection protection. 
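        # Example of the "params" pattern mentioned above (a minimal sketch; the
        # aruco_id filter and the some_aruco_id variable are hypothetical and only
        # illustrate the idea, they are not used by the query below):
        #
        #     PlantData.objects.raw(
        #         'SELECT * FROM monitor_app_plantdata WHERE aruco_id = %s',
        #         [some_aruco_id])
        #
        #     with connection.cursor() as cursor:   # from django.db import connection
        #         cursor.execute(
        #             'SELECT * FROM monitor_app_plantdata WHERE aruco_id = %s',
        #             [some_aruco_id])
        #
        # Passing values through the params list lets the database driver escape them,
        # rather than formatting user-controlled input directly into the SQL string.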
sql_command = '\ SELECT * FROM monitor_app_plantdata \ WHERE data_date IN (SELECT max(data_date) FROM monitor_app_plantdata) \ ORDER BY aruco_id ASC; \ ' plant_array = list() for plant in PlantData.objects.raw(sql_command): plant_array.append({ 'Id': plant.aruco_id, 'Image': plant.image_url, 'Type': plant.type, 'Data Date': plant.data_date.strftime('%m/%d'), 'Seed Date': plant.seed_date.strftime('%m/%d'), 'Status': plant.status, 'Growth_rate': plant.growth_rate }) data = dict() data['plants'] = plant_array return json.dumps(data) def getTitle(self): return 'plants_data' <file_sep>from django.shortcuts import render from datetime import datetime, timedelta, date import json from django.contrib.auth.models import User from .ModelDataHandler import ModelDataHandler from ...models import * class MessageCenterHandler(ModelDataHandler): def __init__(self, request): self.request = request self.now = None def setNow(self,datetimeObject): self.now = datetimeObject def convertTimeDeltaToDayHourMinString(self,delta_time): day_str = str(delta_time.days)+"d " if delta_time.days is not 0 else "" hour_str = str(delta_time.seconds//3600)+"h " if delta_time.seconds//3600 is not 0 else "" min_str = str((delta_time.seconds//60)%60)+"m " if (delta_time.seconds//60)%60 is not 0 else "" is_just_now = True if delta_time.days is 0 and (delta_time.seconds//60)%60 is 0 else False return_str = "" if(is_just_now): return "In a minute" else: return day_str+hour_str+min_str+"ago" def getData(self): # Message center message = list() unread_log_msg_num = 0 if(self.request.user.is_authenticated): current_user = User.objects.get(username = self.request.user.username) # print(current_user) profile = Profile.objects.get(user = current_user) # print(profile.activated) if(profile.activated): unread_log_msg_num = len(MessageLog.objects.filter(user = current_user, read=False)) log_msg = MessageLog.objects.filter(user = current_user).order_by('-time')[:unread_log_msg_num+5] # print(log_msg) for log in log_msg: time_delta = self.now - log.time.replace(tzinfo=None) # print(log.time) message.append({ 'delta_time': self.convertTimeDeltaToDayHourMinString(time_delta), 'title': log.title, 'type': log.type, 'log': log.log, 'read': log.read }) messagelog_data = dict() # print("unread_log_msg_num", unread_log_msg_num) messagelog_data['unread_red_message_number'] = unread_log_msg_num messagelog_data['messagelog_array'] = message return json.dumps(messagelog_data) def getTitle(self): return 'messagelog_data' def createAuthorLogMessage(self, title, msg, type): group = AuthGroup.objects.get(group="Author") profiles = Profile.objects.filter(permission=group) print(group) print(profiles) now = datetime.now() for profile in profiles: message = MessageLog() message.user = profile.user message.time = now message.title = title message.log = msg message.read = False message.type = type message.save() # update warning count if(type=="WARNING"): self.countWarningMessage() def countWarningMessage(self): task = TaskStatus.objects.get(task_name="WARNING COUNT") task.status = str(int(task.status)+1) task.save() <file_sep># Generated by Django 3.0.8 on 2020-10-29 15:43 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('monitor_app', '0007_profile'), ] operations = [ migrations.RenameField( model_name='profile', old_name='verification', new_name='activation', ), ] <file_sep>import sys sys.path.append("..") from secure_data.secure_data_loader import SecureDataLoader secure_data_loader = SecureDataLoader() if __name__ 
== '__main__':
    from email.mime.multipart import MIMEMultipart
    from email.mime.text import MIMEText

    content = MIMEMultipart()
    content["subject"] = "Learn Code With Mike"
    content["from"] = secure_data_loader.secure_data['SMTP_ACCOUNT']
    content["to"] = "<EMAIL>"
    content.attach(MIMEText("Demo python send email\nasasdfas"))

    import smtplib
    with smtplib.SMTP(host="smtp.gmail.com", port="587") as smtp:
        try:
            smtp.ehlo()
            smtp.starttls()
            smtp.login(
                secure_data_loader.secure_data['SMTP_ACCOUNT'],
                secure_data_loader.secure_data['SMTP_PASSWORD'])
            smtp.send_message(content)
            print("Complete!")
        except Exception as e:
            print("Error message: ", e)
<file_sep>from django.shortcuts import render
from datetime import datetime, timedelta, date
import json
from django.contrib.auth.models import User
from .ModelDataHandler import ModelDataHandler
from ...models import *


class WateringStatusHander(ModelDataHandler):
    def getData(self):
        # Status data
        watering_status = TaskStatus.objects.get(task_name="WATERING STATUS")
        return {
            'title': watering_status.task_name,
            'status': watering_status.status
        }

    def getTitle(self):
        return 'watering_status_data'

    def updateStatusData(self, statusData):
        task = TaskStatus.objects.get(task_name="WATERING STATUS")
        task.status = statusData
        task.save()

    def create_fake_data(self, status):
        task = TaskStatus()
        task.task_name = "WATERING STATUS"
        task.status = status
        task.save()
<file_sep>let msgCenterNav = document.querySelector("#alertsDropdown");

if(messagelog_data['unread_red_message_number']>3){
    let span = document.createElement("span");
    span.setAttribute("class", "badge badge-danger badge-counter");
    span.setAttribute("id", "unread-red-message-count");
    let text = document.createTextNode("3+");
    span.appendChild(text);
    msgCenterNav.appendChild(span);
}
else if(messagelog_data['unread_red_message_number']>0) {
    let span = document.createElement("span");
    span.setAttribute("class", "badge badge-danger badge-counter");
    span.setAttribute("id", "unread-red-message-count");
    let text = document.createTextNode(messagelog_data['unread_red_message_number']);
    span.appendChild(text);
    msgCenterNav.appendChild(span);
}
<file_sep>from django.db import models


class PlantData(models.Model):
    aruco_id = models.IntegerField(blank=False, null=False, default=-1)
    image_url = models.CharField(max_length=100, null=True)
    type = models.CharField(max_length=25, null=True)
    growth_rate = models.FloatField(null=True, blank=True, default=None)
    seed_date = models.DateField(null=True)
    data_date = models.DateField(null=True)
    status = models.CharField(max_length=100, null=True)

    def __str__(self):
        return str(self.image_url)

    class Meta:
        verbose_name_plural = 'PlantData'
<file_sep>from django.db import models


class Humidity(models.Model):
    humidity = models.FloatField(null=True, blank=True, default=None)
    time = models.DateTimeField()

    def __str__(self):
        return 'humidity'

    class Meta:
        verbose_name_plural = 'humidity'
<file_sep>from django.db import models


class Connections(models.Model):
    server_name = models.CharField(max_length=25)
    number = models.IntegerField(blank=True, null=True)
    time = models.DateTimeField()

    def __str__(self):
        return str(self.server_name)

    class Meta:
        verbose_name_plural = 'connections'
<file_sep>from django.shortcuts import render
from datetime import datetime, timedelta, date
import json
from django.contrib.auth.models import User
from django.views import View

import glob
from os.path import dirname, basename, join
handlers_collection = glob.glob(join(dirname(__file__), "handlers", "*.py"))
for f in handlers_collection:
import_script =\ """\ from .{0}.{1} import *\ """.format("handlers", basename(f[:-3]).replace('/', '.')) # print(import_script) exec (import_script) class Dashboard(View): def get(self, request): now = datetime.now() threshold_timestamp = now - timedelta(hours=8) tempHandler = TemperatureHandler() tempHandler.setTimezoneShift(timedelta(hours=8)) tempHandler.setThresholdTimestamp(threshold_timestamp) humidHandler = HumidityHandler() humidHandler.setTimezoneShift(timedelta(hours=8)) humidHandler.setThresholdTimestamp(threshold_timestamp) planthandler = PlantDataHandler() messageHandler = MessageCenterHandler(self.request) messageHandler.setNow(now) piCpuTempHander = PiCpuTempStatusHander() wateringStatusHandler = WateringStatusHander() cameraSatausHander = CameraTaskStatusHander() warningStatusHandler = WarningStatusHander() contextHandler = ContextHandler() contextHandler.join(tempHandler) contextHandler.join(humidHandler) contextHandler.join(planthandler) contextHandler.join(messageHandler) contextHandler.join(piCpuTempHander) contextHandler.join(wateringStatusHandler) contextHandler.join(cameraSatausHander) contextHandler.join(warningStatusHandler) contextHandler.fillInContext() return render(self.request, 'template_dashboard/dashboard.html', contextHandler.getContext())
66d960af9ac0d61912807c026758869aa8dacb02
[ "Shell", "Markdown", "JavaScript", "Python", "CSS" ]
87
Shell
ArthurWuTW/django_project
5ed209ae1f8ae7262770f0f7f163ae70c74df57d
0ef174e1ffab3cd3655da14dc2d945e16e675cf7
refs/heads/master
<file_sep><%= erb :'/partials/_header' %> <h1>Edit or Delete an author</h1> <form action="/authors/<%= @author['id'] %>" method="POST"> <input type='hidden' name='_method' value="PUT"> <input type="text" name="name" value="<%= @author["name"] %>" id="name" autofocus> <br> <input type="submit" value='Change'> </form> <form action="/authors/<%= @author['id'] %>" method="POST"> <input type="hidden" name="_method" value="DELETE"> <input type="submit" value="Delete"> </form> <%= erb :'/partials/_footer' %><file_sep><%= erb :'/partials/_header' %> <h1>This Author</h1> <%=@author["name"]%> <%= erb :'/partials/_footer' %><file_sep><%= erb :'/partials/_header'%> <h1>Author List</h1> <% @authors.each do |author| %> <p><%= author["name"] %></p> <p> <a href="/authors/<%= author['id'] %>">Show More info</a> <a href="/authors/<%= author['id'] %>/edit">Edit</a> </p> <% end %> <p> <a href="/authors/new"> Add a New Author </a> </p> <%= erb :'/partials/_footer' %>
83510da96debd019bb7a2c77b9e84164b58b6e06
[ "HTML+ERB" ]
3
HTML+ERB
willsimmons/SQLTest
08a93512e1ea468d9ca3d2e800024db8dfa88f9f
377ddc3c2046bf9322d605643c71c82fcc790467
refs/heads/master
<file_sep>#freestyle {
    margin: 0;
    font-family: Arial, Helvetica, sans-serif;
    background-color: rgb(80, 79, 79);
}
.sidenav{
    box-sizing: border-box;
    height: 100%;
    width: 200px;
    position: fixed;
    z-index: 1;
    top: 0;
    left: 0;
    background-color: aqua;
    overflow: hidden;
}
.sidenav a {
    color: black;
    padding: 16px;
    text-decoration: none;
    display: block;
    font-weight: bold;
    font-size: 25px;
}
.sidenav a:hover{
    background-color: rgb(80, 79, 79);
    color: aqua;
}
.content{
    margin-left: 200px;
    padding-left: 20px;
    background-color: rgb(80, 79, 79);
    color: aqua;
    font-size: 25px;
}
marquee {
    font-family: Georgia, 'Times New Roman', Times, serif;
    color: aqua;
    background-color: rgb(80, 79, 79);
}
footer{
    position: fixed;
    bottom: 0;
    width: 100%;
    background-color: aqua;
    color: black;
    text-align: center;
    font-family: 'Trebuchet MS', 'Lucida Sans Unicode', 'Lucida Grande', 'Lucida Sans', Arial, sans-serif;
}
.container{
    border: 1px solid aqua;
    margin-left: 200px;
    height: 320px;
    width: 100%;
}
<file_sep>function myFunction() {
    document.getElementById("fancy").innerHTML = " This is javascript!!!";
}

function getDate() {
    document.getElementById("date").innerHTML = Date();
}

function theStatus() {
    alert("Home page has loaded");
}

function alarm() {
    var word = "H2O";
    alert(word + " is the way to go!!!");
}

var d = Date();
document.getElementById("fancy").innerHTML = Date();
<file_sep># website
Front and Back End code of website with javascript
100a183831703781c6d40927b198729f0cd9017a
[ "Markdown", "JavaScript", "CSS" ]
3
Markdown
rwaters210/website
f342d5ff9d20b4858ce483e3ee4df1d015ae42ab
9250231c02db7b22267a46efca61afe6440a926c
refs/heads/develop
<repo_name>timothystewart6/opensource-portal<file_sep>/views/org/leave.pug //- //- Copyright (c) Microsoft. All rights reserved. //- Licensed under the MIT license. See LICENSE file in the project root for full license information. //- extends ../layout block content div.container //- Conditions for this page: // - already an organization member h1 We'll miss you! p.lead Are you sure that you want to leave the #{org.name} organization on GitHub? p Please carefully review this page. <strong>Data loss is possible</strong> if you have forks of private organization repos. p ul li GitHub account #{user.github.username} will be dropped from the org immediately li Your private forks of repos from #{org.name} will be deleted by GitHub li Work done in a private fork of a repo from #{org.name} will be lost form(method='post') p(style='margin-top:36px') input.btn.btn-primary.btn-lg(type='submit', value='Remove ' + user.github.username + ' from ' + org.name) | &nbsp;&nbsp;&nbsp; a.btn.btn-default.btn-lg(href=org.baseUrl) Cancel <file_sep>/routes/diagnostics.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // 'use strict'; const express = require('express'); const router = express.Router(); const redacted = '*****'; router.get('/', (req, res) => { let config = req.app.settings.runtimeConfig; let safeUserView = { cookies: req.cookies, sessionId: req.session.id, sessionIndex: `${config.redis.prefix}.session:${req.session.id}`, user: {}, }; if (req.user && req.user.github) { let github = {}; for (let key in req.user.github) { let val = req.user.github[key]; if (key === 'accessToken') { val = redacted; } github[key] = val; } safeUserView.user.github = github; } if (req.user && req.user.azure) { let azure = {}; for (let key in req.user.azure) { let val = req.user.azure[key]; if (key === 'accessToken') { val = redacted; } azure[key] = val; } safeUserView.user.azure = azure; } for (let key in req.session) { if (typeof req.session[key] !== 'object') { safeUserView[key] = req.session[key]; } } safeUserView.websiteHostname = process.env.WEBSITE_HOSTNAME; return res.render('message', { message: 'My information', messageTiny: 'This information might be useful in helping diagnose issues.', messageOutput: JSON.stringify(safeUserView, undefined, 2), user: req.user, config: config, corporateLinks: config.corporate.trainingResources['public-homepage'], serviceBanner: config && config.serviceMessage ? config.serviceMessage.banner : undefined, title: 'Open Source Portal for GitHub - ' + config.brand.companyName }); }); module.exports = router; <file_sep>/views/org/2fa.pug //- //- Copyright (c) Microsoft. All rights reserved. //- Licensed under the MIT license. See LICENSE file in the project root for full license information. //- extends ../layout block content div.container if twoFactorOff === true if notValidated && user.github h1 2FA is still not enabled for #{user.github.displayName || user.github.username}... else h1 2FA is not enabled p.lead This GitHub org requires multi-factor authentication. Let's set it up now. p If you already have an Authenticator app, this step takes <strong>2 minutes</strong>. If you need to install and configure an app for the first time, this will likely take <strong>5-10 minutes</strong>. This multi-factor setup is separate from your corporate authentication. 
div.alert.alert-gray(role='alert') if notValidated strong Your GitHub account is still not protected with MFA else strong Two-factor auth is not turned on for your GitHub account p. Please enable 2FA on GitHub.com. if notValidated p As of #{nowString} UTC, the GitHub API reports that your account is not as secure as it can be. By using a multi-factor app on your mobile device or signing up for SMS authentication messages, your account can be much more secure. ul.list-inline li a.btn.btn-primary(href='https://github.com/settings/two_factor_authentication/configure', target='_new') Configure 2FA <i class="glyphicon glyphicon-share-alt"></i> li a.btn.btn-success(href=org.baseUrl + 'security-check?validate=validate' + (onboarding ? '&onboarding=' + onboarding : '')) Validate 2FA and Continue li a.btn.btn-default(href='/unlink') Cancel my corporate participation include twoFactorInstructions else h1 Two-factor security is enabled p. Thanks for helping to keep the organization secure. p a.btn.btn-primary(href=org.baseUrl) Go to the #{org.name} portal <file_sep>/app.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // const express = require('express'); const app = express(); app.disable('x-powered-by'); require('debug')('oss-initialize')('loading express application'); const initialize = require('./middleware/initialize'); app.initializeApplication = initialize.bind(undefined, app, express, __dirname); module.exports = app; <file_sep>/jobs/reports/fileCompression.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // 'use strict'; const async = require('async'); const fs = require('fs'); const tmp = require('tmp'); const zlib = require('zlib'); function deflateFile(inputFilename, outputFilename, callback) { const gzip = zlib.createGzip(); const input = fs.createReadStream(inputFilename); const output = fs.createWriteStream(outputFilename); input.pipe(gzip).pipe(output); output.on('finish', callback); } function getTempFilenames(count, callback) { const filenames = []; async.whilst( () => filenames.length !== count, (next) => { tmp.tmpName((tempGenerationError, tempPath) => { if (tempGenerationError) { return next(tempGenerationError); } filenames.push(tempPath); next(); }); }, function (error) { if (error) { return callback(error); } callback(null, filenames); }); } module.exports.writeDeflatedTextFile = function writeDeflatedText(text, callback) { // The callback will be the deflated temporary filename, removed after the process exits. getTempFilenames(2, (tempFilesError, filenames) => { if (tempFilesError) { return callback(tempFilesError); } const intermediate = filenames[0]; const deflatedPath = filenames[1]; // Direct piping was crashing in the past so using two temporary files for robustness. fs.writeFile(intermediate, text, (writeError) => { if (writeError) { return callback(writeError); } deflateFile(intermediate, deflatedPath, (deflateError) => { if (deflateError) { return callback(deflateError); } callback(null, deflatedPath); }); }); }); }; module.exports.deflateFile = deflateFile; <file_sep>/webhooks/organizationProcessor.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. 
// /*eslint no-console: ["error", { allow: ["warn", "log", "dir"] }] */ 'use strict'; const _= require('lodash'); const async = require('async'); const crypto = require('crypto'); const secureCompare = require('secure-compare'); const tasks = require('./tasks/'); module.exports = function (options, callback) { const operations = options.operations; if (!operations) { return callback(new Error('No operations instance provided')); } const organization = options.organization; const event = options.event; if (!organization || !organization.name) { return callback(new Error('Missing organization instance')); } if (!event) { return callback(new Error('Missing event')); } if (!event.body) { return callback(new Error('Missing event body')); } const body = event.body; const rawBody = event.rawBody || JSON.stringify(body); const properties = event.properties; if (!properties || !properties.delivery || !properties.signature || !properties.event) { return callback(new Error('Missing event properties - delivery, signature, and/or event')); } verifySignatures(properties.signature, organization.webhookSharedSecrets, rawBody, (validationError) => { if (validationError) { if (operations && operations.insights) { const possibleOrg = body && body.organization ? body.organization.login : 'unknown-org'; console.warn(`incorrect hook signature - ${possibleOrg} organization`); operations.insights.trackMetric('WebhookIncorrectSecrets', 1); operations.insights.trackEvent('WebhookIncorrectSecret', { org: possibleOrg, delivery: properties.delivery, event: properties.event, signature: properties.signature, approximateTime: properties.started.toISOString(), computedHash: validationError.computedHash, }); } return callback(validationError); } // In a bus scenario, if a short timeout window is used for queue // visibility, a client may want to acknowledge this being a valid // event at this time. After this point however there is no // guarantee of successful execution. if (options.acknowledgeValidEvent) { options.acknowledgeValidEvent(); } let interestingEvents = 0; const work = _.filter(tasks, (processor) => { return processor.filter(event); }); if (work.length > 0) { ++interestingEvents; console.log(`[* interesting event found: ${event.properties.event} (${work.length} interested tasks)]`); } else { console.log(`[skipping event: ${event.properties.event}]`); } async.eachSeries(work, (processor, next) => { try { processor.run(operations, organization, event, next); } catch (processInitializationError) { console.log('Processor ran into an error with an event:'); console.dir(processInitializationError); return next(processInitializationError); } }, (error) => { return callback(error, interestingEvents); }); }); }; function verifySignatures(signature, hookSecrets, rawBody, callback) { // To ease local development and simple scenarios, if no shared secrets are // configured, they are not required. 
if (!hookSecrets || !hookSecrets.length) { return callback(); } if (!signature) { return callback(new Error('No event signature was provided')); } const computedSignatures = []; for (let i = 0; i < hookSecrets.length; i++) { const sharedSecret = hookSecrets[i]; const sha1 = crypto.createHmac('sha1', sharedSecret); sha1.update(rawBody, 'utf8'); const computedHash = 'sha1=' + sha1.digest('hex'); if (secureCompare(computedHash, signature)) { return callback(); } computedSignatures.push(computedHash); } const validationError = new Error('The signature could not be verified'); validationError.statusCode = 401; validationError.computedHash = computedSignatures.join(', '); return callback(validationError); } <file_sep>/test/mailProvider.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // 'use strict'; const assert = require('chai').assert; const mailProvider = require('../lib/mailProvider/'); const fakeMailProviderName = 'mockMailService'; const executive = '<EMAIL>'; const developer = '<EMAIL>'; function createMailConfig() { return { logging: { version: '1', }, mail: { provider: fakeMailProviderName, from: '<EMAIL>', overrideRecipient: undefined, customService: { url: 'url', apiKey: 'key', version: 'prototype', } }, }; } describe('mailProvider', () => { describe('factory', () => { it('can create a factory by configuration', () => { const config = createMailConfig(); mailProvider(config, (error, provider) => { assert.isDefined(provider, 'provider is created'); }); }); it('overriding to works', () => { const config = createMailConfig(); config.mail.overrideRecipient = developer; mailProvider(config, (error, provider) => { const mail = { to: executive, }; provider.sendMail(mail, (sendMail, receipt) => { assert.isDefined(receipt, 'mail is sent'); const messages = provider.getSentMessages(); assert.strictEqual(messages.length, 1, 'one message was sent'); const message = messages[0]; assert.equal(message.id, receipt, 'message ID matches'); assert.equal(message.to, developer, 'overridden e-mail address is used for TO:'); }); }); }); it('mock send mail works', () => { const config = createMailConfig(); mailProvider(config, (error, provider) => { const mail = { to: executive, }; provider.sendMail(mail, (sendMail, receipt) => { assert.isDefined(receipt, 'mail is sent'); const messages = provider.getSentMessages(); assert.strictEqual(messages.length, 1, 'one message was sent'); const message = messages[0]; assert.equal(message.id, receipt, 'message ID matches'); assert.equal(message.to, executive, 'intended receipient was sent the message'); }); }); }); it('reports basic provider info and version properties', () => { const config = createMailConfig(); mailProvider(config, (error, provider) => { assert.isTrue(provider.info.includes(fakeMailProviderName), 'provider self-registers correctly'); }); }); it('throws an error when the provider is not supported', () => { const config = createMailConfig(); config.mail.provider = 'providerDoesNotExist'; mailProvider(config, (error, provider) => { assert.isDefined(error, 'provider did not exist, error set'); assert.isUndefined(provider, 'provider was not created'); }); }); }); }); <file_sep>/lib/github/core.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. 
//
/*eslint no-console: ["error", { allow: ["dir"] }] */

'use strict';

const _ = require('lodash');
const debug = require('debug')('oss-github');
const uuid = require('node-uuid');
const moment = require('moment');
const Q = require('q');

const cost = require('./cost');

const delayBeforeRefreshMilliseconds = 1000;

// --- Core REST client cache functionality ---

function internalCall(apiContext, callback) {
  return getCachedMetadata(apiContext)
    .then(apiContext.pipeline.processMetadataBeforeCall.bind(null, apiContext))
    .then((metadata) => {
      return apiContext.pipeline.withMetadataShouldCacheBeServed(apiContext, metadata)
        .then((shouldCacheBeServedImmediately) => {
          const displayKey = apiContext.redisKey ? apiContext.redisKey.root + ' ' : '';
          if (shouldCacheBeServedImmediately === true || shouldCacheBeServedImmediately.cache === true) {
            if (metadata) {
              const innerMessage = shouldCacheBeServedImmediately && shouldCacheBeServedImmediately.remaining ? shouldCacheBeServedImmediately.remaining : '';
              debug(`OK cache ${displayKey}data: ${innerMessage}`);
            }
            ++apiContext.cost.github.cacheHits;
            return getCachedResult(apiContext, shouldCacheBeServedImmediately);
          } else {
            if (metadata) {
              // console.dir(metadata);
            }
            debug(`API GET : ${displayKey}`);
            return apiContext.pipeline.callApi(apiContext)
              .then(processResponse.bind(null, apiContext));
          }
        })
        .then(result => {
          callback(null, result);
        }, callback);
    });
}

function processResponse(apiContext, response) {
  return apiContext.pipeline.withResponseUpdateMetadata(apiContext, response)
    .then(apiContext.pipeline.withResponseShouldCacheBeServed.bind(null, apiContext, response))
    .then((isCacheOk) => {
      if (isCacheOk === true) {
        ++apiContext.cost.github.cacheHits;
        return getCachedResult(apiContext);
      }
      ++apiContext.cost.github.usedApiTokens;
      return apiContext.pipeline.getResponseMetadata(apiContext, response).then((metadata) => {
        if (metadata) {
          return cacheResponseAsync(apiContext, response); // callback will happen after caching
        } else {
          finish(apiContext);
          return finalizeResult(apiContext, response);
        }
      });
    });
}

function finish(apiContext) {
  if (apiContext && apiContext.pipeline && apiContext.pipeline.finish) {
    apiContext.pipeline.finish(apiContext);
  }
}

// --- Caching integration with metadata/responses ---

function getCachedMetadata(apiContext) {
  if (apiContext.metadata || apiContext.etag) {
    return;
  }
  const redisKey = apiContext.redisKey.metadata;
  if (!redisKey) {
    throw new Error('No Redis key provided in apiContext.redisKey.metadata');
  }
  return apiContext.libraryContext.redis.getObjectAsync(redisKey)
    .then(recordRedisCost(apiContext, 'get'));
}

function tryGetCachedResult(apiContext) {
  const key = redisKeyBodyVersion(apiContext);
  if (apiContext.libraryContext.memoryCache.has(key)) {
    ++apiContext.cost.local.cacheHits;
    return Q(apiContext.libraryContext.memoryCache.get(key));
  }
  ++apiContext.cost.local.cacheMisses;
  return apiContext.libraryContext.redis.getObjectCompressedAsync(redisKeyBodyVersion(apiContext))
    .then(recordRedisCost(apiContext, 'get'))
    .then(storeLocalResult.bind(null, apiContext));
}

function getCachedResult(apiContext, optionalCacheDecisions) {
  return tryGetCachedResult(apiContext)
    .then(result => {
      if (result) {
        result.meta = apiContext.metadata;
        if (optionalCacheDecisions && optionalCacheDecisions.refresh === true) {
          backgroundRefreshAsync(apiContext, apiContext.metadata);
        } else {
          slideObjectExpirationWindow(apiContext).then(finish(apiContext));
        }
        return finalizeResult(apiContext, result);
      }
      ++apiContext.cost.redis.cacheMisses;
      delete apiContext.etag;
      return apiContext.pipeline.callApi(apiContext).then(processResponse.bind(null, apiContext));
    });
}

function backgroundRefreshAsync(apiContext, currentMetadata) {
  // Potential data loss/consistency problem: upsert/overwrite
  let refreshing = moment().utc().format();
  let refreshId = uuid();
  currentMetadata.refreshing = refreshing;
  currentMetadata.refreshId = refreshId;
  apiContext.generatedRefreshId = refreshId;
  debug(`refresh in the background starting for ${apiContext.redisKey.metadata} was updated ${apiContext.metadata.updated} and seconds of ${apiContext.maxAgeSeconds}`);
  return apiContext.libraryContext.redis.setObjectWithExpireAsync(apiContext.redisKey.metadata, currentMetadata, apiContext.pipeline.cache.minutes.longtermMetadata)
    .then(() => {
      // Remove the values in case the refresh uses the metadata
      delete currentMetadata.refreshing;
      delete currentMetadata.refreshId;
    })
    .then(recordRedisCost(apiContext, 'set'))
    .delay(delayBeforeRefreshMilliseconds)
    .then(apiContext.pipeline.callApi.bind(null, apiContext))
    .then(processResponse.bind(null, apiContext))
    .catch(exp => {
      console.dir(exp);
    });
}

function cacheResponseAsync(apiContext, response) {
  const kickoffAsyncWork = () => {
    return storeLocalResult(apiContext, response)
      .then(storeResult(apiContext, response))
      .then(storeMetadata(apiContext, response))
      .then(reduceObjectExpirationWindow(apiContext, response))
      .catch((err) => {
        if (err) {
          console.dir(err);
        }
      }).done(() => {
        return finish(apiContext);
      });
  };
  kickoffAsyncWork();
  return finalizeResult(apiContext, response);
}

function finalizeResult(apiContext, response) {
  // If there are situations where you do not want the cost shared
  // back the API context could be customized here.
  if (response) {
    response.cost = apiContext.cost;
  }
  return response;
}

// --- Caching ---

function reduceObjectExpirationWindow(apiContext, response) {
  if (!apiContext.etag || (apiContext.etag && apiContext.etag === response.meta.etag)) {
    return;
  }
  debug('Expiring older cached response');
  return apiContext.libraryContext.redis.expireAsync(
    redisKeyBodyVersion(apiContext, apiContext.etag),
    apiContext.pipeline.cache.minutes.acceleratedExpiration)
    .then(recordRedisCost(apiContext, 'expire'));
}

function slideObjectExpirationWindow(apiContext) {
  if (!apiContext.etag) {
    return undefined;
  }
  return apiContext.libraryContext.redis.expireAsync(
    redisKeyBodyVersion(apiContext, apiContext.etag),
    apiContext.pipeline.cache.minutes.longtermResponse)
    .then(recordRedisCost(apiContext, 'expire'));
}

function storeMetadata(apiContext, response) {
  const reducedMetadata = apiContext.pipeline.reduceMetadataToCacheFromResponse(apiContext, response);
  return apiContext.libraryContext.redis.setObjectWithExpireAsync(
    apiContext.redisKey.metadata,
    reducedMetadata,
    apiContext.pipeline.cache.minutes.longtermMetadata)
    .then(recordRedisCost(apiContext, 'set'));
}

function storeLocalResult(apiContext, response) {
  return new Q.Promise(function (resolve) {
    if (response) {
      const key = redisKeyBodyVersion(apiContext, response && response.meta ? response.meta.etag : undefined);
      apiContext.libraryContext.memoryCache.set(key, response);
    }
    resolve(response);
  });
}

function storeResult(apiContext, response) {
  return apiContext.libraryContext.redis.setObjectCompressedWithExpireAsync(
    redisKeyBodyVersion(apiContext, response.meta.etag),
    response,
    apiContext.pipeline.cache.minutes.longtermResponse)
    .then(recordRedisCost(apiContext, 'set'));
}

function redisKeyAspectSuffix(aspect) {
  return aspect ? `:${aspect}` : '';
}

function redisKeyBodyVersion(apiContext, etag) {
  const tag = etag || apiContext.etag;
  if (!tag) {
    throw new Error('A cached result cannot be retrieved without an etag value.');
  }
  const strippedTag = tag.replace(/"/g, '');
  const root = apiContext.redisKey.root;
  if (!root) {
    throw new Error('No Redis key root provided in API context apiContext.redisKey.root');
  }
  return root + redisKeyAspectSuffix(`body@${strippedTag}`);
}

function recordRedisCost(apiContext, type) {
  if (!type) {
    throw new Error('No type defined for recordRedisCost.');
  }
  return object => {
    let hit = object !== undefined;
    if (type === 'get') {
      apiContext.cost.redis.cacheHit += hit ? 1 : 0;
      apiContext.cost.redis.cacheMisses += hit ? 0 : 1;
    }
    if (type !== 'get' && type !== 'set' && type !== 'expire') {
      throw new Error(`The Redis type of ${type} is not configured for storing API costs.`);
    }
    apiContext.cost.redis[`${type}Calls`] += 1;
    return object;
  };
}

function decorateApiContext(apiContext) {
  // Decorate with expected variables to hold logs, cost
  if (!apiContext.log) {
    apiContext.log = [];
  }
  if (!apiContext.calledTime) {
    apiContext.calledTime = moment().utc();
  }
  if (!apiContext.redisKey) {
    apiContext.redisKey = {};
  }
  if (!apiContext.cost) {
    apiContext.cost = cost.create();
  }
  return apiContext;
}

function createContext(api, options) {
  const apiContext = {
    api: api,
    options: options,
  };
  return decorateApiContext(apiContext);
}

function redisKeyForApi(apiPrefix, api, apiOptions, aspect) {
  const normalizedOptions = normalizedOptionsString(apiOptions);
  const aspectSuffix = redisKeyAspectSuffix(aspect);
  return `${apiPrefix}${api}${normalizedOptions}${aspectSuffix}`;
}

function normalizedOptionsString(options) {
  if (!options) {
    return '';
  }
  const sortedkeys = _.keys(options).sort();
  let normalized = [];
  sortedkeys.forEach((key) => {
    let value = options[key];
    const typeOf = typeof (value);
    if (typeOf === 'object') {
      value = normalizedOptionsString(value);
    } else if (typeOf !== 'string' && typeOf !== 'number' && typeOf !== 'boolean') {
      throw new Error(`Normalized option ${key} is not a string`);
    }
    if (typeOf === 'boolean') {
      value = value === true ? '1' : '0';
    }
    normalized.push(`${key}=${value}`);
  });
  return `(${normalized.join(',')})`;
}

module.exports = {
  execute: internalCall,
  createContext: createContext,
  redisKeyAspectSuffix: redisKeyAspectSuffix,
  redisKeyForApi: redisKeyForApi,
  delayBeforeRefreshMilliseconds: delayBeforeRefreshMilliseconds,
};
<file_sep>/middleware/scrubbedUrl.js
//
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
//

// ----------------------------------------------------------------------------
// Scrub the incoming URL value(s) in the request, replacing tokens and other
// secrets.
// ----------------------------------------------------------------------------
module.exports = function (req, res, next) {
  var url = req.originalUrl || req.url;
  var secretKeys = [
    'code',
    'token',
  ];
  for (var i = 0; i < secretKeys.length; i++) {
    var key = secretKeys[i];
    var value = req.query[key];
    if (value !== undefined) {
      url = url.replace(key + '=' + value, key + '=*****');
    }
  }
  req.scrubbedUrl = url;
  next();
};
<file_sep>/middleware/lowercaser.js
//
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
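//
// Normalizes selected route parameters and query-string values to lowercase so
// that organization, team, and repository names compare consistently downstream.
// Hypothetical usage sketch (the parameter names here are illustrative only, not
// taken from this repository's routes):
//   router.use(lowercaser(['orgName', 'teamSlug']));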
//

'use strict';

module.exports = function (params) {
  return function (req, res, next) {
    // lowercase parameters
    Object.getOwnPropertyNames(req.params).forEach((param) => {
      if (params.indexOf(param) > -1) {
        req.params[param] = lowerCaser(req.params[param]);
      }
    });

    // lowercase query string
    Object.getOwnPropertyNames(req.query).forEach((query) => {
      if (params.indexOf(query) > -1) {
        req.query[query] = lowerCaser(req.query[query]);
      }
    });

    next();
  };
};

function lowerCaser(param) {
  if (typeof param === 'string') {
    return param.toLowerCase();
  }
  if (Array.isArray(param)) {
    return param.map((item) => {
      return item.toLowerCase();
    });
  }
  return param;
}
<file_sep>/middleware/requireSecureAppService.js
//
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
//

/*eslint no-console: ["error", { allow: ["warn"] }] */

// ----------------------------------------------------------------------------
// If this portal is deployed to Azure App Service, let's make sure that they
// are connecting over SSL by validating the load balancer headers. If they are
// not, redirect them. Keys off of WEBSITE_SKU env variable that is injected.
// Also supports an additional expected certificate from the load balancer when
// using custom certificates.
// ----------------------------------------------------------------------------
module.exports = function (req, res, next) {
  var config = req.app.settings.runtimeConfig;
  if (!req.headers['x-arr-ssl']) {
    return next(new Error('No "x-arr-ssl" header, yet this app has been deployed to App Service. Please have an administrator investigate.'));
  }
  var arr = req.headers['x-arr-ssl'];
  var expectedHeaders = [
    '2048|256|C=US, S=Washington, L=Redmond, O=Microsoft Corporation, OU=Microsoft IT, CN=Microsoft IT SSL SHA2|CN=*.azurewebsites.net'
  ];
  if (config.webServer.expectedSslCertificate) {
    expectedHeaders.push(config.webServer.expectedSslCertificate);
  }
  var isLegit = false;
  for (var i = 0; i < expectedHeaders.length; i++) {
    if (arr === expectedHeaders[i]) {
      isLegit = true;
    }
  }
  if (!isLegit) {
    console.warn(`The SSL connection may not be secured via Azure App Service. Please contact the site sponsors to investigate: ${arr}`);
  }
  // We are no longer throwing here as it affects the load balancers.
  // if (isLegit === false) {
  //   var err = new Error('The SSL connection may not be secured via Azure App Service. Please contact the site sponsors to investigate.');
  //   err.headers = req.headers;
  //   err.arrHeader = arr;
  //   err.detailed = arr;
  //   return next(err);
  // }
  req.app.set('trust proxy', 1);
  next();
};
<file_sep>/business/teamRepositoryPermission.js
//
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
//

'use strict';

class TeamRepositoryPermission {
  constructor(team, entity, getToken, operations) {
    this.team = team;
    this.permissions = entity.permissions;
    this.repository = team.organization.repositoryFromEntity(entity);
    this.id = this.repository.id;

    const privates = _private(this);
    privates.getToken = getToken;
    privates.operations = operations;
  }

  get name() {
    return this.repository.name;
  }
}

module.exports = TeamRepositoryPermission;

const privateSymbol = Symbol();
function _private(self) {
  if (self[privateSymbol] === undefined) {
    self[privateSymbol] = {};
  }
  return self[privateSymbol];
}
<file_sep>/jobs/cleanupInvites/task.js
//
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
//

/*eslint no-console: ["error", { allow: ["warn", "dir", "log"] }] */

'use strict';

const async = require('async');
const moment = require('moment');
const os = require('os');

// Organization invitations cleanup: remove any invitations that are older than a
// set period of time from the organization.

const maxParallelism = 1;
const defaultMaximumInvitationAgeDays = 7;

module.exports = function run(started, startedString, config) {
  console.log(`WebJob started ${startedString}`);
  const app = require('../../app');
  config.skipModules = new Set([
    'ossDbProvider',
    'web',
  ]);
  app.initializeApplication(config, null, error => {
    if (error) {
      throw error;
    }
    const insights = app.settings.appInsightsClient;
    if (!insights) {
      throw new Error('No app insights client available');
    }
    let maximumInvitationAgeDays = defaultMaximumInvitationAgeDays;
    if (config.github && config.github.jobs && config.github.jobs.cleanup && config.github.jobs.cleanup.maximumInvitationAgeDays) {
      maximumInvitationAgeDays = config.github.jobs.cleanup.maximumInvitationAgeDays;
    }
    const maximumAgeMoment = moment().subtract(maximumInvitationAgeDays, 'days');
    insights.trackEvent('JobOrganizationInvitationsCleanupStarted', {
      hostname: os.hostname(),
      maximumDays: maximumInvitationAgeDays.toString(),
    });
    const operations = app.settings.operations;
    const organizations = operations.getOrganizations();
    let removedInvitations = 0;
    async.eachLimit(organizations, maxParallelism, (organization, next) => {
      organization.getMembershipInvitations((getInvitationsError, invitations) => {
        if (getInvitationsError) {
          return next(getInvitationsError);
        }
        if (!invitations || invitations.length === 0) {
          return next();
        }
        const invitationsToRemove = [];
        let emailInvitations = 0;
        for (let i = 0; i < invitations.length; i++) {
          const invite = invitations[i];
          const createdAt = moment(invite.created_at);
          if (createdAt.isBefore(maximumAgeMoment)) {
            if (invite.login) {
              invitationsToRemove.push(invite.login);
            } else {
              ++emailInvitations;
              console.warn(`An e-mail based invitation to ${invite.email} cannot be automatically canceled`);
            }
            const data = {
              createdAt: createdAt.format(),
              invitedAgo: createdAt.fromNow(),
              login: invite.login,
              inviter: invite && invite.inviter && invite.inviter.login ? invite.inviter.login : undefined,
              role: invite.role,
              emailInvited: invite.email,
            };
            const eventName = invite.login ? 'JobOrganizationInviteCleanupInvitationNeeded' : 'JobOrganizationInviteCleanupInvitationNotUser';
            insights.trackEvent(eventName, data);
          }
        }
        console.log(`Organization ${organization.name} has ${invitationsToRemove.length} expired invitations out of ${invitations.length} total invitations pending`);
        if (emailInvitations) {
          console.warn(`Organization ${organization.name} has ${emailInvitations} e-mail based invitations that cannot be canceled through this job`);
        }
        async.eachLimit(invitationsToRemove, 1, (login, nextInvite) => {
          organization.removeMember(login, removeError => {
            if (removeError) {
              insights.trackException(removeError);
              insights.trackEvent('JobOrganizationInvitationsCleanupInvitationFailed', {
                login: login,
                message: removeError.message,
              });
            } else {
              // Count successful cancellations so the final log line and metric are accurate
              ++removedInvitations;
            }
            return nextInvite();
          });
        }, next);
      });
    }, error => {
      if (error) {
        console.dir(error);
        insights.trackException(error);
        return process.exit(1);
      }
      console.log(`Job finished. Removed ${removedInvitations} expired invitations.`);
      insights.trackMetric('JobOrganizationInvitationsExpired', removedInvitations);
      process.exit(0);
    });
  });
};
<file_sep>/views/organization/whois/drop.pug
//-
//- Copyright (c) Microsoft. All rights reserved.
//- Licensed under the MIT license. See LICENSE file in the project root for full license information.
//-

extends ../../layout

block content
  .container
    h1 Dropped
    if results
      h3 Results
      ul
        each result in results
          li= result
    else
      p No results. No operations were performed.
    if entity
      h3 Link information and/or GitHub user data
      pre= JSON.stringify(entity, undefined, 2)
<file_sep>/views/org/team/join.pug
//-
//- Copyright (c) Microsoft. All rights reserved.
//- Licensed under the MIT license. See LICENSE file in the project root for full license information.
//-

extends ../../layout

block content
  div.container
    if allowSelfJoin
      h1
        | Join confirmation&nbsp;
        small=team.name
      p This team is open to everyone in the organization. Would you like to join this team?
      form(method='post')
        input.btn.btn-primary(type='submit', value='Join')
        | &nbsp;
        a.btn.btn-default(href=team.org.baseUrl + 'teams') Cancel
    else
      h1
        | Join Request&nbsp;
        small= team.name
      if team.description
        p.lead= team.description
      p This form will open a request to join the <em>#{team.name}</em> team. Keep in mind that with the <a href="https://guides.github.com/introduction/flow/" target="_new">GitHub fork and pull request workflow</a>, you may be able to successfully contribute to an effort with just read rights.
      if teamMaintainers
        h4 Team Maintainer#{teamMaintainers.length > 1 ? 's' : ''}
        p Permission decisions are delegated to the following maintainer#{teamMaintainers.length > 1 ? 's' : ''} of the team:
        p
          for teamMember in teamMaintainers
            span.person-tile
              if teamMember.avatar()
                img(alt=teamMember.login, src=teamMember.avatar('80'), style='margin-right:10px;width:20px;height:20px', width=20, height=20)
              a.btn.btn-sm.btn-muted(href='https://github.com/' + teamMember.login, title=teamMember.id, target='_new')= teamMember.login
              if teamMember.contactEmail()
                a.btn.btn-sm.btn-muted-more(href='mailto:' + teamMember.contactEmail())= teamMember.contactName()
      form(method='post')
        h4 Context & Business Justification
        p Please provide context for the approvers, including your business justification and any context they may need. Did someone suggest that you join the team?
        p
          textarea.form-control(rows=3, name='justification', placeholder='Required: Business Justification')
        p
          input.btn.btn-primary(type='submit', value='Submit Team Access Request')
          | &nbsp;
          a.btn.btn-default(href=team.org.baseUrl + 'teams') Cancel
<file_sep>/resources/less/build.less
/*!
 * opensource-portal v4.2.0
 * Homepage:
 * Copyright 2012-2017 Microsoft Corporation
 * Licensed under MIT
 * Based on Bootstrap
 */

@import "../../bower_components/bootstrap/less/bootstrap.less";
@import "../../bower_components/bootswatch/yeti/variables.less";
@import "../../bower_components/bootswatch/yeti/bootswatch.less";
<file_sep>/routes/org/team/delete.js
//
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
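//
// Deletes the team on GitHub and redirects back to the organization's team list;
// the teamAdminRequired middleware gates who may perform the operation.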
//

'use strict';

const express = require('express');
const router = express.Router();
const teamAdminRequired = require('./teamAdminRequired');

router.post('/', teamAdminRequired, (req, res, next) => {
  const organization = req.organization;
  const team2 = req.team2;
  team2.delete(error => {
    if (error) {
      return next(error);
    }
    req.oss.saveUserAlert(req, `${team2.name} team deleted`, 'Delete', 'success');
    res.redirect('/' + organization.name + '/teams');
  });
});

module.exports = router;
<file_sep>/views/org/profileReview.pug
//-
//- Copyright (c) Microsoft. All rights reserved.
//- Licensed under the MIT license. See LICENSE file in the project root for full license information.
//-

extends ../layout

block content
  if true
    if true
      div.container
        div.row
          div.col-lg-8.col-md-8.col-sm-8
            h1 Is your GitHub profile professional?
            if onboarding
              p.lead Since you've just linked your #{config.brand.companyName} and GitHub accounts, it's a perfect time to think about how you show up in public on GitHub.
            p Your interactions in public open source repos (commenting on issues, creating and working with pull requests, contributing source) are all associated with your public GitHub profile. With #{config.brand.companyName}'s open source and social media policies in mind, consider these questions:
            ul
              li Are you using the name you use professionally?
              li Have you identified #{config.brand.companyName} as your company?
              li Have you included an e-mail address?
              li Do you have a professional, smart profile avatar?
            p &nbsp;
            p
              a.btn.btn-primary.btn-lg(href=org.baseUrl + (onboarding ? 'membership?onboarding=' + onboarding : 'membership')) Continue
              | &nbsp;&nbsp;
              a.btn.btn-default.btn-lg(href='https://github.com/settings/profile', target='_new')
                | Edit your profile on GitHub
            if onboarding
              p &nbsp;
              hr
              h3 Your onboarding progress
              h5
                | Sign in to your GitHub &amp; #{config.brand.companyName} accounts&nbsp;
                i.glyphicon.glyphicon-ok
              h5
                | Link your accounts&nbsp;
                i.glyphicon.glyphicon-ok
              h5
                | Join your first GitHub organization&nbsp;
                i.glyphicon.glyphicon-ok
              h5
                | Multifactor security checkup&nbsp;
                i.glyphicon.glyphicon-ok
              h5.text-primary
                | Profile review
              h5
                | Publish your membership <em>(optional)</em>
              h5
                | Join a team <em>(optional)</em>
          div.col-lg-4.col-md-4.col-sm-4
            div.row
              div.col-lg-12.col-sm-12.col-md-12.alert-gray
                h4 YOUR GITHUB PROFILE
                p
                  img.img-thumbnail(style='max-width:200px', src=userProfile.avatar(400), alt=(userProfile.name || userProfile.login))
                span
                  p
                    | GitHub username
                    br
                    strong= userProfile.login
                span
                  p
                    | Name
                    br
                    strong= userProfile.name
                span(style=userProfileWarnings.company)
                  p
                    | Company
                    br
                    if userProfile.company
                      strong= userProfile.company
                    else
                      strong No company listed
                if userProfile.location
                  span
                    p
                      | Location
                      br
                      strong= userProfile.location
                span(style=userProfileWarnings.email)
                  p
                    | E-mail
                    br
                    if userProfile.email
                      strong= userProfile.email
                    else
                      strong No e-mail provided
                if userProfile.otherFields.blog
                  span
                    p
                      | Web
                      br
                      a(href=userProfile.otherFields.blog, target='_new')
                        strong= userProfile.otherFields.blog
                p(style='margin:16px 0')
                  a.btn.btn-default.btn-sm(href='https://github.com/settings/profile', target='_new')
                    | Edit your profile on GitHub
<file_sep>/lib/context.js
//
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
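//
// OpenSourceUserContext carries a signed-in user's corporate (AAD) and GitHub
// identities, the link record that ties them together, Redis-backed caching of
// that link, and helpers for resolving organizations, teams, users, and views.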
// /*eslint no-console: ["error", { allow: ["warn"] }] */ 'use strict'; const async = require('async'); const debug = require('debug')('azureossportal'); const utils = require('../utils'); const github = require('octonode'); const insights = require('./insights'); const sortBy = require('lodash').sortBy; const Org = require('./org'); const Team = require('./team'); const User = require('./user'); const RedisHelper = require('./redis'); /*eslint no-console: ["error", { allow: ["warn"] }] */ function OpenSourceUserContext(options, callback) { var self = this; self.displayNames = { github: null, azure: null, }; self.usernames = { github: null, azure: null, }; self.avatars = { github: null, }; self.id = { github: null, aad: null, }; self.entities = { link: null, primaryMembership: null, }; self.tokens = { github: null, githubIncreasedScope: null, }; self.githubLibrary = options.githubLibrary; const applicationConfiguration = options.config; const dataClient = options.dataClient; const redisInstance = options.redisClient; let redisHelper = options.redisHelper; const ossDbClient = options.ossDbClient; const link = options.link; this.insights = options.insights; if (this.insights === undefined) { this.insights = insights(); } let modernUser; this.cache = { orgs: {}, users: {}, }; this.modernUser = function () { return modernUser; }; this.createModernUser = function (id, login) { modernUser = new User(this, id); modernUser.login = login; return modernUser; }; this.setting = function (name) { return applicationConfiguration[name]; }; this.dataClient = function () { return dataClient; }; this.redisClient = function () { return redisInstance; }; this.ossDbClient = function () { return ossDbClient; }; this.configuration = applicationConfiguration; this.baseUrl = '/'; if (redisHelper) { this.redis = redisHelper; } else if (applicationConfiguration && applicationConfiguration.redis) { this.redis = new RedisHelper(redisInstance, applicationConfiguration.redis.prefix); } if (link && options.request) { return callback(new Error('The context cannot be set from both a request and a link instance.')); } if (link) { return self.setPropertiesFromLink(link, callback); } if (options.request) { return this.resolveLinkFromRequest(options.request, callback); } callback(new Error('Could not initialize the context for the acting user.'), self); } OpenSourceUserContext.prototype.setPropertiesFromLink = function (link, callback) { this.usernames.github = link.ghu; this.id.github = link.ghid.toString(); this.id.aad = link.aadoid; this.usernames.azure = link.aadupn; this.entities.link = link; this.displayNames.azure = link.aadname; this.avatars.github = link.ghavatar; this.tokens.github = link.githubToken; this.tokens.githubIncreasedScope = link.githubTokenIncreasedScope; var modernUser = this.modernUser(); if (!modernUser && this.id.github) { modernUser = this.createModernUser(this.id.github, this.usernames.github); } modernUser.link = link; callback(null, this); }; function tooManyLinksError(self, userLinks, callback) { const tooManyLinksError = new Error(`This account has ${userLinks.length} linked GitHub accounts.`); tooManyLinksError.links = userLinks; tooManyLinksError.tooManyLinks = true; return callback(tooManyLinksError, self); } function existingGitHubIdentityError(self, link, requestUser, callback) { const endUser = requestUser.azure.displayName || requestUser.azure.username; const anotherGitHubAccountError = new Error(`${endUser}, there is a different GitHub account linked to your corporate identity.`); 
anotherGitHubAccountError.anotherAccount = true; anotherGitHubAccountError.link = link; anotherGitHubAccountError.skipLog = true; return callback(anotherGitHubAccountError, self); } function redisKeyForLink(authenticationScheme, identifier) { return `user#${authenticationScheme}:${identifier}:link`; } OpenSourceUserContext.prototype.invalidateLinkCache = function (scheme, oid, callback) { if (typeof scheme === 'function' && !callback) { callback = scheme; scheme = this.setting('authentication').scheme; oid = this.id.aad; } if (!oid) { return callback(new Error('No AAD ID is available for the user to invalidate the cache.')); } if (scheme !== 'aad') { return callback(new Error(`The scheme ${scheme} is not supported by the cache system at this time.`)); } invalidateCachedLink(this, scheme, oid, callback); }; function invalidateCachedLink(self, authenticationScheme, identifier, callback) { if (!self.redis) { return callback(new Error('No Redis instance provided to the user context.')); } self.redis.delete(redisKeyForLink(authenticationScheme, identifier), callback); } function tryGetCachedLink(self, authenticationScheme, identifier, callback) { if (!self.redis) { console.warn('No Redis client provided with the context object.'); return callback(); } self.redis.getObject(redisKeyForLink(authenticationScheme, identifier), callback); } function tryCacheLink(self, authenticationScheme, identifier, link, multipleLinksPresent, callback) { if (!self.redis) { console.warn('No Redis client provided with the context object.'); if (callback) return callback(); } if (multipleLinksPresent) { return callback(null, self); } self.redis.setObjectWithExpire(redisKeyForLink(authenticationScheme, identifier), link, 180 /* minutes */, () => { callback(null, self); }); } // ---------------------------------------------------------------------------- // Populate the user's OSS context object. // ---------------------------------------------------------------------------- OpenSourceUserContext.prototype.resolveLinkFromRequest = function (request, callback) { const self = this; const requestUser = request.user; const scheme = self.setting('authentication').scheme; if (requestUser && requestUser.github) { self.usernames.github = requestUser.github.username; self.id.github = requestUser.github.id; self.displayNames.github = requestUser.github.displayName; self.avatars.github = requestUser.github.avatarUrl; } if (requestUser && requestUser.azure) { self.usernames.azure = requestUser.azure.username; self.displayNames.azure = requestUser.azure.displayName; self.id.aad = requestUser.azure.oid; } if (scheme === 'aad' && requestUser.azure && requestUser.azure.oid) { const getUserCacheStartTime = Date.now(); return tryGetCachedLink(self, 'aad', requestUser.azure.oid, (getCachedLinkError, cachedLink) => { const getUserCacheEndTime = Date.now(); if (self.insights) { self.insights.trackDependency('AzureLinksCache', 'getUserByAadId', getUserCacheEndTime - getUserCacheStartTime, !getCachedLinkError); } if (getCachedLinkError) { return callback(getCachedLinkError); } const selectedId = scheme === 'aad' && request.session && request.session.selectedGithubId ? 
request.session.selectedGithubId : undefined; const validateAndSetOneLink = (link, next) => { if (!selectedId && requestUser.github && requestUser.github.username && link.ghu !== requestUser.github.username && link.ghid !== requestUser.github.id) { existingGitHubIdentityError(self, link, requestUser, next); } else { self.setPropertiesFromLink(link, next); } }; if (cachedLink) { return validateAndSetOneLink(cachedLink, callback); } const getUserStartTime = Date.now(); self.dataClient().getUserByAadOid(requestUser.azure.oid, function (findError, userLinks) { const getUserEndTime = Date.now(); if (self.insights) { self.insights.trackDependency('AzureLinksTable', 'getUserByAadOid', getUserEndTime - getUserStartTime, !findError); } if (findError) { const wrappedError = utils.wrapError(findError, 'There was a problem trying to load the link from storage.'); if (findError.message) { wrappedError.detailed = findError.message; } return callback(wrappedError, self); } if (userLinks.length === 0) { return callback(null, self); } let selectedLink = null; if (selectedId) { userLinks.forEach((oneLink) => { if (oneLink.ghid === selectedId) { selectedLink = oneLink; } }); if (!selectedLink) { delete request.session.selectedGithubId; } } if (!selectedLink) { if (userLinks.length > 1) { return tooManyLinksError(self, userLinks, callback); } selectedLink = userLinks[0]; } validateAndSetOneLink(selectedLink, (validationError) => { if (validationError) { return callback(validationError, self); } tryCacheLink(self, 'aad', requestUser.azure.oid, selectedLink, selectedId !== undefined, callback); }); }); }); } let userObject; if (self.id.github) { userObject = self.createModernUser(self.id.github, self.usernames.github); } if (!userObject) { return callback(new Error('There\'s a logic bug in the user context object. We cannot continue.'), self); } userObject.getLink(function (error, link) { if (error) { return callback(utils.wrapError(error, 'We were not able to retrieve information about any link for your user account at this time.'), self); } if (link) { return self.setPropertiesFromLink(link, callback); } else { callback(null, self); } }); }; // ---------------------------------------------------------------------------- // SECURITY METHOD: // Determine whether the authenticated user is an Administrator of the org. At // this time there is a special "portal sudoers" team that is used. The GitHub // admin flag is not used [any longer] for performance reasons to reduce REST // calls to GitHub. // ---------------------------------------------------------------------------- OpenSourceUserContext.prototype.isPortalAdministrator = function (callback) { if (this.configuration.github.debug && this.configuration.github.debug.portalSudoOff) { console.warn('DEBUG WARNING: Portal sudo support is turned off in the current environment'); return callback(null, false); } /* var self = this; if (self.entities && self.entities.primaryMembership) { var pm = self.entities.primaryMembership; if (pm.role && pm.role === 'admin') { return callback(null, true); } } */ const primaryOrg = this.primaryOrg(); let sudoTeam = null; try { sudoTeam = primaryOrg.getPortalSudoersTeam(); } catch (noSudoersTeam) { return callback(null, false); } if (!sudoTeam) { return callback(null, false); } sudoTeam.isMember(function (error, isMember) { if (error) { return callback(utils.wrapError(error, 'We had trouble querying GitHub for important team management ' + 'information. 
Please try again later or report this issue.')); } callback(null, isMember === true); }); }; // ---------------------------------------------------------------------------- // Create a simple GitHub client. Should be audited, since using this library // directly may result in methods which are not cached, etc. // ---------------------------------------------------------------------------- OpenSourceUserContext.prototype.createGenericGitHubClient = function () { const ownerToken = this.org().setting('ownerToken'); if (!ownerToken) { throw new Error('No "ownerToken" set for the ' + this.org().name + ' organization.'); } return github.client(ownerToken); }; // ---------------------------------------------------------------------------- // Given a GitHub user ID, get their GitHub profile information. Resilient to // GitHub username changes. // ---------------------------------------------------------------------------- OpenSourceUserContext.prototype.getGithubUsernameFromId = function (id, callback) { this.createGenericGitHubClient().get(`/user/${id}`, (error, s, b, h) => { if (error) { return callback(error.statusCode === 404 ? utils.wrapError(error, `The GitHub user ID ${id} no longer exists on GitHub.com. (404 Not Found)`) : error); } if (s !== 200) { return callback(new Error(`Could not retrieve the GitHub username from the ID ${id}.`)); } else { return callback(null, b.login, h, b); } }); }; // ---------------------------------------------------------------------------- // Make sure system links are loaded for a set of users. // ---------------------------------------------------------------------------- OpenSourceUserContext.prototype.getLinksForUsers = function (list, callback) { const dc = this.dataClient(); async.map(list, function (person, cb) { if (person && person.id) { cb(null, person.id); } else { cb(new Error('No ID known for this person instance.')); } }, function (error, map) { if (error) { return callback(error); } // In large organizations, we will have trouble getting this much data back // all at once. const groups = []; let j = 0; const perGroup = 200; let group = []; for (let i = 0; i < map.length; i++) { if (j++ == perGroup) { groups.push(group); group = []; j = 0; } group.push(map[i]); } if (group.length > 0) { groups.push(group); group = []; } async.each(groups, function (userGroup, cb) { dc.getUserLinks(userGroup, function (error, links) { if (error) { // Specific to problems we've had with storage results... if (error.headers && error.headers.statusCode && error.headers.body) { let oldError = error; error = new Error('Storage returned an HTTP ' + oldError.headers.statusCode + '.'); error.innerError = oldError; } return cb(error); } for (let i = 0; i < list.length; i++) { list[i].trySetLinkInstance(links, true); } cb(); }); }, function (error) { callback(error ? error : null, error ? null : list); }); }); }; // ---------------------------------------------------------------------------- // Translate a list of IDs into developed objects and their system links. // ---------------------------------------------------------------------------- OpenSourceUserContext.prototype.getUsersAndLinksFromIds = function (list, callback) { const self = this; for (let i = 0; i < list.length; i++) { const id = list[i]; list[i] = self.user(id); } self.getLinksForUsers(list, callback); }; // ---------------------------------------------------------------------------- // Translate a hash of IDs to usernames into developed objects, system links // and details loaded. 
Hash key is username, ID is the initial hash value. // ---------------------------------------------------------------------------- OpenSourceUserContext.prototype.getCompleteUsersFromUsernameIdHash = function (hash, callback) { const self = this; const users = {}; const list = []; for (const key in hash) { const id = hash[key]; const username = key; const user = self.user(id); user.login = username; users[username] = user; list.push(user); } async.parallel([ function (cb) { self.getLinksForUsers(list, cb); }, function (cb) { async.each(list, function (user, innerCb) { user.getDetailsByUsername(function (/* formerUserError */) { // Ignore the user with an error... this means they left GitHub. // TODO: Should anything be done or reacted to in this scenario? innerCb(); }); }, function (error) { cb(error); }); }, ], function (error) { callback(error, users); }); }; // ---------------------------------------------------------------------------- // Returns a list of users pending removal based on the Redis key of the name // "pendingunlinks". // ---------------------------------------------------------------------------- OpenSourceUserContext.prototype.getPendingUnlinks = function getPendingUnlinks(callback) { var self = this; var dc = this.dataClient(); var redisKey = 'pendingunlinks'; self.redis.getSet(redisKey, function (err, upns) { if (err) { return callback(err); } var links = []; var notFound = []; async.each(upns, function (upn, cb) { dc.getUserByAadUpn(upn, function (err, user) { if (err) { return cb(err); } if (user && user.length && user.length > 0) { for (var i = 0; i < user.length; i++) { links.push(user[i]); } } else { notFound.push(upn); } cb(); }); }, function (error) { callback(error, links, notFound); }); }); }; // ---------------------------------------------------------------------------- // This function is involved and will perform a number of queries across all of // the registered organizations in the portal. It is designed to try to make as // much progress as necessary per participant, so that even if the function has // 50 users to process but can only successfully perform 1 drop, it will get // the 1 drop done and removed from the Redis set. // ---------------------------------------------------------------------------- OpenSourceUserContext.prototype.processPendingUnlinks = function processPendingUnlinks(callback) { var self = this; this.getPendingUnlinks(function (error, unlinks, unknownUsers) { if (error) { return callback(error); } var history = {}; if (unknownUsers && unknownUsers.length && unknownUsers.length > 0) { history.unknown = unknownUsers; } async.eachSeries(unlinks, function (link, cb) { var upn = link.aadupn; self.processPendingUnlink(link, function (err, info) { if (err) { return cb(err); } if (!history.unlinked) { history.unlinked = {}; } history.unlinked[upn] = info; cb(); }); }, function (error) { callback(error, history); }); }); }; // ---------------------------------------------------------------------------- // Let's promote this person to customer. 
// ---------------------------------------------------------------------------- OpenSourceUserContext.prototype.processPendingUnlink = function processPendingUnlink(entity, callback) { var dc = this.dataClient(); var self = this; var redisKey = 'pendingunlinks'; var upn = entity.aadupn; const fixedAuthScheme = 'aad'; const oid = entity.aadoid; const id = entity.ghid; self.getGithubUsernameFromId(id, (getUsernameError, username) => { if (!username) { return callback(new Error(`No username found on GitHub from the ID ${id}.`)); } var history = []; if (username !== entity.ghu) { history.push(`It looks like the GitHub user changed their username. The user ID ${id} today has the GitHub username of "${username}", but previously had the username ${entity.ghu}.`); } var orgsList = self.orgs(); var orgs = []; async.each(orgsList, function (org, cb) { org.queryAnyUserMembership(username, function (err, membership) { if (membership && membership.state) { history.push(`"${username}" has the state "${membership.state}" in the "${org.name}" GitHub organization currently.`); orgs.push(org); } cb(null, membership); }); }, function (queryingError) { if (queryingError) { return callback(queryingError); } // Remove from any orgs now if (orgs.length === 0) { history.push(`"${username}" has no active organization memberships in this environment.`); } async.each(orgs, function (org, cb) { history.push(`Dropping "${username}" from "${org.name}"...`); org.removeUserMembership(username, cb); }, function (error) { if (error) { // Keep the user in the list. history.push(`Error removing at least one org membership: ${error.message}`); return callback(error, history); } // Delete the link history.push('Removing any corporate link for ID ' + entity.ghid + ' username "' + username + '"'); dc.removeLink(entity.ghid, function (error) { if (error) { history.push(`Link remove error (they may not have had a link): ${error.message}`); return callback(error, history); } // Delete any cached link for the user, then remove from the Redis set history.push('Removing any cached link from Redis for "' + upn + '"'); invalidateCachedLink(self, fixedAuthScheme, oid, () => { history.push('Removing pending unlink entry from Redis for "' + upn + '"'); self.redis.removeSetMember(redisKey, upn, function (err) { if (err) { history.push(`Remove pending unlink set member error with Redis: ${err.message}`); return callback(err, history); } callback(null, history); }); }); }); }); }); }); }; // ---------------------------------------------------------------------------- // Retrieve a user's active organization memberships, aggressively cached. 
// ---------------------------------------------------------------------------- OpenSourceUserContext.prototype.getMyOrganizations = function (callback) { const self = this; const returnSetToInstances = (set) => { const orgs = []; self.orgs().forEach((org) => { if (set.has(org.name.toLowerCase())) { orgs.push(org); } }); return callback(null, sortBy(orgs, 'name')); }; const redisKey = 'user#' + self.id.github + ':orgs:active-memberships'; self.redis.getObjectCompressed(redisKey, (error, orgsList) => { if (!error && orgsList) { return returnSetToInstances(new Set(orgsList)); } self.getOrganizationsWithMembershipStates(true, (error, orgsList) => { if (error) { return callback(error); } const active = []; orgsList.forEach((org) => { if (org.membershipStateTemporary === 'active') { active.push(org.name.toLowerCase()); } }); self.redis.setObjectWithExpire(redisKey, active, 180 /* minutes */, function () { return returnSetToInstances(new Set(active)); }); }); }); }; // ---------------------------------------------------------------------------- // Retrieve all organizations, including a property indicating the membership // state of the user, if any. // ---------------------------------------------------------------------------- OpenSourceUserContext.prototype.getOrganizationsWithMembershipStates = function (allowCaching, callback) { const self = this; if (typeof allowCaching == 'function') { callback = allowCaching; allowCaching = true; } const orgs = []; async.each(self.orgs(), function (org, callback) { org.queryUserMembership(allowCaching, function (error, result) { let state = false; if (result && result.state) { state = result.state; } // Not sure how I feel about updating values on the org directly... org.membershipStateTemporary = state; orgs.push(org); callback(error); }); }, function (/* ignoredError */) { callback(null, orgs); }); }; // ---------------------------------------------------------------------------- // Retrieve all of the teams -across all registered organizations. This is not // specific to the user. This will include secret teams. // Caching: the org.getTeams call has an internal cache at this time. // ---------------------------------------------------------------------------- OpenSourceUserContext.prototype.getAllOrganizationsTeams = function (callback) { const self = this; async.concat(self.orgs(), function (org, cb) { org.getTeams(cb); }, function (error, teams) { if (error) { return callback(error); } // CONSIDER: SORT: Do these results need to be sorted? callback(null, teams); }); }; // ---------------------------------------------------------------------------- // This function uses heavy use of caching since it is an expensive set of // calls to make to the GitHub API when the cache misses: N API calls for N // teams in M organizations. // ---------------------------------------------------------------------------- OpenSourceUserContext.prototype.getMyTeamMemberships = function (role, alternateUserId, callback) { const self = this; if (typeof alternateUserId == 'function') { callback = alternateUserId; alternateUserId = self.id.github; } this.getAllOrganizationsTeams(function (error, teams) { if (error) { return callback(error); } const myTeams = []; async.each(teams, function (team, callback) { team.getMembersCached(role, function (error, members) { if (error) { // If the team was deleted since the cache was created, this is not an error worth propagating. 
if (error.statusCode === 404) { return callback(); } return callback(error); } for (let i = 0; i < members.length; i++) { const member = members[i]; if (member.id == alternateUserId) { myTeams.push(team); break; } } callback(); }); }, function (error) { callback(error, myTeams); }); }); }; // ---------------------------------------------------------------------------- // Designed for use by tooling, this returns the full set of administrators of // teams across all orgs. Designed to help setup communication with the people // using this portal for their daily engineering group work. // ---------------------------------------------------------------------------- OpenSourceUserContext.prototype.getAllMaintainers = function (callback) { this.getAllOrganizationsTeams(function (getTeamsError, teams) { if (getTeamsError) { return callback(getTeamsError); } const users = {}; async.each(teams, function (team, callback) { team.getMembersCached('maintainer', function (getTeamMembersError, members) { if (getTeamMembersError) { return callback(getTeamMembersError); } for (let i = 0; i < members.length; i++) { const member = members[i]; if (users[member.id] === undefined) { users[member.id] = member; } // A dirty patch on top, just to save time now. if (users[member.id]._getAllMaintainersTeams === undefined) { users[member.id]._getAllMaintainersTeams = {}; } users[member.id]._getAllMaintainersTeams[team.id] = team; } callback(); }); }, function (getMembersIterationError) { if (getMembersIterationError) { return callback(getMembersIterationError); } const asList = []; for (const key in users) { const user = users[key]; asList.push(user); } async.each(asList, function (user, cb) { user.getLink(cb); }, function (getUserLinkError) { callback(getUserLinkError, asList); }); }); }); }; // ---------------------------------------------------------------------------- // Retrieve a set of team results. // ---------------------------------------------------------------------------- // [_] CONSIDER: Cache/ Consider caching this sort of important return result... OpenSourceUserContext.prototype.getTeamSet = function (teamIds, inflate, callback) { const self = this; if (typeof inflate === 'function') { callback = inflate; inflate = false; } const teams = []; async.each(teamIds, function (teamId, cb) { self.getTeam(teamId, inflate, function (error, team) { if (!error) { teams.push(team); } cb(error); }); }, function (error) { // CONSIDER: SORT: Do these results need to be sorted? callback(error, teams); }); }; // ---------------------------------------------------------------------------- // Retrieve a single team instance. This version hydrates the team's details // and also sets the organization instance. // ---------------------------------------------------------------------------- // [_] CONSIDER: Cache/ Consider caching this sort of important return result... OpenSourceUserContext.prototype.getTeam = function (teamId, callback) { const self = this; const team = createBareTeam(self, teamId); team.getDetails(function (error) { if (error) { error = utils.wrapError(error, 'There was a problem retrieving the details for the team. The team may no longer exist.'); } callback(error, error ? null : team); }); }; // ---------------------------------------------------------------------------- // Prepare a list of all organization names, lowercased, from the original // config instance. 
// ---------------------------------------------------------------------------- function allOrgNamesLowercase(orgs) { const list = []; if (orgs && orgs.length) { for (let i = 0; i < orgs.length; i++) { const name = orgs[i].name; if (!name) { throw new Error('No organization name has been provided for one of the configured organizations.'); } list.push(name.toLowerCase()); } } return list; } // ---------------------------------------------------------------------------- // Retrieve the "primary" organization by identifying which org, if any, has // the grand portal sudoers team defined. // ---------------------------------------------------------------------------- OpenSourceUserContext.prototype.primaryOrg = function getPrimaryOrg() { const orgs = this.orgs(); let primaryOrg = null; orgs.forEach((org) => { const teamPortalSudoers = org.setting('teamPortalSudoers'); if (teamPortalSudoers && primaryOrg === null) { primaryOrg = org; } else if (teamPortalSudoers) { const warning = 'Only one organization may contain a grand sudoers team. Please have an application administrator investigate this issue.'; console.warn(warning); } }); if (!primaryOrg && orgs.length === 1) { return orgs[0]; } return primaryOrg; }; // ---------------------------------------------------------------------------- // Retrieve an array of all organizations registered for management with this // portal instance. Used for iterating through global operations. We'll need to // use smart caching to land this experience better than in the past, and to // preserve API use rates. // ---------------------------------------------------------------------------- OpenSourceUserContext.prototype.orgs = function getAllOrgs() { const self = this; const allOrgNames = allOrgNamesLowercase(self.setting('github').organizations); const orgs = []; for (let i = 0; i < allOrgNames.length; i++) { orgs.push(self.org(allOrgNames[i])); } return orgs; }; // ---------------------------------------------------------------------------- // Retrieve a user-scoped elevated organization object via a static // configuration lookup. // ---------------------------------------------------------------------------- OpenSourceUserContext.prototype.org = function getOrg(orgNameAnycase) { if (orgNameAnycase === undefined || orgNameAnycase === '') { console.warn('Using the first organization for tokens and other properties. This can cause problems when multiple tokens are in use.'); orgNameAnycase = this.setting('github').organizations[0].name; } const name = orgNameAnycase.toLowerCase(); if (this.cache.orgs[name]) { return this.cache.orgs[name]; } let settings; const orgs = this.setting('github').organizations; for (let i = 0; i < orgs.length; i++) { if (orgs[i].name && orgs[i].name.toLowerCase() == name) { settings = orgs[i]; break; } } if (!settings) { throw new Error('The requested organization "' + orgNameAnycase + '" is not currently available for actions or is not configured for use at this time.'); } const tr = this.setting('corporate').trainingResources; if (tr && tr['onboarding-complete']) { const tro = tr['onboarding-complete']; const trainingResources = { corporate: tro.all, github: tro.github, }; if (tro[name]) { trainingResources.organization = tro[name]; } settings.trainingResources = trainingResources; } this.cache.orgs[name] = new Org(this, settings.name, settings); return this.cache.orgs[name]; }; // ---------------------------------------------------------------------------- // Retrieve an object representing the user, by GitHub ID. 
// ---------------------------------------------------------------------------- OpenSourceUserContext.prototype.user = function getUser(id, optionalGitHubInstance) { const self = this; if (typeof id != 'string') { id = id.toString(); } if (self.cache.users[id]) { return self.cache.users[id]; } else { self.cache.users[id] = new User(self, id, optionalGitHubInstance); return self.cache.users[id]; } }; // ---------------------------------------------------------------------------- // Allows creating a team reference with just a team ID, no org instance. // ---------------------------------------------------------------------------- function createBareTeam(oss, teamId) { const teamInstance = new Team(oss.org(), teamId, null); teamInstance.org = null; return teamInstance; } // ---------------------------------------------------------------------------- // Helper function for UI: Store in the user's session an alert message or // action to be shown in another successful render. Contexts come from Twitter // Bootstrap, i.e. 'success', 'info', 'warning', 'danger'. // ---------------------------------------------------------------------------- OpenSourceUserContext.prototype.saveUserAlert = function (req, message, title, context, optionalLink, optionalCaption) { const alert = { message: message, title: title || 'FYI', context: context || 'success', optionalLink: optionalLink, optionalCaption: optionalCaption, }; if (req.session) { if (req.session.alerts && req.session.alerts.length) { req.session.alerts.push(alert); } else { req.session.alerts = [ alert, ]; } } }; // ---------------------------------------------------------------------------- // Helper function for UI: Render a view. By using our own rendering function, // we can make sure that events such as alert views are still actually shown, // even through redirect sequences. // ---------------------------------------------------------------------------- OpenSourceUserContext.prototype.render = function (req, res, view, title, optionalObject) { if (typeof title == 'object') { optionalObject = title; title = ''; debug('context::render: the provided title was actually an object'); } const breadcrumbs = req.breadcrumbs; if (breadcrumbs && breadcrumbs.length && breadcrumbs.length > 0) { breadcrumbs[breadcrumbs.length - 1].isLast = true; } const authScheme = this.setting('authentication').scheme; const user = { primaryAuthenticationScheme: authScheme, primaryUsername: authScheme === 'github' ? this.usernames.github : this.usernames.azure, githubSignout: authScheme === 'github' ? '/signout' : '/signout/github', azureSignout: authScheme === 'github' ? '/signout/azure' : '/signout', }; if (this.id.github || this.usernames.github) { user.github = { id: this.id.github, username: this.usernames.github, displayName: this.displayNames.github, avatarUrl: this.avatars.github, accessToken: this.tokens.github !== undefined, increasedScope: this.tokens.githubIncreasedScope !== undefined, }; } if (this.usernames.azure) { user.azure = { username: this.usernames.azure, displayName: this.displayNames.azure, }; } const reposContext = req.reposContext || { section: 'orgs', org: req.org, }; const obj = { title: title, config: this.configuration, serviceBanner: this.setting('serviceMessage') ? this.setting('serviceMessage').banner : null, user: user, ossLink: this.entities.link, showBreadcrumbs: true, breadcrumbs: breadcrumbs, sudoMode: req.sudoMode, view: view, site: 'github', enableMultipleAccounts: req.session ? 
req.session.enableMultipleAccounts : false, }; if (obj.ossLink && reposContext) { obj.reposContext = reposContext; } if (optionalObject) { Object.assign(obj, optionalObject); } if (req.session && req.session.alerts && req.session.alerts.length && req.session.alerts.length > 0) { const alerts = []; Object.assign(alerts, req.session.alerts); req.session.alerts = []; for (let i = 0; i < alerts.length; i++) { if (typeof alerts[i] == 'object') { alerts[i].number = i + 1; } } obj.alerts = alerts; } if (reposContext && !reposContext.availableOrganizations) { this.getMyOrganizations((getMyOrgsError, organizations) => { if (!getMyOrgsError && organizations && Array.isArray(organizations)) { reposContext.availableOrganizations = organizations; res.render(view, obj); } }); } else { res.render(view, obj); } }; // ---------------------------------------------------------------------------- // Cheap breadcrumbs on a request object as it goes through our routes. Does // not actually store anything in the OSS instance at this time. // ---------------------------------------------------------------------------- OpenSourceUserContext.prototype.addBreadcrumb = function (req, breadcrumbTitle, optionalBreadcrumbLink) { utils.addBreadcrumb(req, breadcrumbTitle, optionalBreadcrumbLink); }; module.exports = OpenSourceUserContext; <file_sep>/views/settings/digestReports.pug //- //- Copyright (c) Microsoft. All rights reserved. //- Licensed under the MIT license. See LICENSE file in the project root for full license information. //- extends ./layout.pug block content div.container h1 Open source administrator report if availableReports && availableReports.length p.lead Please select a report: each report in availableReports h4: a(href='/settings/digestReports/administrator/' + report.id)= report.description else p No reports are currently available to you, that's great!<file_sep>/middleware/error-routes.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // /*eslint no-console: ["error", { allow: ["warn"] }] */ module.exports = function configureErrorRoutes(app, initializationError) { if (initializationError) { console.warn('Initialization Error Present: All app requests will fail!'); // For convienience, failed initialization should appear // for any request. Should evaluate whether to hide for // production scenarios or if there is a risk of the // error message leaking sensitive data. app.use((req, res, next) => { var error = new Error('Application initialization error'); error.detailed = initializationError.message || null; error.innerError = initializationError; return next(error); }); } app.use(function (req, res, next) { var err = new Error('Not Found'); err.status = 404; err.skipLog = true; next(err); }); app.use(require('./errorHandler')); }; <file_sep>/middleware/links/usernameConsistency.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // 'use strict'; const wrapError = require('../../utils').wrapError; module.exports = function (validateGitHubAccount) { // This middleware is designed to keep a link up-to-date with // available GitHub information. // // The lightweight version (validateGitHubAccount is falsey) will // just use any information in the session for the user; this is // if they have recently authenticated with that service. 
// // The heavier version will actually make a request for the user's // GitHub user ID, and look for any key updates to fields from there. // Due to the latency this should only be used on pages performing // data changes on behalf of the GitHub user. function lightweightSessionConsistency(req, res, next) { if (!req.oss || !req.oss.modernUser() || req.oss.modernUser().link === false) { return next(); } const link = req.oss.modernUser().link; if (req.user.azure && req.user.azure.oid && link.aadoid && req.user.azure.oid !== link.aadoid) { return next(new Error('Directory security identifier mismatch. Please submit a report to have this checked on.')); } if (req.user.github && req.user.github.id && link.ghid && req.user.github.id !== link.ghid) { let securityError = new Error('GitHub user security identifier mismatch. Did you delete your GitHub account and recreate an identically named one? Please submit a report to have this checked on for security purposes. Operations: if this is a valid request, delete remnents of the previous user account.'); const multipleAccountsEnabled = req.session.selectedGithubId && req.session.enableMultipleAccounts === true; if (multipleAccountsEnabled) { securityError = wrapError(null, 'You are currently signed in to an account on GitHub.com that is different than the one you have selected for your session. Please sign out of GitHub and head back.', true); securityError.fancyLink = { title: 'Sign out of GitHub', link: '/signout/github?redirect=github', }; } try { req.oss.invalidateLinkCache(req.oss.setting('authentication').scheme, req.oss.id.aad, () => {}); // Try to invalidate any cached links to help with ops scenarios } catch (ignoreError) { // This does not impact providing the user with an error message } return next(securityError); } const linkUpdates = {}; const updatedProperties = new Set(); const sessionToLinkMap = { github: { username: 'ghu', avatarUrl: 'ghavatar', accessToken: 'githubToken', }, githubIncreasedScope: { accessToken: 'githubTokenIncreasedScope', }, azure: { displayName: 'aadname', username: 'aadupn', }, }; for (let sessionKey in sessionToLinkMap) { for (let property in sessionToLinkMap[sessionKey]) { const linkProperty = sessionToLinkMap[sessionKey][property]; if (req.user[sessionKey] && req.user[sessionKey][property] && link[linkProperty] !== req.user[sessionKey][property]) { linkUpdates[linkProperty] = req.user[sessionKey][property]; updatedProperties.add(`${sessionKey}.${property}`); } } } if (updatedProperties.has('github.accessToken')) { linkUpdates.githubTokenUpdated = new Date().getTime(); } if (updatedProperties.has('githubIncreasedScope.accessToken')) { linkUpdates.githubTokenIncreasedScopeUpdated = new Date().getTime(); } if (Object.keys(linkUpdates).length === 0) { return next(); } Object.assign(link, linkUpdates); req.oss.modernUser().updateLink(link, (mergeError) => { if (mergeError) { req.insights.trackMetric('LinkConsistencyFailures', 1); req.insights.trackEvent('LinkConsistencyFailure', { updates: JSON.stringify(linkUpdates), error: mergeError.message, }); return next(mergeError); } req.insights.trackMetric('LinkConsistencySuccesses', 1); req.insights.trackEvent('LinkConsistencySuccess', { updates: JSON.stringify(linkUpdates), }); req.oss.setPropertiesFromLink(link, () => { next(); }); }); } function heavyConsistency(req, res, next) { 'use strict'; const context = req.oss; if (!context || !context.id.github) { return next(new Error('A middleware component expected a user context ahead of validating the GitHub 
account.')); } const operations = req.app.settings.operations; const account = operations.getAccount(context.id.github); account.getDetails((error) => { if (error) { return next(wrapError(error, 'Your GitHub account details could not be retrieved at this time through the GitHub API.')); } if (account.login === context.usernames.github) { return next(); } const oldLogin = context.usernames.github; const user = context.modernUser(); const link = user.link; link.ghu = account.login; if (account.avatar_url && account.avatar_url !== link.ghavatar) { link.ghavatar = account.avatar_url; } user.updateLink(link, (error) => { if (error) { req.insights.trackMetric('GitHubUserConsistencyFailures', 1); req.insights.trackEvent('GitHubUserConsistencyFailure', { oldLogin: oldLogin, oid: link.aadoid, login: account.login, error: error.message, }); return next(wrapError(error, 'It looks like your GitHub username has changed, but we were not able to update our records. Please try again soon or report this error.')); } req.insights.trackMetric('GitHubUserConsistencySuccesses', 1); req.insights.trackEvent('GitHubUserConsistencySuccess', { oldLogin: oldLogin, oid: link.aadoid, login: account.login, }); context.usernames.github = account.login; if (req.user.github) { req.user.github.username = account.login; } // Need to re-save the entire user req.login(req.user, () => { return next(); }); }); }); } return validateGitHubAccount ? heavyConsistency : lightweightSessionConsistency; }; <file_sep>/routes/unlink.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // const express = require('express'); const router = express.Router(); const async = require('async'); const utils = require('../utils'); router.use(function (req, res, next) { var oss = req.oss; var memberOfOrgs = []; async.each(oss.orgs(), function (o, callback) { o.queryUserMembership(false /* no caching */, function (error, result) { var state = null; if (result && result.state) { state = result.state; } if (state == 'active' || state == 'pending') { memberOfOrgs.push(o); } callback(error); }); }, function (error) { if (error) { return next(error); } req.currentOrganizationMemberships = memberOfOrgs; next(); }); }); router.get('/', function (req, res, next) { var oss = req.oss; oss.modernUser().getActiveOrganizationMemberships((error, currentOrganizationMemberships) => { if (error) { return next(error); } var link = req.oss.entities.link; if (link && link.ghu) { return req.oss.render(req, res, 'unlink', 'Remove corporate link and organization memberships', { orgs: currentOrganizationMemberships, }); } else { return next('No link could be found.'); } }); }); router.post('/', function (req, res, next) { req.oss.modernUser().unlinkAndDrop((error) => { req.insights.trackEvent('PortalUserUnlink'); if (error) { return next(utils.wrapError(error, 'You were successfully removed from all of your organizations. However, a minor failure happened during a data housecleaning operation. Double check that you are happy with your current membership status on GitHub.com before continuing. Press Report Bug if you would like this handled for sure.')); } res.redirect('/signout?unlink'); }); }); module.exports = router; <file_sep>/webhooks/tasks/membership.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. 
// /*eslint no-console: ["error", { allow: ["dir", "log"] }] */ 'use strict'; module.exports = { filter: function (data) { let eventType = data.properties.event; return eventType === 'membership'; }, run: function (operations, organization, data, callback) { if (data.body.action && data.body.scope && data.body.member.login && data.body.member.id) { console.log(`${data.body.organization.login} ${data.body.scope} membership: ${data.body.action} ${data.body.scope} ${data.body.member.login} ${data.body.member.id} team ${data.body.team.id} ${data.body.team.name}`); // update the team in question /* const immediateRefreshOptions = { backgroundRefresh: false, maxAgeSeconds: 0, }; */ console.log(`refreshing members in the team ${data.body.team.name} ${data.body.team.id} list`); const team = organization.team(data.body.team.id); // TODO: get team members team.getDetails(); team.getMembers({ backgroundRefresh: false, maxAgeSeconds: 0.1, }, (getMembersError, members) => { let num = ''; if (!getMembersError && members && members.length) { num = members.length; } console.log(`refreshed ${num} team members, getting maintainers`); team.getMembers({ role: 'maintainer', backgroundRefresh: false, maxAgeSeconds: 0.1, }, (getMaintainersError, maintainers) => { let num2 = ''; if (!getMaintainersError && maintainers && maintainers.length) { num2 = maintainers.length; } console.log(`refreshed ${num2} team maintainers`); }); }); } else { console.dir(data); } callback(); }, }; <file_sep>/routes/orgs.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // 'use strict'; const express = require('express'); const router = express.Router(); const orgRoute = require('./org/'); const utils = require('../utils'); router.use('/:orgName', function (req, res, next) { // This middleware contains both the original GitHub operations types // as well as the newer implementation. In time this will peel apart. const oss = req.oss; const orgName = req.params.orgName; const operations = req.app.settings.operations; try { req.org = oss.org(orgName); req.organization = operations.getOrganization(orgName); return next(); } catch (ex) { if (orgName.toLowerCase() === 'account') { return res.redirect('/'); } const err = utils.wrapError(null, 'Organization not found', true); err.status = 404; return next(err); } }); router.use('/:orgName', orgRoute); module.exports = router; <file_sep>/views/email/repoApprovals/decision.pug //- //- Copyright (c) Microsoft. All rights reserved. //- Licensed under the MIT license. See LICENSE file in the project root for full license information. //- extends ../body block content h1 #{pendingRequest.org}/#{pendingRequest.repoName} if wasApproved p | Repo URL:&nbsp; a(href='https://github.com/' + pendingRequest.org + '/' + pendingRequest.repoName, target='_new') https://github.com/#{pendingRequest.org}/#{pendingRequest.repoName} p.lead. Your repo <strong>"#{pendingRequest.repoName}"</strong> has been created in the <strong>"#{pendingRequest.org}"</strong> organization successfully. if pendingRequest.repoVisibility == 'private' p. Since the new repo is private, you may receive a 404 if you yourself are not a member of one of the teams that you requested have access. Let <EMAIL> know if you need support. if results.length h3 Operations Performed ul each result in results li(style=result.error ? 'color:red' : undefined)= result.message else p Unfortunately your request was not approved at this time.
Your request has been closed. if decisionNote h2 Message from decision maker table(style='width:80%') tbody tr td p.lead= decisionNote h2 Decision maker p = decisionBy br = decisionEmail if pendingRequest.justification h2 Your request table(style='width:80%') tbody tr td p= pendingRequest.justification <file_sep>/views/unlink.pug //- //- Copyright (c) Microsoft. All rights reserved. //- Licensed under the MIT license. See LICENSE file in the project root for full license information. //- extends layout block content //- Conditions for this page: // - already linked div.container h1 Unlink #{user.azure.username} and #{user.github.username} (GitHub)? p.lead Sorry to see you go. if orgs && orgs.length && orgs.length > 0 - var unlinkText = 'Unlink anyway' p Before you leave, you're currently a member of the following #{config.brand.companyName} organization#{orgs.length == 1 ? '' : 's'} on GitHub: ul each org in orgs li= org.name p Unlinking your accounts will remove you from all of the organizations listed above and <strong>may result in data loss</strong>. ul li Your GitHub account, #{user.github.username}, will lose privileged access to these orgs. li Private forks of repos from these orgs, if any, will be removed by GitHub. li Work committed in a private fork of these org's repos will be lost. else - var unlinkText = 'Unlink' p You are not in any #{config.brand.companyName} orgs, so nothing should change in your GitHub experience. p &nbsp; form(method='post') p input.btn.btn-lg.btn-danger(type='submit', value=unlinkText) | &nbsp; &nbsp; &nbsp; a.btn.btn-lg.btn-default(href='/') Cancel <file_sep>/routes/index-authenticated.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // /*eslint no-console: ["error", { allow: ["warn"] }] */ const _ = require('lodash'); const express = require('express'); const router = express.Router(); const async = require('async'); const OpenSourceUserContext = require('../lib/context'); const linkRoute = require('./link'); const linkedUserRoute = require('./index-linked'); const linkCleanupRoute = require('./link-cleanup'); const placeholdersRoute = require('./placeholders'); const settingsRoute = require('./settings'); const usernameConsistency = require('../middleware/links/usernameConsistency'); const utils = require('../utils'); router.use(function (req, res, next) { var config = req.app.settings.runtimeConfig; if (req.isAuthenticated()) { var expectedAuthenticationProperty = config.authentication.scheme === 'github' ? 'github' : 'azure'; if (req.user && !req.user[expectedAuthenticationProperty]) { console.warn(`A user session was authenticated but did not have present the property "${expectedAuthenticationProperty}" expected for this type of authentication. Signing them out.`); return res.redirect('/signout'); } var expectedAuthenticationKey = config.authentication.scheme === 'github' ? 'id' : 'oid'; if (!req.user[expectedAuthenticationProperty][expectedAuthenticationKey]) { return next(new Error('Invalid information present for the authentication provider.')); } return next(); } utils.storeOriginalUrlAsReferrer(req, res, config.authentication.scheme === 'github' ? 
'/auth/github' : '/auth/azure', 'user is not authenticated and needs to authenticate'); }); router.use((req, res, next) => { var options = { config: req.app.settings.runtimeConfig, dataClient: req.app.settings.dataclient, redisClient: req.app.settings.dataclient.cleanupInTheFuture.redisClient, redisHelper: req.app.settings.redisHelper, githubLibrary: req.app.settings.githubLibrary, ossDbClient: req.app.settings.ossDbConnection, request: req, insights: req.insights, }; new OpenSourceUserContext(options, (error, instance) => { req.oss = instance; if (error && (error.tooManyLinks === true || error.anotherAccount === true)) { // The only URL permitted in this state is the cleanup endpoint and special multiple-account endpoint if (req.url === '/link/cleanup' || req.url === '/link/enableMultipleAccounts' || req.url.startsWith('/placeholder')) { return next(); } return res.redirect('/link/cleanup'); } instance.addBreadcrumb(req, 'Organizations'); return next(error); }); }); router.use('/placeholder', placeholdersRoute); router.use('/link/cleanup', linkCleanupRoute); router.use('/link', linkRoute); router.use('/settings', settingsRoute); // Link cleanups router.use(usernameConsistency()); // Ensure we have a GitHub token for AAD users once they are linked. This is // for users of the portal before the switch to supporting primary authentication // of a type other than GitHub. router.use((req, res, next) => { if (req.app.settings.runtimeConfig.authentication.scheme === 'aad' && req.oss && req.oss.modernUser()) { var link = req.oss.modernUser().link; if (link && !link.githubToken) { return utils.storeOriginalUrlAsReferrer(req, res, '/link/reconnect', 'no GitHub token or not a link while authenticating inside of index-authenticated.js'); } } next(); }); router.get('/', function (req, res, next) { const operations = req.app.settings.providers.operations; var oss = req.oss; var link = req.oss.entities.link; var config = req.app.settings.runtimeConfig; var onboarding = req.query.onboarding !== undefined; // var allowCaching = onboarding ? false : true; if (!link) { if (config.authentication.scheme === 'github' && req.user.azure === undefined || config.authentication.scheme === 'aad' && req.user.github === undefined) { return oss.render(req, res, 'welcome', 'Welcome'); } if (config.authentication.scheme === 'github' && req.user.azure && req.user.azure.oid || config.authentication.scheme === 'aad' && req.user.github && req.user.github.id) { return res.redirect('/link'); } return next(new Error('This account is not yet linked, but a workflow error is preventing further progress. Please report this issue. Thanks.')); } // They're changing their corporate identity (rare, often just service accounts) if (config.authentication.scheme === 'github' && link && link.aadupn && req.user.azure && req.user.azure.username && req.user.azure.username.toLowerCase() !== link.aadupn.toLowerCase()) { return res.redirect('/link/update'); } // var twoFactorOff = null; var warnings = []; var activeOrg = null; async.parallel({ isLinkedUser: function (callback) { var link = oss.entities.link; callback(null, link && link.ghu ? 
link : false); }, overview: (callback) => { const id = oss.id.github; if (!id) { return callback(); } const uc = operations.getUserContext(id); return uc.getAggregatedOverview(callback); }, isAdministrator: function (callback) { callback(null, false); // oss.isAdministrator(callback); // CONSIDER: Re-implement isAdministrator } }, function (error, results) { if (error) { return next(error); } const overview = results.overview; results.countOfOrgs = operations.organizations.length; let groupedAvailableOrganizations = null; // results may contains undefined returns because we skip some errors to make sure homepage always load successfully. if (overview.organizations) { if (overview.organizations.member.length) { results.countOfOrgs = overview.organizations.member.length; if (overview.organizations.member.length > 0) { results.twoFactorOn = true; // TODO: How to verify in a world with some mixed 2FA value orgs? } } if (overview.organizations.available) { groupedAvailableOrganizations = _.groupBy(operations.getOrganizations(overview.organizations.available), 'priority'); } } if (results.isAdministrator && results.isAdministrator === true) { results.isSudoer = true; } if (results.twoFactorOff === true) { var tempOrgNeedToFix = oss.org(); return res.redirect(tempOrgNeedToFix.baseUrl + 'security-check'); } var render = function (results) { if (warnings && warnings.length > 0) { req.oss.saveUserAlert(req, warnings.join(', '), 'Some organizations or memberships could not be loaded', 'danger'); } var pageTitle = results && results.userOrgMembership === false ? 'My GitHub Account' : config.brand.companyName + ' - ' + config.brand.appName; oss.render(req, res, 'index', pageTitle, { accountInfo: results, onboarding: onboarding, onboardingPostfixUrl: onboarding === true ? '?onboarding=' + config.brand.companyName : '', activeOrgUrl: activeOrg ? activeOrg.baseUrl : '/?', getOrg: (orgName) => { return operations.getOrganization(orgName); }, groupedAvailableOrganizations: groupedAvailableOrganizations, }); }; if (overview.teams && overview.teams.maintainer) { const maintained = overview.teams.maintainer; if (maintained.length > 0) { var teamsMaintainedHash = {}; maintained.forEach(maintainedTeam => { teamsMaintainedHash[maintainedTeam.id] = maintainedTeam; }); results.teamsMaintainedHash = teamsMaintainedHash; // dc.getPendingApprovals(teamsMaintained, function (error, pendingApprovals) { // if (error) { // return next(error); // } // results.pendingApprovals = pendingApprovals; // render(results); // }); } } render(results); }); }); router.use(linkedUserRoute); module.exports = router; <file_sep>/views/settings/authorizations.pug //- //- Copyright (c) Microsoft. All rights reserved. //- Licensed under the MIT license. See LICENSE file in the project root for full license information. //- extends layout block content h1 Account authorizations if authorizations.length === 0 p There are no services or accounts currently authorized and stored with your open source account. each authorization in authorizations h2= authorization.title if authorization.text p.lead= authorization.text if authorization.valid - var valid = authorization.valid - var alert = valid.valid === true ? 'success' : valid.critical === true ? 
'danger' : 'warning' div.alert(class='alert-' + alert) if valid.valid h2 Valid Token else h2 Validation Failed if valid.message && valid.valid p= valid.message else if valid.message p strong= valid.message if valid.rateLimitRemaining p small= valid.rateLimitRemaining each mitigation in authorization.mitigations h5= mitigation.title if mitigation.text p= mitigation.text if mitigation.url p a.btn.btn-default.btn-sm(href=mitigation.url)= mitigation.mitigation hr h4 Validate tokens p If requested by support, you can use this function to validate the current tokens stored for your account. a.btn.btn-default.btn-sm(href='/settings/authorizations/validate') Validate authorizations<file_sep>/routes/org/join.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // const express = require('express'); const router = express.Router(); const usernameConsistency = require('../../middleware/links/usernameConsistency'); const utils = require('../../utils'); router.use(function (req, res, next) { var org = req.org; var err = null; if (org.setting('locked')) { err = new Error('This organization is locked to new members.'); err.detailed = 'At this time, the maintainers of the "' + org.name + '" organization have decided to not enable onboarding through this portal.'; err.skipLog = true; } next(err); }); // The join route is an important part of the onboarding experience, so we should // burn a few additional tokens validating the user's username. This route is // using the newer operations codepath. router.use(usernameConsistency(true /* use GitHub API */)); router.get('/', function (req, res, next) { const org = req.org; const context = req.oss; const userIncreasedScopeToken = context && context.tokens ? context.tokens.githubIncreasedScope : null; var onboarding = req.query.onboarding; var showTwoFactorWarning = false; var showApplicationPermissionWarning = false; var writeOrgFailureMessage = null; org.queryUserMembership(false /* do not allow caching */, function (error, result) { var state = result && result.state ? result.state : false; var clearAuditListAndRedirect = function () { org.clearAuditList(function () { var url = org.baseUrl + 'security-check' + (onboarding ? '?onboarding=' + onboarding : '?joining=' + org.name); res.redirect(url); }); }; var showPage = function () { org.getDetails(function (error, details) { if (error) { return next(error); } var userDetails = details ? org.oss.user(details.id, details) : null; var title = org.name + ' Organization Membership ' + (state == 'pending' ? 'Pending' : 'Join'); req.oss.render(req, res, 'org/pending', title, { result: result, state: state, hasIncreasedScope: userIncreasedScopeToken ? true : false, org: org, orgUser: userDetails, onboarding: onboarding, writeOrgFailureMessage: writeOrgFailureMessage, showTwoFactorWarning: showTwoFactorWarning, showApplicationPermissionWarning: showApplicationPermissionWarning, }); }); }; if (state == 'active') { clearAuditListAndRedirect(); } else if (state == 'pending' && userIncreasedScopeToken) { org.acceptOrganizationInvitation(userIncreasedScopeToken, function (error, updatedState) { if (error) { // We do not error out, they can still fall back on the // manual acceptance system that the page will render. writeOrgFailureMessage = error.message || 'The GitHub API did not allow us to join the organization for you. 
Follow the instructions to continue.'; if (error.statusCode == 401) { // These comparisons should be == and not === return redirectToIncreaseScopeExperience(req, res, 'GitHub API status code was 401'); } else if (error.statusCode == 403 && writeOrgFailureMessage.includes('two-factor')) { showTwoFactorWarning = true; } else if (error.statusCode == 403) { showApplicationPermissionWarning = true; } } if (!error && updatedState && updatedState.state === 'active') { return clearAuditListAndRedirect(); } showPage(); }); } else { showPage(); } }); }); function redirectToIncreaseScopeExperience(req, res, optionalReason) { utils.storeOriginalUrlAsReferrer(req, res, '/auth/github/increased-scope', optionalReason); } router.get('/express', function (req, res, next) { var org = req.org; var onboarding = req.query.onboarding; const context = req.oss; org.queryUserMembership(false /* do not allow caching */, function (error, result) { var state = result && result.state ? result.state : false; if (state == 'active' || state == 'pending') { res.redirect(org.baseUrl + 'join' + (onboarding ? '?onboarding=' + onboarding : '?joining=' + org.name)); } else if (context && context.tokens.githubIncreasedScope) { joinOrg(req, res, next); } else { utils.storeOriginalUrlAsReferrer(req, res, '/auth/github/increased-scope', 'need to get increased scope and current org state is ' + state); } }); }); function joinOrg(req, res, next) { var org = req.org; var onboarding = req.query.onboarding; var everyoneTeam = org.getAllMembersTeam(); var username = req.oss.usernames.github; everyoneTeam.addMembership('member', function (error) { if (error) { req.insights.trackMetric('GitHubOrgInvitationFailures', 1); req.insights.trackEvent('GitHubOrgInvitationFailure', { org: org.name, username: username, error: error.message, }); var specificMessage = error.message ? 'Error message: ' + error.message : 'Please try again later. If you continue to receive this message, please reach out for us to investigate.'; if (error.code === 'ETIMEDOUT') { specificMessage = 'The GitHub API timed out.'; } return next(utils.wrapError(error, `We had trouble sending you an invitation through GitHub to join the ${org.name} organization. ${username} ${specificMessage}`)); } req.insights.trackMetric('GitHubOrgInvitationSuccesses', 1); req.insights.trackEvent('GitHubOrgInvitationSuccess', { org: org.name, username: username, }); res.redirect(org.baseUrl + 'join' + (onboarding ? '?onboarding=' + onboarding : '?joining=' + org.name)); }); } router.post('/', joinOrg); module.exports = router; <file_sep>/lib/npm/publish.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. 
// /*eslint no-console: ["error", { allow: ["warn"] }] */ 'use strict'; const exec = require('child-process-promise').exec; const fs = require('mz/fs'); const npmRegistryClient = require('npm-registry-client'); const path = require('path'); const Q = require('q'); const tmp = require('tmp'); const emailRender = require('../../lib/emailRender'); const findNpm = require('./findNpm'); const wrapError = require('../../utils').wrapError; const defaultCloneTimeout = 1000 * 60 * 2; // allow 2 minutes to clone the repository const defaultPublishTimeout = 1000 * 60 * 1; // allow 1 minute to publish the package const npmRegistryAuthUri = '//registry.npmjs.org/'; const npmRegistryRootUri = `https:${npmRegistryAuthUri}`; const npmRegistryUri = `${npmRegistryRootUri}npm`; // OPTIONS //--------- // operations (required) // npm.username (on behalf of username, should have been validated in the past) // allowPublishingExistingPackages (whether to allow any user to publish to an official package, false by default) // ignorePublishScripts (acknowledgement from the user that their publish scripts will be ignored if scripts are present) // collaborators (array of npm usernames who will be given permission to also push the package) // notify (array of e-mail addresses to notify about the npm creation) module.exports = function publish(options) { const operations = options.operations; const npmOnBehalfOf = options.npm ? options.npm.username : null; if (!operations) { return Q.reject(new Error('Missing operations parameter')); } if (!npmOnBehalfOf) { return Q.reject(new Error('Missing requestor NPM username')); } if (!operations.config.npm.publishing.token) { return Q.reject(new Error('Missing NPM token')); } if (!options.clone) { return Q.reject(new Error('No Git repo provided to clone')); } const context = { options: options, temp: {}, package: null, log: [], }; const finalCleanupTemporaryPath = cleanupIfNeeded.bind(null, context); return Q(context) .then(learnNpmVersion) .then(learnNpmUser) .then(createTemporaryPath) .then(cloneRepository) .then(discoverPackageMetadata) .then(validatePackageOwnership) .then(processPublishScripts) .then(placePublishingToken) .then(publishPackage) .then(assignOwners) .then(notify) .finally(finalCleanupTemporaryPath); }; function notify(context) { const operations = context.options.operations; const notifyPeople = context.options.notify; const notifyPerson = operations.config.npm.publishing ? operations.config.npm.publishing.notify : null; const mailProvider = operations.mailProvider; if (!mailProvider) { return Q(context); } return getUserEmailAddress(context, operations).then(address => { let to = []; let cc = []; if (address) { to.push(address); } if (notifyPeople && notifyPeople.length) { for (let i = 0; i < notifyPeople.length; i++) { to.push(notifyPeople[i]); } } if (notifyPerson) { (to.length === 0 ? to : cc).push(notifyPerson); } return sendEmail(mailProvider, to, cc, context); }); } function sendEmail(mailProvider, to, cc, context) { const deferred = Q.defer(); const headline = context.package.name + ' published'; const subject = `NPM ${context.packageVersionedName} published by ${context.options.upn}`; const emailTemplate = 'npm/published'; const mail = { to: to, cc: cc, from: context.options.operations.config.npm.publishing.notifyFrom, subject: subject, reason: `You are receiving this e-mail because ${context.options.upn} published an NPM package and chose to notify you of this event. 
To stop receiving mails like this, please approach ${context.options.upn} and ask to no longer be notified.`, headline: headline, classification: 'information', service: 'Microsoft NPM publishing', }; const contentOptions = { log: context.log, context: context, }; emailRender.render(context.options.basedir, emailTemplate, contentOptions, (renderError, mailContent) => { if (renderError) { return deferred.resolve(context); } mail.content = mailContent; mailProvider.sendMail(mail, (mailError) => { if (mailError) { context.log.push('There was a problem sending the notification e-mail, but the package was published OK'); } else { context.log.push('Sent notification e-mail'); } return deferred.resolve(context); }); }); return deferred.promise; } function getUserEmailAddress(context, operations) { const deferred = Q.defer(); const mailAddressProvider = operations.mailAddressProvider; if (!mailAddressProvider) { return Q(); } const upn = context.options.upn; if (!upn) { return Q(); } mailAddressProvider.getAddressFromUpn(upn, (resolveError, mailAddress) => { if (!resolveError && mailAddress) { return deferred.resolve(mailAddress); } return deferred.resolve(); }); return deferred.promise; } function assignOwners(context) { const ownerPromises = []; const collaborators = context.options.collaborators || []; collaborators.push(context.options.npm.username); for (let i = 0; i < collaborators.length; i++) { ownerPromises.push(assignOwner(context, context.packageName, collaborators[i])); } return Q.allSettled(ownerPromises).then(settled => { for (let i = 0; i < settled.length; i++) { const st = settled[i]; const settledMessage = st.reason || st.value; context.log.push(settledMessage); } return Q(context); }); } function assignOwner(context, packageName, newOwner) { const cmd = `${context.npmLocation} owner add ${newOwner}`; const options = { cwd: context.temp.repoPath, }; return exec(cmd, options).then(npmOutput => { const cp = npmOutput.childProcess; if (!cp || cp.exitCode !== 0) { throw new Error('There was a problem with NPM.'); } // note, output will have + then the username IF they are added context.log.push(`Added ${newOwner} as a package collaborator`); return Q(); }); } function placePublishingToken(context) { const cwd = context.temp.repoPath; const npmrc = path.join(cwd, '.npmrc'); const npmToken = context.options.operations.config.npm.publishing.token; if (!npmToken) { return Q.reject(new Error('No publishing token is available')); } const token = `${npmRegistryAuthUri}:_authToken=${npmToken}`; return fs.writeFile(npmrc, token, 'utf8').then(() => { return Q(context); }, failed => { throw wrapError(failed, 'Could not authorize the system to publish the package'); }); } function learnNpmUser(context) { const deferred = Q.defer(); const config = context.options.operations.config; const npm = new npmRegistryClient(); const npmParameters = { timeout: 2000, auth: { token: config.npm.publishing.token, }, }; npm.whoami(npmRegistryUri, npmParameters, (error, username) => { if (error) { return deferred.reject(wrapError(error, 'Could not validate the publishing NPM user')); } context.npmServiceAccount = username; return deferred.resolve(context); }); return deferred.promise; } function discoverPackageMetadata(context) { const cwd = context.temp.repoPath; const pkgPath = path.join(cwd, 'package.json'); return fs.readFile(pkgPath, 'utf8').then(contents => { const packageParsed = JSON.parse(contents); context.package = packageParsed; return Q(context); }, notFound => { return Q.reject(wrapError(notFound, 
'The repository does not have a "package.json" file in the root, so cannot be published by this system. Is it a Node.js library or application?')); }); } function processPublishScripts(context) { if (!context || !context.package) { throw new Error('The "package.json" file could not be properly processed.'); } const scripts = context.package.scripts; if (!scripts) { return Q(context); } const packageName = context.package.name; const packageVersionedName = `${packageName}@${context.package.version}`; context.packageVersionedName = packageVersionedName; context.log.push(`Publishing version ${packageVersionedName}`); const ignorePublishScripts = context.options.ignorePublishScripts || false; const scriptsNotPermitted = [ 'prepublish', 'prepare', // New for NPM 4 'prepublishOnly', // New for NPM 4, temporarily ]; // Scan for scripts that could be a security concern. The underlying worry is // that a publish script, of which there are a few varieties in the NPM 4 era, // could learn how to read the official token for the organization, going // rogue. This does mean that packages that are built in CoffeeScript or TS // will need to either publish a dummy first package, or create a temporary // branch with the output, etc. This is also actually a good thing, since the // goal here is not to be a build server or offer runtime/devtime dep. to // build a proper package. let updatedPackage = false; let invalidScripts = []; for (let i = 0; i < scriptsNotPermitted.length; i++) { const scriptName = scriptsNotPermitted[i]; if (scripts[scriptName]) { if (ignorePublishScripts) { delete scripts[scriptName]; context.log.push(`For security reasons, the ${scriptName} script was not processed for this publish operation. Please publish an incremental update using your authorized NPM client if you need the script to properly build the release package.`); updatedPackage = true; } else { invalidScripts.push(scriptName); } } } // Interrupt the process for a user choice if (invalidScripts.length > 0) { const scriptsList = invalidScripts.join(', '); const userChoiceError = new Error(`The package.json file for the ${packageVersionedName} NPM contains scripts that cannot be executed for security purposes. This system is not a build server. 
The script(s) in question are: ${scriptsList}.`); userChoiceError.userChoice = true; userChoiceError.userChoiceType = 'removeScripts'; userChoiceError.npmScriptNames = invalidScripts; throw userChoiceError; } if (updatedPackage) { context.securityUpdates = true; return updateLocalPackage(context); } else { return Q(context); } } function updateLocalPackage(context) { const updatedPackage = context.package; const content = JSON.stringify(updatedPackage, undefined, 2); const cwd = context.temp.repoPath; const pkgPath = path.join(cwd, 'package.json'); return fs.writeFile(pkgPath, content, 'utf8').then(() => { context.log.push('Updated package.json file for security purposes'); return Q(context); }, (/*failedWrite*/) => { throw new Error('Could not update the package.json ahead of publishing'); }); } function learnNpmVersion(context) { return findNpm().then(npmLocation => { if (npmLocation.includes(' ')) { npmLocation = '"' + npmLocation + '"'; } context.npmLocation = npmLocation; const cmd = `${npmLocation} -v`; return exec(cmd).then(npmOutput => { const cp = npmOutput.childProcess; if (!cp || cp.exitCode !== 0) { throw new Error('There was a problem with NPM.'); } const version = (npmOutput.stdout || '').trim(); if (!version) { throw new Error('There was a problem trying to identify the NPM version available to the publishing service.'); } context.npmVersion = version; context.log.push(`Using NPM version ${version} to publish`); return Q(context); }, failure => { throw wrapError(failure, 'NPM is not available for publishing at this time or cannot be found.'); }); }); } function publishPackage(context) { const options = { cwd: context.temp.repoPath, timeout: context.options.publishTimeout || defaultPublishTimeout, }; const cmd = `${context.npmLocation} publish --access public`; return exec(cmd, options).then((/*publishResult*/) => { return Q(context); // exitCode for the process is 0; stderr is empty and stdout has the log }, failure => { if (failure.code === 1 && failure.stderr && failure.stderr.includes('cannot publish over')) { throw wrapError(failure, `You cannot publish over a previously published identical version ${context.packageVersionedName}. Please commit an incremented version to your package.json file.`, true); } throw failure; }); } function cloneRepository(context) { let gitRepo = context.options.clone; let gitBranch = context.options.branch || 'master'; const localRepoPath = 'repo'; const options = { cwd: context.temp.path, timeout: context.options.cloneTimeout || defaultCloneTimeout, }; const cmd = `git clone ${gitRepo} --branch ${gitBranch} --single-branch ${localRepoPath}`; context.temp.repoPath = path.join(options.cwd, localRepoPath); return exec(cmd, options).then(() => { return Q(context); }, failure => { let error = failure; if (failure.killed) { error = new Error(`The Git repository ${gitRepo} and branch ${gitBranch} could not be cloned and processed in time.
The operation took too long.`); } else if (failure.stderr) { error = new Error(`The Git repository ${gitRepo} (branch ${gitBranch}) ran into trouble trying to clone and process: ${failure.stderr}`); } throw error; }); } function createTemporaryPath(context) { const deferred = Q.defer(); const pathOptions = { unsafeCleanup: true, prefix: 'npm-', }; tmp.dir(pathOptions, (createPathError, path, cleanupCallback) => { if (createPathError) { return deferred.reject(createPathError); } context.temp.path = path; context.temp.cleanup = cleanupCallback; return deferred.resolve(context); }); return deferred.promise; } function cleanupIfNeeded(context) { if (context && context.temp && context.temp.cleanup) { const deferred = Q.defer(); context.temp.cleanup(() => { deferred.resolve(); }); return deferred.promise; } else { return Q(); } } function validatePackageOwnership(context) { // If we have "read-write" access to an existing owner, either // the user making the request or the primary account name, this // is OK. If allowPublishingExistingPackages is not enabled, // then only the user themselves can be authorized to publish // here. That is to prevent security incidents where someone // overwrites a package with a new version. const allowedUsernames = new Set(); const allowPublishingExistingPackages = context.options.allowPublishingExistingPackages || false; const npmServiceAccount = context.npmServiceAccount.toLowerCase(); // These are the usernames we will use to see whether the user can authorize the publish if (allowPublishingExistingPackages) { allowedUsernames.add(npmServiceAccount); } const onBehalfOfUser = context.options.npm.username.toLowerCase(); allowedUsernames.add(onBehalfOfUser); const deferred = Q.defer(); const packageName = context.package.name; const config = context.options.operations.config; const npm = new npmRegistryClient(); const params = { package: context.package.name, }; const packageUri = `${npmRegistryRootUri}${packageName}`; npm.get(packageUri, params, (error, packageData) => { if (error && error.statusCode === 404) { context.log.push(`Verified that there is not yet a package named ${packageName} in the NPMJS registry.`); return deferred.resolve(context); } if (error) { return deferred.reject(error); } context.packageData = packageData; const params = { package: packageName, auth: { token: config.npm.publishing.token, }, }; // Get the current owners of the package return npm.access('ls-collaborators', npmRegistryUri, params, (error, collaborators) => { if (error) { return deferred.reject(wrapError(error, `Could not validate what NPM users have permission to publish the ${packageName} package.`)); } let authorizedPublisher = false; let serviceAccountCanPublish = false; const usernames = Object.getOwnPropertyNames(collaborators); for (let i = 0; i < usernames.length; i++) { const username = usernames[i]; if (collaborators[username] === 'read-write') { const lc = username.toLowerCase(); if (lc === npmServiceAccount) { serviceAccountCanPublish = true; } if (allowedUsernames.has(lc)) { authorizedPublisher = true; } } } if (!serviceAccountCanPublish) { return deferred.reject(new Error(`The service account for publishing, ${npmServiceAccount}, is not authorized to publish the ${packageName} package to NPMJS`)); } if (!authorizedPublisher) { return deferred.reject(new Error(`${onBehalfOfUser} is not authorized to publish the ${packageName} package to NPMJS`)); } context.log.push(`Publishing the package as ${npmServiceAccount} on behalf of authorized package collaborator 
${onBehalfOfUser}`); return deferred.resolve(context); }); }); return deferred.promise; } <file_sep>/routes/api/createRepo.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // 'use strict'; const _ = require('lodash'); const async = require('async'); const emailRender = require('../../lib/emailRender'); const jsonError = require('./jsonError'); const RepoWorkflowEngine = require('../org/RepoWorkflowEngine.js'); const supportedLicenseExpressions = [ 'mit', '(mit and cc-by-4.0)', 'other', ]; const hardcodedApprovalTypes = [ 'ReleaseReview', 'SmallLibrariesToolsSamples', 'Migrate', 'Exempt', ]; const hardcodedClaEntities = [ 'Microsoft', '.NET Foundation', ]; function createRepo(req, res, convergedObject, token, callback, doNotCallbackForSuccess) { if (!req.org) { return callback(jsonError(new Error('No organization available in the route.'), 400)); } const operations = req.app.settings.operations; const dc = req.app.settings.dataclient; const mailProvider = req.app.settings.mailProvider; const ourFields = [ 'ms.onBehalfOf', 'ms.license', 'ms.approval', 'ms.approval-url', 'ms.justification', 'ms.cla-entity', 'ms.cla-mail', 'ms.notify', 'ms.teams', 'ms.template', ]; const properties = {}; const parameters = req.body; ourFields.forEach((fieldName) => { if (parameters[fieldName] !== undefined) { properties[fieldName] = parameters[fieldName]; delete parameters[fieldName]; } }); const msProperties = { onBehalfOf: properties['ms.onBehalfOf'] || req.headers['ms-onbehalfof'], justification: properties['ms.justification'] || req.headers['ms-justification'], license: properties['ms.license'] || req.headers['ms-license'], approvalType: properties['ms.approval'] || req.headers['ms-approval'], approvalUrl: properties['ms.approval-url'] || req.headers['ms-approval-url'], claMail: properties['ms.cla-mail'] || req.headers['ms-cla-mail'], claEntity: properties['ms.cla-entity'] || req.headers['ms-cla-entity'], notify: properties['ms.notify'] || req.headers['ms-notify'], teams: properties['ms.teams'] || req.headers['ms-teams'], template: properties['ms.template'] || req.headers['ms-template'], }; // Validate licenses let msLicense = msProperties.license; if (!msLicense) { return callback(jsonError('Missing Microsoft license information', 422)); } msLicense = msLicense.toLowerCase(); if (supportedLicenseExpressions.indexOf(msLicense) < 0) { return callback(jsonError('The provided license expression is not currently supported', 422)); } // Validate approval types const msApprovalType = msProperties.approvalType; if (!msApprovalType) { return callback(jsonError('Missing Microsoft approval type information', 422)); } if (hardcodedApprovalTypes.indexOf(msApprovalType) < 0) { return callback(jsonError('The provided approval type is not supported', 422)); } // Validate specifics of what is in the approval switch (msApprovalType) { case 'ReleaseReview': if (!msProperties.approvalUrl) { return callback(jsonError('Approval URL for the release review is required when using the release review approval type', 422)); } break; case 'SmallLibrariesToolsSamples': break; case 'Migrate': break; case 'Exempt': if (!msProperties.justification) { return callback(jsonError('Justification is required when using the exempted approval type', 422)); } break; default: return callback(jsonError('The requested approval type is not currently supported.', 422)); } // Validate CLA entity if (msProperties.claEntity && 
hardcodedClaEntities.indexOf(msProperties.claEntity) < 0) { return callback(jsonError('The provided CLA entity name is not supported', 422)); } parameters.org = req.org.name; const organization = operations.getOrganization(parameters.org); // TODO: POST-1ES DAY REMOVE/FIX UNNEEDED CODE HERE delete parameters.confirmedPolicyException; operations.github.call(token, 'repos.createForOrg', parameters, (error, result) => { if (error) { // TODO: insights return callback(jsonError(error, error.code || 500)); } // strip an internal "cost" part off our response object delete result.cost; // from this point on any errors should roll back req.repoCreateResponse = { github: result, name: result && result.name ? result.name : undefined, }; req.approvalRequest = { ghu: msProperties.onBehalfOf, justification: msProperties.justification, requested: ((new Date()).getTime()).toString(), active: false, license: msProperties.license, type: 'repo', org: req.org.name.toLowerCase(), repoName: result.name, repoId: result.id, repoDescription: result.description, repoUrl: result.homepage, repoVisibility: result.private ? 'private' : 'public', approvalType: msProperties.approvalType, approvalUrl: msProperties.approvalUrl, claMail: msProperties.claMail, claEntity: msProperties.claEntity, template: msProperties.template, // API-specific: apiVersion: req.apiVersion, api: true, correlationId: req.correlationId, }; let teamNumber = 0; const teamTypes = ['pull', 'push', 'admin']; downgradeBroadAccessTeams(organization, msProperties.teams); for (let i = 0; msProperties.teams && i < teamTypes.length; i++) { const teamType = teamTypes[i]; const idList = msProperties.teams[teamType]; if (idList && idList.length) { for (let j = 0; j < idList.length; j++) { const num = teamNumber++; const prefix = 'teamid' + num; req.approvalRequest[prefix] = idList[j]; req.approvalRequest[prefix + 'p'] = teamType; } } } req.approvalRequest.teamsCount = teamNumber; dc.insertGeneralApprovalRequest('repo', req.approvalRequest, (insertRequestError, requestId) => { if (insertRequestError) { return rollbackRepoError(req, res, callback, 'There was a problem recording information about the repo request', 500, insertRequestError); } req.approvalRequest['ms.approvalId'] = requestId; const repoWorkflow = new RepoWorkflowEngine(null, req.org, { request: req.approvalRequest }); repoWorkflow.generateSecondaryTasks(function (err, tasks) { async.series(tasks || [], function (taskErr, output) { if (taskErr) { return rollbackRepoError(req, res, callback, 'There was a problem with secondary tasks associated with the repo request', 500, taskErr); } if (output) { req.repoCreateResponse.tasks = output; } function done() { if (doNotCallbackForSuccess) { res.status(201); return res.json(req.repoCreateResponse); } else { return callback(null, req.repoCreateResponse); } } if (msProperties.notify && mailProvider) { sendEmail(req, mailProvider, req.apiKeyRow, req.correlationId, output, req.approvalRequest, msProperties, () => { done(); }); } else { done(); } }); }); }); }); } function downgradeBroadAccessTeams(organization, teams) { const broadAccessTeams = new Set(organization.broadAccessTeams); if (teams.admin && Array.isArray(teams.admin)) { _.remove(teams.admin, teamId => { if (broadAccessTeams.has(teamId)) { if (!teams.pull) { teams.pull = []; } teams.pull.push(teamId); return true; } return false; }); } if (teams.pull && Array.isArray(teams.pull)) { teams.pull = _.uniq(teams.pull); // deduplicate } } function rollbackRepoError(req, res, next, error, statusCode, 
errorToLog) { const err = jsonError(error, statusCode); if (errorToLog) { req.insights.trackException(errorToLog, { event: 'ApiRepoCreateRollbackError', message: error && error.message ? error.message : error, }); } if (!req.org || !req.repoCreateResponse || !req.repoCreateResponse.name) { return next(err); } const repo = req.org.repo(req.repoCreateResponse.name); repo.delete(() => { return next(err); }); } function sendEmail(req, mailProvider, apiKeyRow, correlationId, repoCreateResults, approvalRequest, msProperties, callback) { const config = req.app.settings.runtimeConfig; const emails = msProperties.notify.split(','); const headline = 'Repo ready'; const serviceShortName = apiKeyRow && apiKeyRow.service ? apiKeyRow.service : undefined; const subject = serviceShortName ? `${approvalRequest.repoName} repo created by ${serviceShortName}` : `${approvalRequest.repoName} repo created`; const emailTemplate = 'repoApprovals/autoCreated'; const displayHostname = req.hostname; const approvalScheme = displayHostname === 'localhost' && config.webServer.allowHttp === true ? 'http' : 'https'; const reposSiteBaseUrl = `${approvalScheme}://${displayHostname}/`; const mail = { to: emails, subject: subject, reason: `You are receiving this e-mail because an API request included the e-mail notification address(es) ${msProperties.notify} during the creation of a repo.`, headline: headline, classification: 'information', service: 'Microsoft GitHub', correlationId: correlationId, }; const contentOptions = { correlationId: correlationId, approvalRequest: approvalRequest, results: repoCreateResults, version: config.logging.version, reposSiteUrl: reposSiteBaseUrl, api: serviceShortName, // when used by the client single-page app, this is not considered an API call service: serviceShortName, serviceOwner: apiKeyRow ? apiKeyRow.owner : undefined, serviceDescription: apiKeyRow ? apiKeyRow.description : undefined, }; emailRender.render(req.app.settings.basedir, emailTemplate, contentOptions, (renderError, mailContent) => { if (renderError) { req.insights.trackException(renderError, { content: contentOptions, eventName: 'ApiRepoCreateMailRenderFailure', }); return callback(renderError); } mail.content = mailContent; mailProvider.sendMail(mail, (mailError, mailResult) => { const customData = { content: contentOptions, receipt: mailResult, }; if (mailError) { customData.eventName = 'ApiRepoCreateMailFailure'; req.insights.trackException(mailError, customData); return callback(mailError); } req.insights.trackEvent('ApiRepoCreateMailSuccess', customData); req.repoCreateResponse.notified = emails; callback(); }); }); } module.exports = createRepo; <file_sep>/middleware/keyVault.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. 
// 'use strict'; const adalNode = require('adal-node'); const azureKeyVault = require('azure-keyvault'); module.exports = function createClient(kvConfig) { if (!kvConfig.clientId) { throw new Error('KeyVault client ID required at this time for the middleware to initialize.'); } if (!kvConfig.clientSecret) { throw new Error('KeyVault client credential/secret required at this time for the middleware to initialize.'); } const authenticator = (challenge, authCallback) => { const context = new adalNode.AuthenticationContext(challenge.authorization); return context.acquireTokenWithClientCredentials(challenge.resource, kvConfig.clientId, kvConfig.clientSecret, (tokenAcquisitionError, tokenResponse) => { if (tokenAcquisitionError) { return authCallback(tokenAcquisitionError); } const authorizationValue = `${tokenResponse.tokenType} ${tokenResponse.accessToken}`; return authCallback(null, authorizationValue); }); }; const credentials = new azureKeyVault.KeyVaultCredentials(authenticator); const keyVaultClient = new azureKeyVault.KeyVaultClient(credentials); return keyVaultClient; }; <file_sep>/webhooks/tasks/automaticTeams.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // /*eslint no-console: ["error", { allow: ["log", "warn"] }] */ 'use strict'; const teamTypes = ['read', 'write', 'admin']; const defaultLargeAdminTeamSize = 100; const async = require('async'); const emailRender = require('../../lib/emailRender'); function processOrgSpecialTeams(organization) { const specialTeams = organization.specialRepositoryPermissionTeams; let specials = []; let specialTeamIds = new Set(); let specialTeamLevels = new Map(); teamTypes.forEach(specialTeam => { if (specialTeams[specialTeam] && specialTeams[specialTeam].length) { specials.push(specialTeam); for (let i = 0; i < specialTeams[specialTeam].length; i++) { const teamId = specialTeams[specialTeam][i]; specialTeamIds.add(teamId); specialTeamLevels.set(teamId, translateSpecialToGitHub(specialTeam)); } } }); return [specialTeams, specials, specialTeamIds, specialTeamLevels]; } module.exports = { processOrgSpecialTeams: processOrgSpecialTeams, filter: function (data) { const eventType = data.properties.event; const eventAction = data.body.action; // Someone added a team to the repo if (eventType === 'team' && ['add_repository', 'added_to_repository'].includes(eventAction)) { return true; } // Someone removed a team from the repo if (eventType === 'team' && eventAction === 'removed_from_repository') { return true; } // Team permission level changed if (eventType === 'team' && eventAction === 'edited') { return true; } // A new repo may need the teams if (eventType === 'repository' && eventAction === 'created') { return true; } return false; }, run: function (operations, organization, data, callback) { const eventType = data.properties.event; const eventAction = data.body.action; const [/*specialTeams*/, /*specials*/, specialTeamIds, specialTeamLevels] = processOrgSpecialTeams(organization); const preventLargeTeamPermissions = organization.preventLargeTeamPermissions; const recoveryTasks = []; const repositoryBody = data.body.repository; const newPermissions = repositoryBody ? repositoryBody.permissions : null; const whoChangedIt = data.body && data.body.sender ? data.body.sender.login : null; const whoChangedItId = whoChangedIt ? 
data.body.sender.id : null; function finalizeEventRemediation(immediateError) { if (immediateError) { return callback(immediateError); } if (recoveryTasks.length <= 0) { return callback(); } async.waterfall(recoveryTasks, (error) => { const insights = operations.insights; if (error) { insights.trackException(error); } return callback(error); }); } // New repository if (eventType === 'repository' && eventAction === 'created') { specialTeamIds.forEach(teamId => { const necessaryPermission = specialTeamLevels.get(teamId); recoveryTasks.push(createSetTeamPermissionTask(operations, organization, repositoryBody, teamId, necessaryPermission, `a new repository was created by username ${whoChangedIt}, setting automatic permissions`)); }); } else if (eventType === 'team') { const teamBody = data.body.team; const teamId = teamBody.id; const teamName = teamBody.name; // Enforce required special team permissions if (specialTeamIds.has(teamId)) { const necessaryPermission = specialTeamLevels.get(teamId); if (!necessaryPermission) { return callback(new Error(`No ideal permission level found for the team ${teamId}.`)); } if (eventAction === 'removed_from_repository') { // Someone removed the entire team recoveryTasks.push(createSetTeamPermissionTask(operations, organization, repositoryBody, teamId, necessaryPermission, `the team and its permission were removed by the username ${whoChangedIt}`)); } else if (eventAction === 'edited') { // The team no longer has the appropriate permission level if (newPermissions[necessaryPermission] !== true) { recoveryTasks.push(createSetTeamPermissionTask(operations, organization, repositoryBody, teamId, necessaryPermission, `the permission was downgraded by the username ${whoChangedIt}`)); } } return finalizeEventRemediation(); } // Prevent granting large teams access if (preventLargeTeamPermissions) { return getTeamSize(organization, teamId, (getTeamError, teamSize) => { if (getTeamError) { return callback(getTeamError); } // Special thanks to the GitHub API team. The added_to_repository event did not // include the 'permissions' information. Fixed and deployed by GitHub on // 6/13/17. Thank you for helping us simplify our code! 
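// If this change grants push or admin access to a broad-access team, or to a
// team larger than the configured administrator limit, the tasks queued by
// addLargeTeamPermissionRevertTasks below revert the permission to 'pull' and,
// for repositories owned by this organization, e-mail the person who made the change.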
if (['added_to_repository', 'edited'].includes(eventAction) && newPermissions) { const specificReason = teamTooLargeForPurpose(teamId, newPermissions.admin, newPermissions.push, organization, teamSize, preventLargeTeamPermissions); if (specificReason) { // CONSIDER: system/ops accounts may actually be useful to consider allowing via operations.isSystemAccountByUsername addLargeTeamPermissionRevertTasks(recoveryTasks, operations, organization, repositoryBody, teamId, teamName, whoChangedIt, whoChangedItId, specificReason); } } return finalizeEventRemediation(); }); } } return finalizeEventRemediation(); }, }; function teamTooLargeForPurpose(teamId, isAdmin, isPush, organization, teamSize, preventLargeTeamPermissions) { const broadAccessTeams = organization.broadAccessTeams; let isBroadAccessTeam = broadAccessTeams && broadAccessTeams.includes(teamId); if (isBroadAccessTeam && (isAdmin || isPush)) { return 'The team is a very broad access team and does not allow push or admin access'; } let teamSizeLimitAdmin = defaultLargeAdminTeamSize; let teamSizeLimitType = 'default limit'; if (preventLargeTeamPermissions && preventLargeTeamPermissions.maximumAdministrators) { teamSizeLimitAdmin = preventLargeTeamPermissions.maximumAdministrators; teamSizeLimitType = `administrator team limit in the ${organization.name} organization`; } if (isAdmin && teamSize >= teamSizeLimitAdmin) { return `The team has ${teamSize} members which surpasses the ${teamSizeLimitAdmin} ${teamSizeLimitType}`; } return false; } function translateSpecialToGitHub(ourTerm) { switch (ourTerm) { case 'admin': return 'admin'; case 'write': return 'push'; case 'read': return 'pull'; } throw new Error(`Unknown team type ${ourTerm}`); } function getTeamSize(organization, teamId, callback) { const team = organization.team(teamId); team.getDetails(error => { if (error) { return callback(error); } return callback(null, team.members_count || 0); }); } function addLargeTeamPermissionRevertTasks(recoveryTasks, operations, organization, repositoryBody, teamId, teamName, whoChangedIt, whoChangedItId, specificReason) { specificReason = specificReason ? 
': ' + specificReason : ''; const blockReason = `the permission was upgraded by ${whoChangedIt} but a large team permission prevention feature has reverted the change${specificReason}`; console.log(blockReason); const insights = operations.insights; insights.trackMetric('JobAutomaticTeamsLargeTeamPermissionBlock', 1); insights.trackEvent('JobAutomaticTeamsLargeTeamPermissionBlocked', { specificReason: specificReason, teamId: teamId, organization: organization.name, repository: repositoryBody.name, whoChangedIt: whoChangedIt, whoChangedItId: whoChangedItId, }); recoveryTasks.push(createSetTeamPermissionTask(operations, organization, repositoryBody, teamId, 'pull', blockReason)); const owner = repositoryBody.owner.login.toLowerCase(); // We do not want to notify for each fork, if the permissions bubble to the fork if (owner === organization.name.toLowerCase()) { recoveryTasks.push(createLargeTeamPermissionPreventionWarningMailTask(operations, organization, repositoryBody, teamId, teamName, blockReason, whoChangedIt, whoChangedItId)); } } function createLargeTeamPermissionPreventionWarningMailTask(operations, organization, repositoryBody, teamId, teamName, reason, whoChangedIt, whoChangedItId) { // System accounts should not need notifications const mailProvider = operations.providers.mailProvider; const insights = operations.providers.insights; if (!mailProvider || operations.isSystemAccountByUsername(whoChangedIt)) { return emptyCallback; } const senderMember = organization.member(whoChangedItId); return callback => { senderMember.getMailAddress((error, mailAddress) => { if (error || !mailAddress) { return emptyCallback; } sendEmail(insights, operations.providers.basedir, mailProvider, mailAddress, { repository: repositoryBody, whoChangedIt: whoChangedIt, teamName: teamName, reason: reason, }, callback); }); }; } function emptyCallback(callback) { return callback(); } function sendEmail(insights, basedir, mailProvider, to, body, callback) { const mail = { to: to, cc: '<EMAIL>', subject: `Team permission change for ${body.repository.full_name} repository reverted`, reason: `You are receiving this e-mail because you changed the permissions on the ${body.teamName} GitHub team, triggering this action.`, headline: 'Team permission change reverted', classification: 'warning', service: 'Microsoft GitHub', }; emailRender.render(basedir, 'largeTeamProtected', body, (renderError, mailContent) => { if (renderError) { insights.trackException(renderError, { content: body, eventName: 'JobAutomaticTeamsLargeTeamPermissionBlockMailRenderFailure', }); return callback(renderError); } mail.content = mailContent; mailProvider.sendMail(mail, (mailError, mailResult) => { const customData = { content: body, receipt: mailResult, }; if (mailError) { customData.eventName = 'JobAutomaticTeamsLargeTeamPermissionBlockMailFailure'; insights.trackException(mailError, customData); return callback(mailError); } insights.trackEvent('JobAutomaticTeamsLargeTeamPermissionBlockMailSuccess', customData); callback(); }); }); } function createSetTeamPermissionTask(operations, organization, repositoryBody, teamId, necessaryPermission, reason) { const repoName = repositoryBody.name; const orgName = organization.name; const description = `setting permission level ${necessaryPermission} for the team with ID ${teamId} on the repository ${repoName} inside the ${orgName} GitHub org because ${reason}`; return callback => { const repository = organization.repository(repoName); const insights = operations.insights; 
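// Apply the permission through the GitHub API, then log the outcome and, when
// Application Insights is configured, track an event that captures the reason
// so the automatic change can be audited later.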
repository.setTeamPermission(teamId, necessaryPermission, error => { const eventRoot = 'AutomaticRepoPermissionSet'; const eventName = eventRoot + error ? 'Success' : 'Failure'; if (error) { error.description = description; console.warn(`${eventName} ${description}`); } else { console.log(`${eventName} ${description}`); } if (insights) { insights.trackEvent(eventName, { success: !!error, reason: reason, description: description, }); } return callback(error); }); }; } <file_sep>/views/people/index.pug //- //- Copyright (c) Microsoft. All rights reserved. //- Licensed under the MIT license. See LICENSE file in the project root for full license information. //- extends ../layout block append js_doc_ready include ../js/search.js block content - var octicon = viewServices.octicon - var itemType = 'members' - var teamAdmin = specificTeamPermissions && specificTeamPermissions.allowAdministration .container if team2 .nav ul.pager.zero-pad-bottom li.previous a(href='/' + organization.name + '/teams/' + team2.slug) span(aria-hidden=true) &larr; = ' Back to the ' + team2.name + ' team' if team2AddType h1 Add #{team2AddType}s to the #{team2.name} team else h1 People in the #{team2.name} team h5= organization.name + ' organization' else if organization h1 | People small= ' in the ' + organization.name + ' GitHub organization' else h1 People p.lead Members of officially managed Microsoft organizations if reposDataAgeInformation p.text-primary(style='margin-bottom:24px') if reposDataAgeInformation.changed = 'Updated ' + reposDataAgeInformation.changed if reposDataAgeInformation.updated && reposDataAgeInformation.changed |, refreshed else | Refreshed if reposDataAgeInformation.updated = ' ' + reposDataAgeInformation.updated .row .col-md-10 //-ul.nav.nav-pills li(class=(search.sort === 'Alphabet' ? 'active' : ''), title='Alphabetically sorted') a(href='?sort=Alphabet&tag=' + (tag ? tag : '') + (query.phrase ? '&q=' + query.phrase : '')) != octicon('text-size', 20) | Name form.form-horizontal#entitySearch(style='margin-top:24px') .form-group .col-md-6 div.input-group input.form-control#inputQuery( placeholder='Search members', type='text', value=query && query.phrase ? query.phrase : null, style='max-width:400px') span.input-group-btn button( class='btn btn-muted' type='submit' style='border-width: 1px') Search .col-md-6 ul.nav.nav-pills li.dropdown(role='presentation') a.dropdown-toggle#typeLabel(data-toggle='dropdown', href='#', role='button', aria-haspopup='true', aria-expanded='false') = 'Type: ' if query && query.type strong= query.type else strong All span.caret ul.dropdown-menu.border-1px-primary(aria-labelledby='typeLabel', style='border-top:0;margin-top:0;padding-top:0;padding-bottom:0') - var currentType = query && query.type ? query.type : 'all' li(class={ active: currentType === 'all' }) a(href='?page_number=' + (search.page) + (tag ? '&tag=' + tag : '') + (search.sort ? '&sort=' + search.sort : '') + (query.twoFactor ? '&twoFactor=' + query.twoFactor : '') + ('&type=all') + (query.phrase ? '&q=' + query.phrase : '')) All li(class={ active: currentType === 'linked' }) a(href='?page_number=' + (search.page) + (tag ? '&tag=' + tag : '') + (search.sort ? '&sort=' + search.sort : '') + (query.twoFactor ? '&twoFactor=' + query.twoFactor : '') + ('&type=linked') + (query.phrase ? '&q=' + query.phrase : '')) Linked li(class={ active: currentType === 'unlinked' }) a(href=href='?page_number=' + (search.page) + (tag ? '&tag=' + tag : '') + (search.sort ? '&sort=' + search.sort : '') + (query.twoFactor ? 
'&twoFactor=' + query.twoFactor : '') + ('&type=unlinked') + (query.phrase ? '&q=' + query.phrase : '')) Unlinked //-li(class={ active: currentType === 'former' }) a(href=href='?page_number=' + (search.page) + (tag ? '&tag=' + tag : '') + (search.sort ? '&sort=' + search.sort : '') + (query.twoFactor ? '&twoFactor=' + query.twoFactor : '') + ('&type=former') + (query.phrase ? '&q=' + query.phrase : '')) Former //-li(class={ active: currentType === 'active' }) a(href=href='?page_number=' + (search.page) + (tag ? '&tag=' + tag : '') + (search.sort ? '&sort=' + search.sort : '') + (query.twoFactor ? '&twoFactor=' + query.twoFactor : '') + ('&type=active') + (query.phrase ? '&q=' + query.phrase : '')) Active employment //-li(class={ active: currentType === 'serviceAccount' }) a(href=href='?page_number=' + (search.page) + (tag ? '&tag=' + tag : '') + (search.sort ? '&sort=' + search.sort : '') + (query.twoFactor ? '&twoFactor=' + query.twoFactor : '') + ('&type=serviceAccount') + (query.phrase ? '&q=' + query.phrase : '')) Service accounts //-li(class={ active: currentType === 'unknownAccount' }) a(href=href='?page_number=' + (search.page) + (tag ? '&tag=' + tag : '') + (search.sort ? '&sort=' + search.sort : '') + (query.twoFactor ? '&twoFactor=' + query.twoFactor : '') + ('&type=unknownAccount') + (query.phrase ? '&q=' + query.phrase : '')) Linked accounts not in the directory //-li.dropdown(role='presentation') a.dropdown-toggle#twoFactorLabel(data-toggle='dropdown', href='#', role='button', aria-haspopup='true', aria-expanded='false') = 'Two factor: ' if query && query.twoFactor && query.twoFactor === 'off' strong Two-factor off else strong Any 2fa state span.caret ul.dropdown-menu.border-1px-primary(aria-labelledby='twoFactorLabel', style='border-top:0;margin-top:0;padding-top:0;padding-bottom:0') - var currentTwoFactor = query && query.twoFactor ? query.twoFactor : 'all' li(class={ active: currentTwoFactor === 'all' }) a(href='?page_number=' + (search.page) + (tag ? '&tag=' + tag : '') + (search.sort ? '&sort=' + search.sort : '') + (search.type ? '&type=' + search.type : '') + ('&twoFactor=all') + (query.phrase ? '&q=' + query.phrase : '')) All li(class={ active: currentTwoFactor === 'off' }) a(href=href='?page_number=' + (search.page) + (tag ? '&tag=' + tag : '') + (search.sort ? '&sort=' + search.sort : '') + (search.type ? '&type=' + search.type : '') + ('&twoFactor=off') + (query.phrase ? '&q=' + query.phrase : '')) 2FA off if filters.length > 0 p(style='margin-top:24px') if search.totalItems > 1 strong= search.totalItems.toLocaleString() | results else if search.totalItems === 1 strong 1 | result else | No results = ' for ' each filter in filters = filter.displayPrefix ? filter.displayPrefix + ' ' : '' strong= filter.displayValue || filter.value = ' ' = filter.displaySuffix ? filter.displaySuffix + ' ' : '' a.pull-right.btn.btn-sm.btn-muted-more(href='?') != octicon('x', 14) = ' Clear filter' hr if search.totalItems === 0 .well.well-lg div.text-center p != octicon('organization', 24) if team2 p.lead The #{team2.name} team doesn't have any people that match. else if organization p.lead This organization doesn't have any people that match. else p.lead No people match across all managed organizations. else nav(style='margin-bottom:48px') ul.pager li.previous(class=(search.page > 1 ? '' : 'disabled')) a(href='?page_number=' + (search.page-1) + (query.twoFactor ? '&twoFactor=' + query.twoFactor : '') + (search.sort ? '&sort=' + search.sort : '') + (query.type ? 
'&type=' + query.type : '') + (query.phrase ? '&q=' + query.phrase : '')) span(aria-hidden="true") &larr; Previous li if search.totalItems == 1 | One member else | #{search.pageFirstItem.toLocaleString()} - #{search.pageLastItem.toLocaleString()} of #{search.totalItems.toLocaleString()} #{itemType} li.next(class=(search.page < search.totalPages ? '' : 'disabled')) a(href='?page_number=' + (search.page+1) + (query.twoFactor ? '&twoFactor=' + query.twoFactor : '') + (search.sort ? '&sort=' + search.sort : '') + (query.type ? '&type=' + query.type : '') + (query.phrase ? '&q=' + query.phrase : '')) span(aria-hidden="true") Next &rarr; .row.vertical-pad-bottom .col-sm-8 .col-sm-4 if !organization h6 Memberships each person in search.members - var link = person.link - var account = person.account ? person.account : person - var corporate = person.corporate; - var serviceAccount = person.link && person.link.serviceAccount - var shorthandName = corporate && corporate.preferredName ? corporate.preferredName : (link ? (link.aadname || account.login) : account.login); - var corporateIdentity = corporate && corporate.alias ? corporate.alias : (link ? link.aadupn : null) - var email = corporate ? corporate.emailAddress || corporate.userPrincipalName : null .row.vertical-pad-bottom .col-sm-8 div.horizontal-space-right.vertical-space.pull-left(style='width:48px;height:48px;display:block;vertical-align:middle') if account.avatar_url img(src=account.avatar_url + '&s=96', style='width:48px;height:48px', title='Avatar of ' + shorthandName) div(style='display:block;vertical-align:middle') h4 a(href='https://github.com/' + account.login, target='_new')= shorthandName //-if !link = ' ' .label.label-danger Not linked if team2AddType && person.isTeamMember = ' ' .label.label-primary.shrink66 Team member ul.list-inline if shorthandName && shorthandName !== account.login li span(title=account.login + ' is the GitHub username for ' + shorthandName)= account.login if !link li .label.label-danger Not linked if corporateIdentity if link && !corporate li.text-warning!= octicon('link', 16) li span(title=link.aadoid)= link.aadupn else li!= octicon('link', 16) li span(title=corporateIdentity + ' is the corporate identity for ' + shorthandName)= corporateIdentity //- just corporate e-mails here, not user emails if email li a(href='mailto:' + email, title='Send corporate email to ' + email) != octicon('mail', 16) if serviceAccount li!= octicon('hubot', 16) li Service account if team2AddType && teamAdmin form(action=teamUrl + team2AddType + 's/add', method='post') input(type='hidden', name='username', value=account.login) input.btn.btn-sm.btn-muted( type='submit', onclick='return confirm(\'Are you sure that you want to add ' + account.login + ' as a team ' + team2AddType + '?\');', value= 'Add as ' + team2.name + ' team ' + team2AddType) if team2RemoveType && teamAdmin form(action=teamUrl + team2RemoveType + 's/remove', method='post') input(type='hidden', name='username', value=account.login) input.btn.btn-sm.btn-muted( type='submit', onclick='return confirm(\'Are you sure that you want to remove ' + account.login + ' from the team?\');', value= 'Remove from ' + team2.name + ' team') .col-sm-2 if !organization && person.orgs ul.list-unstyled each values, _org_ in person.orgs li= _org_ .col-sm-2 //- Temporary, adding a direct link; the user would still need to be a global sudoer to use the linked page if lightupSudoerLink a.btn.btn-sm.btn-default(target='_new', href='/organization/whois/github/' + account.login) Manage 
account nav ul.pager li.previous(class=(search.page > 1 ? '' : 'disabled')) a(href='?page_number=' + (search.page-1) + (query.twoFactor ? '&twoFactor=' + query.twoFactor : '') + (search.sort ? '&sort=' + search.sort : '') + (query.type ? '&type=' + query.type : '') + (query.phrase ? '&q=' + query.phrase : '')) span(aria-hidden="true") &larr; Previous li h4(style="display:inline") | Page #{search.page} of #{search.totalPages} li.next(class=(search.page < search.totalPages ? '' : 'disabled')) a(href='?page_number=' + (search.page+1) + (query.twoFactor ? '&twoFactor=' + query.twoFactor : '') + (search.sort ? '&sort=' + search.sort : '') + (query.type ? '&type=' + query.type : '') + (query.phrase ? '&q=' + query.phrase : '')) span(aria-hidden="true") Next &rarr; hr p The presence of people in this list does not imply that they are an employee. <file_sep>/views/org/newRepoSpa.pug extends ../layout block js link(href='/client/dist/assets/vendor.css', rel='stylesheet') //- script(type='text/javascript', src='/js/jquery.min.js') script(src='/client/dist/assets/vendor.js') script(src='/client/dist/assets/client.js') script(type='text/javascript', src='/js/bootstrap.min.js') script(type='text/javascript', src='/js/jquery.timeago.js') block content #ember-app <file_sep>/middleware/logger.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // 'use strict'; const logger = require('morgan'); const encryptionMetadataKey = '_ClientEncryptionMetadata2'; const piiFormat = ':id :method :scrubbedUrl :status :response-time ms - :res[content-length] :encryptedSession :correlationId'; const format = ':method :scrubbedUrl :status :response-time ms - :res[content-length] :encryptedSession :correlationId'; logger.token('encryptedSession', function getUserId(req) { const config = req.app.settings.runtimeConfig; if (req.session && req.session.passport && req.session.passport.user) { const userType = config.authentication.scheme === 'aad' ? 'azure' : 'github'; return req.session.passport.user[userType] && req.session.passport.user[userType][encryptionMetadataKey] !== undefined ? 'encrypted' : 'plain'; } }); logger.token('id', function getUserId(req) { const config = req.app.settings.runtimeConfig; if (config) { const userType = config.authentication.scheme === 'aad' ? 'azure' : 'github'; return req.user && req.user[userType] && req.user[userType].username ? req.user[userType].username : undefined; } }); logger.token('correlationId', function getCorrelationId(req) { return req.correlationId; }); logger.token('scrubbedUrl', function getScrubbedUrl(req) { return req.scrubbedUrl || req.originalUrl || req.url; }); module.exports = function createLogger(config) { return logger(config && config.debug && config.debug.showUsers === true ? piiFormat : format); }; <file_sep>/routes/org/requestRepo.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // // This file is no longer used in production // TODO: Remove this file once the right open source strategy is understood const async = require('async'); const utils = require('../../utils'); const emailRender = require('../../lib/emailRender'); const RepoWorkflowEngine = require('./RepoWorkflowEngine.js'); const express = require('express'); const router = express.Router(); // INTERNAL NOTE: // requestRepo.js is heavily diverged between the public and private implementations. 
// Merge and commit with care. router.use(function (req, res, next) { req.oss.addBreadcrumb(req, 'Request a new repo'); next(); }); function waterfallCallback() { var args = Array.prototype.slice.call(arguments); var callback = args.pop(); args.unshift(null); callback.apply(undefined, args); } router.post('/', function (req, res, next) { var org = req.org; var oss = org.oss; var displayHostname = req.hostname; const config = req.app.settings.runtimeConfig; if (org.inner.settings.createReposDirect) { const directUrl = `https://github.com/organizations/${org.name}/repositories/new`; const directError = utils.wrapError(null, 'This organization does not allow repository requests through this portal. Please use GitHub.com directly.', true); directError.fancyLink = { title: 'Create a repo on GitHub.com', link: directUrl, }; return next(directError); } var orgHasCla = false; try { const claTeams = org.getLegacyClaTeams(true); orgHasCla = req.body.claEntity && claTeams[req.body.claEntity]; } catch (noClaError) { /* ignored */ } if (!req.body.name || (req.body.name.length !== undefined && req.body.name.length === 0)) { return next(utils.wrapError(null, 'Please provide a repo name.', true)); } if (orgHasCla && req.body.claEntity && (!req.body.claMail || (req.body.claMail.length !== undefined && req.body.claMail.length === 0))) { return next(utils.wrapError(null, 'Please provide an e-mail address to receive CLA notifications.', true)); } if (req.body.name.indexOf(' ') >= 0) { return next(utils.wrapError(null, 'Repos cannot have spaces in their name. Consider a dash.', true)); } if (req.body.name.includes('/')) { return next(utils.wrapError(null, 'Repos cannot have slashes in their name.', true)); } if (org.inner.settings.approvalTypes || config.github.approvalTypes.fields.approvalTypes) { var approvalType = req.body.approvalType; if ((org.inner.settings.exemptionDetailsRequired || config.github.approvalTypes.fields.exemptionDetailsRequired || []).indexOf(approvalType) >= 0) { if (!req.body.justification || (req.body.justification.length !== undefined && req.body.justification.length === 0)) { return next(utils.wrapError(null, 'Exemption details are required.', true)); } } } var userMailAddress = null; const repoApprovalTypesValues = config.github.approvalTypes.repo; if (repoApprovalTypesValues.length === 0) { return next(new Error('No repo approval providers configured.')); } const repoApprovalTypes = new Set(repoApprovalTypesValues); const mailProviderInUse = repoApprovalTypes.has('mail'); var issueProviderInUse = repoApprovalTypes.has('github'); if (!mailProviderInUse && !issueProviderInUse) { return next(new Error('No configured approval providers configured.')); } const approverMailAddresses = []; const mailProvider = req.app.settings.mailProvider; if (mailProviderInUse && !mailProvider) { return next(utils.wrapError(null, 'No mail provider is enabled, yet this application is configured to use a mail provider.')); } const mailAddressProvider = req.app.settings.mailAddressProvider; // Match the desired repo visibility to that which is supported in this org. // If no type is given, default to the best choice for the current org. 
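  // For example, an org whose settings.type is 'publicprivate' accepts both visibilities and,
  // when the request body omits one, falls back to the first entry in its list below ('private');
  // an org configured as 'public' only accepts public repos and rejects anything else.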
var typeMap = { public: ['public'], private: ['private'], publicprivate: ['private', 'public'] }; var allowedTypes = typeMap[org.inner.settings.type.toLowerCase()]; if (!allowedTypes) return next(new Error('Service not configured with allowed repo types')); var repoType = req.body.visibility || allowedTypes[0]; if (allowedTypes.indexOf(repoType.toLowerCase()) === -1) return next(new Error('This org does not support creation of ' + repoType + ' repos')); req.body.visibility = repoType; if (!req.body.teamCount) { return next(new Error('Invalid.')); } var teamsRequested = []; var teamCount = Math.floor(req.body.teamCount); var foundAdminTeam = false; var i = 0; for (i = 0; i < teamCount + 1; i++) { var existingTeamId = req.body['existingTeam' + i]; if (existingTeamId && existingTeamId > 0) { existingTeamId = Math.floor(existingTeamId); var perm = req.body['existingTeamPermission' + i]; if (existingTeamId > 0 && perm == 'pull' || perm == 'push' || perm == 'admin') { var tr = { id: existingTeamId, permission: perm, }; teamsRequested.push(tr); if (perm == 'admin') { foundAdminTeam = true; } } } } if (!foundAdminTeam) { return next(utils.wrapError(null, 'You must select an admin team so that the repo can be written to and managed.', true)); } var dc = req.app.settings.dataclient; var team = org.getRepoApproversTeam(false); var template = 'other'; if (req.body.license && req.body.license.toLowerCase() === 'mit') { template = 'mit'; } var approvalRequest = { ghu: oss.usernames.github, ghid: oss.id.github, justification: req.body.justification, requested: ((new Date()).getTime()).toString(), active: false, teamid: team == null ? -1 : team.id, type: 'repo', org: org.name.toLowerCase(), repoName: req.body.name, repoDescription: req.body.description, repoUrl: req.body.url, repoVisibility: req.body.visibility, email: oss.modernUser().contactEmail(), license: req.body.license, approvalType: req.body.approvalType, approvalUrl: req.body.approvalUrl, gitignore_template: req.body.language, template: template, }; if (orgHasCla && req.body.claEntity) { approvalRequest.claMail = req.body.claMail; approvalRequest.claEntity = req.body.claEntity; } approvalRequest.teamsCount = teamsRequested.length; for (i = 0; i < teamsRequested.length; i++) { approvalRequest['teamid' + i] = teamsRequested[i].id; approvalRequest['teamid' + i + 'p'] = teamsRequested[i].permission; } var workflowRepository = null; try { workflowRepository = issueProviderInUse ? org.getWorkflowRepository() : null; } catch (noWorkflowRepoError) { issueProviderInUse = false; } var repoWorkflow = null; var createdNewRepoDetails = null; var isApprovalRequired = team != null; var generatedRequestId = null; var repoCreateResults = null; const approvalScheme = displayHostname === 'localhost' && config.webServer.allowHttp === true ? 
'http' : 'https'; const reposSiteBaseUrl = `${approvalScheme}://${displayHostname}/`; const approvalBaseUrl = `${reposSiteBaseUrl}approvals/`; async.waterfall([ // Validate that this repo is new (callback) => { const existingRepo = org.repo(approvalRequest.repoName); existingRepo.getDetails((getDetailsError) => { if (getDetailsError) { return callback(); } const existsError = utils.wrapError(getDetailsError, `The repo "${approvalRequest.repoName}" already exists.`, true); existsError.detailed = 'If you cannot see it on GitHub, the repo is currently private and an active repo administrator would be able to help you get access.'; return callback(existsError); }); }, // get the user's e-mail address function (callback) { const upn = oss.modernUser().contactEmail(); mailAddressProvider.getAddressFromUpn(upn, (resolveError, mailAddress) => { if (resolveError) { return callback(resolveError); } userMailAddress = mailAddress; callback(); }); }, //Step 1 - get approval team members. function (callback) { if (isApprovalRequired === true) { team.getMemberLinks(function (error, maintainers) { if (error) { callback(new Error('It seems that the repo approvers information is unknown, or something happened when trying to query information about the team you are trying to apply to. Please file a bug or try again later. Sorry!'), null); return; } if (maintainers === undefined || maintainers.length === undefined || maintainers.length === 0) { callback(new Error('It seems that the repo approvers for this team is unknown. Please file a bug. Thanks.'), null); return; } var randomMaintainer = maintainers[Math.floor(Math.random() * maintainers.length)]; if (!randomMaintainer.link || !randomMaintainer.link.ghu) { req.insights.trackEvent('RandomMaintainerFailure', randomMaintainer); } var assignTo = randomMaintainer && randomMaintainer.link && randomMaintainer.link.ghu ? randomMaintainer.link.ghu : ''; var allMaintainers = []; async.each(maintainers, (maintainer, next) => { const approverUpn = maintainer && maintainer.link && maintainer.link.aadupn ? maintainer.link.aadupn : null; if (maintainer.link.ghu && approverUpn) { allMaintainers.push('@' + maintainer.link.ghu); mailAddressProvider.getAddressFromUpn(approverUpn, (getAddressError, mailAddress) => { if (getAddressError) { return next(getAddressError); } approverMailAddresses.push(mailAddress); next(); }); } else { next(); } }, (addressResolutionError) => { if (addressResolutionError) { return callback(addressResolutionError); } if (allMaintainers.length === 0) { return callback(new Error('No linked team maintainers are available to approve this request. Please report this issue, a maintainer may be needed for this team.')); } var consolidatedMaintainers = allMaintainers.join(', '); callback(null, { consolidatedMaintainers: consolidatedMaintainers, assignTo: assignTo }); }); }); } else { callback(null, { consolidatedMaintainers: '', assignTo: '' }); } }, //Step 2 - Store the request in azure table. function (args, callback) { dc.insertGeneralApprovalRequest('repo', approvalRequest, function (error, requestId) { if (error) { callback(error, null); return; } generatedRequestId = requestId; args.requestId = requestId; callback(null, args); }); }, //Step 3 - Create an issue in notification repository. issueProviderInUse === false ? waterfallCallback : function (args, callback) { var body = 'Hi,\n' + oss.usernames.github + ' has requested a new repo for the ' + org.name + ' ' + 'organization.' 
+ '\n\n' + args.consolidatedMaintainers + ': Can a repo approver for this org review the request now at ' + '\n' + 'https://' + displayHostname + '/approvals/' + args.requestId + '?\n\n' + '<small>Note: This issue was generated by the open source portal.</small>' + '\n\n' + '<small>If you use this issue to comment with the team maintainers(s), please understand that your comment will be visible by all members of the organization.</small>'; workflowRepository.createIssue({ title: 'Request to create a repo - ' + oss.usernames.github, body: body, }, function (error, issue) { if (error) { callback(utils.wrapError(error, 'A tracking issue could not be created to monitor this request. Please contact the admins and provide this URL to them. Thanks.')); return; } if (isApprovalRequired == true) { req.oss.saveUserAlert(req, 'Your repo request has been submitted and will be reviewed by one of the repo approvers for the org for naming consistency, business justification, etc. Thanks!', 'Repo Request Submitted', 'success'); } args.issue = issue; callback(null, args); }); }, //Step 4 - Add issue id and number to request kept in db. issueProviderInUse === false ? waterfallCallback : function (args, callback) { if (args.issue.id && args.issue.number) { dc.updateApprovalRequest(args.requestId, { issueid: args.issue.id.toString(), issue: args.issue.number.toString(), active: true }, function (/* error is ignored - not sure why just linting now */) { callback(null, args); }); } else { callback(null, args); } }, //Step 5 - Assign an issue to approver. issueProviderInUse === false ? waterfallCallback : function (args, callback) { workflowRepository.updateIssue(args.issue.number, { assignee: args.assignTo, }, function (gitError) { if (gitError) { callback(gitError); } else { // CONSIDER: Log gitError. Since assignment fails for users // who have not used the portal, it should not actually // block the workflow from assignment. callback(null, args); } }); }, //Step 7 - Create a Repo if approval is not required function (args, callback) { if (isApprovalRequired == true) { return callback(null, args); } getRequestApprovalPkg(args.requestId, oss, dc, function (err, approvalPackage) { if (err) { return callback(utils.wrapError(err, 'A request authorization package could not be created at this time.')); } args.approvalPackage = approvalPackage; repoWorkflow = new RepoWorkflowEngine(null, org, approvalPackage); repoWorkflow.performApprovalOperation(function (err, newRepoDetails) { if (err) { err.detailed = 'Repo creation request is submitted but there was an error creating a repo.'; err.skipLog = true; return callback(err); } createdNewRepoDetails = newRepoDetails; callback(null, args); }); }); }, //Step 8 - Add teams to the repo as a next step for repo. creation. function (args, callback) { if (isApprovalRequired == true) { callback(null, args); return; } repoWorkflow.generateSecondaryTasks(function (err, tasks) { if (err) { callback(err); return; } if (tasks) { async.series(tasks, function (err, output) { if (err) { callback(err); } else { repoCreateResults = output; callback(null, args); } }); } else { callback(null, args); } }); }, //Step 9 - Add Comment to the created issue and close the issue. issueProviderInUse === false ? 
waterfallCallback : function (args, callback) { if (isApprovalRequired == true) { callback(null, args); return; } var commentBody = repoWorkflow.messageForAction('approve'); commentBody += '\n\n<small>This was generated by the Open Source Portal on behalf of ' + args.assignTo + '.</small>'; args.issueCloseComment = commentBody; var issue = workflowRepository.issue(args.issue.number); issue.createComment(commentBody, function (errIssueComment) { if (errIssueComment) { return callback('Repo is created but there was an error putting comment to an issue - ' + args.issue.number); } issue.close(function (errIssueClose) { if (errIssueClose) { return callback('Repo is created but there was an error closing an issue - ' + args.issue.number); } callback(null, args); }); }); }, isApprovalRequired === false || mailProviderInUse === false ? waterfallCallback : function sendMailToApprovers(args, callback) { // If approval is required, let's ask for approval now const approversAsString = approverMailAddresses.join(', '); const mail = { to: approverMailAddresses, subject: `New ${approvalRequest.org} repo ${approvalRequest.repoName} by ${userMailAddress}`, reason: (`You are receiving this e-mail because you are a repo approver for this organization. To stop receiving these mails, you can leave the repo approvals team on GitHub. This mail was sent to: ${approversAsString}`), headline: `New ${approvalRequest.org} repo requested`, classification: 'action', service: 'Microsoft GitHub', correlationId: req.correlationId, }; const contentOptions = { correlationId: req.correlationId, approvalRequest: approvalRequest, version: config.logging.version, actionUrl: approvalBaseUrl + generatedRequestId, reposSiteUrl: reposSiteBaseUrl, }; emailRender.render(req.app.settings.basedir, 'repoApprovals/pleaseApprove', contentOptions, (renderError, mailContent) => { if (renderError) { req.insights.trackException(renderError, { content: contentOptions, eventName: 'ReposRequestPleaseApproveMailRenderFailure', }); return callback(renderError); } mail.content = mailContent; mailProvider.sendMail(mail, (mailError, mailResult) => { const customData = { content: contentOptions, receipt: mailResult, }; if (mailError) { customData.eventName = 'ReposRequestPleaseApproveMailFailure'; req.insights.trackException(mailError, customData); return callback(mailError); } req.insights.trackEvent('ReposRequestPleaseApproveMailSuccess', customData); dc.updateApprovalRequest(generatedRequestId, { active: true, mailSentToApprovers: approversAsString, mailSentTo: userMailAddress, }, function (activateError) { callback(activateError, args); }); }); }); }, mailProviderInUse === false ? waterfallCallback : function sendEmail(args, callback) { // Let's send e-mail to the requester about this action const headline = isApprovalRequired ? 'Repo request submitted' : 'Repo ready'; const subject = isApprovalRequired ? `Your new repo request for "${approvalRequest.repoName}"` : `Your repo "${approvalRequest.repoName}" has been created`; const emailTemplate = isApprovalRequired ? 'repoApprovals/requestSubmitted' : 'repoApprovals/autoCreated'; const mail = { to: userMailAddress, subject: subject, reason: (`You are receiving this e-mail because you requested the creation of a repo. 
This mail was sent to: ${userMailAddress}`), headline: headline, classification: 'information', service: 'Microsoft GitHub', correlationId: req.correlationId, }; const contentOptions = { correlationId: req.correlationId, approvalRequest: approvalRequest, results: repoCreateResults, version: config.logging.version, reposSiteUrl: reposSiteBaseUrl, }; emailRender.render(req.app.settings.basedir, emailTemplate, contentOptions, (renderError, mailContent) => { if (renderError) { req.insights.trackException(renderError, { content: contentOptions, eventName: 'ReposRequestSubmittedMailRenderFailure', }); return callback(renderError); } mail.content = mailContent; mailProvider.sendMail(mail, (mailError, mailResult) => { const customData = { content: contentOptions, receipt: mailResult, }; if (mailError) { customData.eventName = 'ReposRequestSubmittedMailFailure'; req.insights.trackException(mailError, customData); return callback(mailError); } req.insights.trackEvent('ReposRequestSubmittedMailSuccess', customData); callback(null, args); }); }); }, //Step 10 - Update approval request record in the table. function (args, callback) { if (isApprovalRequired == true) { return callback(null, args); } var requestUpdates = { decision: 'approve', active: false, repoId: createdNewRepoDetails.id, decisionTime: (new Date().getTime()).toString(), decisionBy: oss.usernames.github, decisionNote: args.issueCloseComment, decisionEmail: oss.modernUser().contactEmail(), }; dc.updateApprovalRequest(args.requestId, requestUpdates, function (err) { if (err) { return callback('Repo is created but there was an error closing the request.'); } callback(null, args); }); } ], function (err) { if (err) { return next(err); } else { if (isApprovalRequired == true) { oss.render(req, res, 'message', 'Repo request submitted', { messageTitle: req.body.name.toUpperCase() + ' REPO', message: 'Your request has been submitted for review to the approvers group for the requested organization.' 
}); } else { if (createdNewRepoDetails && createdNewRepoDetails.name) { req.oss.saveUserAlert(req, `Your repo "${createdNewRepoDetails.name}" has been created.`, 'New GitHub repository created', 'success'); } oss.render(req, res, 'message', 'Repo request approved', { messageTitle: req.body.name.toUpperCase() + ' REPO', message: 'Your request has been completed and the repo created.', messageLink: createdNewRepoDetails.html_url, messageLinkTitle: `Open ${createdNewRepoDetails.full_name} on GitHub`, messageLinkTarget: 'new', }); } } }); }); router.get('/', function (req, res, next) { const languages = req.app.settings.runtimeConfig.github.gitignore.languages; const config = req.app.settings.runtimeConfig; var org = req.org; var orgName = org.name.toLowerCase(); const organization = req.app.settings.providers.operations.getOrganization(orgName); const createMetadata = organization.getRepositoryCreateMetadata(); var highlightedTeams = org.inner.settings.highlightedTeams; var allowPrivateRepos = org.inner.settings.type == 'publicprivate' || org.inner.settings.type == 'private'; var allowPublicRepos = org.inner.settings.type == 'publicprivate' || org.inner.settings.type == 'public'; if (org.inner.settings.createReposDirect) { return org.oss.render(req, res, 'org/requestRepo', 'Request a a new repository on GitHub.com', { orgName: orgName, orgConfig: org.inner.settings, org: org, }); } var claTeams = null; var orgHasCla = org.isLegacyClaAutomationAvailable(); try { claTeams = org.getLegacyClaTeams(true); } catch (noClaError) { /* ignored */ } org.getTeams(false /* do not use cached */, function (error, teams) { if (error) { return next(utils.wrapError(error, 'Could not read the entire list of read (pull) teams from GitHub. Please try again later or report this error if you continue seeing it.')); } var team = org.getRepoApproversTeam(false); getApproverMembers(team, function (error, approvers) { if (error) { return next(new Error('Could not retrieve the repo approvers for ' + orgName)); } var featuredTeamsCount = 0; var selectTeams = []; var i = 1; selectTeams.push({ number: i++, adminOnly: true, }); if (highlightedTeams !== undefined && highlightedTeams && highlightedTeams.length) { featuredTeamsCount = highlightedTeams.length; for (; i < featuredTeamsCount + 1; i++) { var ht = highlightedTeams[i - 1]; ht.number = i; ht.name = org.team(ht.id).name; selectTeams.push(ht); } } var allMembersTeam = org.getAllMembersTeam(); ++featuredTeamsCount; selectTeams.push({ number: i++, name: allMembersTeam.name, id: allMembersTeam.id, readOnly: true, info: 'This team contains all members of the "' + org.name + '" GitHub org who have onboarded and linked. 
Highly recommended for ease of read access.', }); for (; i < featuredTeamsCount + 4; i++) { selectTeams.push({ number: i }); } var approvalTypes = null; if (org.inner.settings.approvalTypes || config.github.approvalTypes.fields && config.github.approvalTypes.fields.approvalTypes) { approvalTypes = new Array(); var typesConfig = org.inner.settings.approvalTypes || config.github.approvalTypes.fields.approvalTypes; var urlRequiredConfig = org.inner.settings.approvalUrlRequired || config.github.approvalTypes.fields.approvalUrlRequired || []; var format = org.inner.settings.approvalUrlFormat || config.github.approvalTypes.fields.approvalUrlFormat; var exemptionDetailsConfig = org.inner.settings.exemptionDetailsRequired || config.github.approvalTypes.fields.exemptionDetailsRequired || []; for (var ctr = 0; ctr < typesConfig.length; ctr++) { approvalTypes.push({ value: typesConfig[ctr], urlRequired: urlRequiredConfig.indexOf(typesConfig[ctr]) >= 0, format: format, exemptionDetailsRequired: exemptionDetailsConfig.indexOf(typesConfig[ctr]) >= 0 }); } } org.oss.render(req, res, 'org/requestRepo', 'Request a a new repository', { orgName: orgName, orgConfig: org.inner.settings, allowPrivateRepos: allowPrivateRepos, allowPublicRepos: allowPublicRepos, orgHasCla: orgHasCla, claTeams: claTeams, approvers: approvers, teams: teams, org: org, selectTeams: selectTeams, templates: createMetadata.templates, approvalTypes: approvalTypes, languages: languages, }); }); }); }); function getApproverMembers(team, cb) { if (team == null) { cb(null, []); return; } team.getMemberLinks(cb); } function getRequestApprovalPkg(requestId, oss, dc, cb) { dc.getApprovalRequest(requestId, function (error, pendingRequest) { if (error) { cb(utils.wrapError(error, 'The pending request you are looking for does not seem to exist.'), null); } var userHash = {}; userHash[pendingRequest.ghu] = pendingRequest.ghid; var requestingUser = null; oss.getCompleteUsersFromUsernameIdHash(userHash, function (error, users) { if (!error && !users[pendingRequest.ghu]) { error = new Error('Could not create an object to track the requesting user.'); } if (error) { return cb(error); } requestingUser = users[pendingRequest.ghu]; var approvalPackage = { request: pendingRequest, requestingUser: requestingUser, id: requestId, }; cb(null, approvalPackage); }); }); } module.exports = router;<file_sep>/lib/encryption.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // 'use strict'; // ---------------------------------------------------------------------------- // This is a Node.js implementation of client-side table entity encryption, // compatible with the official Azure storage .NET library. // ---------------------------------------------------------------------------- const async = require('async'); const crypto = require('crypto'); const jose = require('node-jose'); // ---------------------------------------------------------------------------- // Azure Storage .NET client library - entity encryption keys: // // Key: _ClientEncryptionMetadata1 // Type: JSON stringified object // Purpose: Contains information about the client encryption agent used to // encrypt the entity. Contains a uniquely generated content // encryption key for the specific row of data. 
// Constant: tableEncryptionKeyDetails // // Key: _ClientEncryptionMetadata2 // Type: Binary buffer // Purpose: Encrypted JSON stringified object containing the list of encrypted // fields in the entity. // Constant: tableEncryptionPropertyDetails // ---------------------------------------------------------------------------- const tableEncryptionPropertyDetails = '_ClientEncryptionMetadata2'; const tableEncryptionKeyDetails = '_ClientEncryptionMetadata1'; // ---------------------------------------------------------------------------- // Azure Storage encryption agent values: as implemented today, the encryption // agent for .NET is of version 1.0; initialization vectors are 16-bytes, // CEKs are 32-bytes, etc. The agent includes the AES algorithm used for // content keys, but the algorithm is the .NET framework-recognized value and // not the OpenSSL defined constant. We maintain a map therefore to map // between the two, but only those which are currently supported by the .NET // Azure Storage library. // ---------------------------------------------------------------------------- const azureStorageEncryptionAgentProtocol = '1.0'; const azureStorageKeyWrappingAlgorithm = 'A256KW'; const azureStorageContentEncryptionIVBytes = 16; const azureStorageContentEncryptionKeyBytes = 32; const azureStorageEncryptionAgentEncryptionAlgorithm = 'AES_CBC_256' /* .NET value */; const mapDotNetFrameworkToOpenSslAlgorithm = new Map([[azureStorageEncryptionAgentEncryptionAlgorithm, 'aes-256-cbc']]); function openSslFromNetFrameworkAlgorithm(algorithm) { const openSslAlgorithm = mapDotNetFrameworkToOpenSslAlgorithm.get(algorithm); if (openSslAlgorithm === undefined) { throw new Error(`The OpenSSL algorithm constant for the .NET Framework value "${algorithm}" is not defined or tested.`); } return openSslAlgorithm; } // ---------------------------------------------------------------------------- // Hash, encrypt, decrypt, wrap, unwrap and key generation routines // ---------------------------------------------------------------------------- function getSha256Hash(buffer) { return crypto.createHash('sha256').update(buffer).digest(); } function encryptValue(contentEncryptionKey, iv, value) { const cipher = crypto.createCipheriv(openSslFromNetFrameworkAlgorithm(azureStorageEncryptionAgentEncryptionAlgorithm), contentEncryptionKey, iv); return Buffer.concat([cipher.update(value), cipher.final()]); } function decryptValue(algorithm, contentEncryptionKey, iv, encryptedValue) { const decipher = crypto.createDecipheriv(algorithm, contentEncryptionKey, iv); return Buffer.concat([decipher.update(encryptedValue), decipher.final()]); } function generate32bitKey(callback) { crypto.randomBytes(azureStorageContentEncryptionKeyBytes, callback); } function generateContentEncryptionKey(callback) { crypto.randomBytes(azureStorageContentEncryptionIVBytes, (cryptoError, contentEncryptionIV) => { if (cryptoError) { return callback(cryptoError); } generate32bitKey((createKeyError, contentEncryptionKey) => { if (createKeyError) { return callback(createKeyError); } callback(null, contentEncryptionIV, contentEncryptionKey); }); }); } function wrapContentKey(keyWrappingAlgorithm, keyEncryptionKey, contentEncryptionKey, callback) { jose.JWA.encrypt(keyWrappingAlgorithm, keyEncryptionKey, contentEncryptionKey) .then((result) => { return callback(null, result.data); }, callback); } function unwrapContentKey(keyWrappingAlgorithm, keyEncryptionKey, wrappedContentKeyEncryptedKey, callback) { jose.JWA.decrypt(keyWrappingAlgorithm, 
keyEncryptionKey, wrappedContentKeyEncryptedKey) .then((contentEncryptionKey) => { return callback(null, contentEncryptionKey); }, callback); } // ---------------------------------------------------------------------------- // Azure encryption metadata object // ---------------------------------------------------------------------------- function createEncryptionData(keyId, wrappedContentEncryptionKey, contentEncryptionIV, keyWrappingAlgorithm) { const encryptionData = { /* PascalCase object per the .NET library */ WrappedContentKey: { KeyId: keyId, EncryptedKey: base64StringFromBuffer(wrappedContentEncryptionKey), Algorithm: keyWrappingAlgorithm, }, EncryptionAgent: { Protocol: azureStorageEncryptionAgentProtocol, EncryptionAlgorithm: azureStorageEncryptionAgentEncryptionAlgorithm, }, ContentEncryptionIV: base64StringFromBuffer(contentEncryptionIV), KeyWrappingMetadata: {}, }; return encryptionData; } function validateEncryptionData(encryptionData) { if (!encryptionData || !encryptionData.EncryptionAgent) { throw new Error('No encryption data or encryption data agent.'); } const agent = encryptionData.EncryptionAgent; if (!agent.Protocol) { throw new Error('Encryption agent protocol version must be present in the encryption data properties.'); } if (agent.Protocol !== azureStorageEncryptionAgentProtocol) { throw new Error(`Encryption agent value "${agent.EncryptionAgent}" is not recognized or tested with this library.`); } if (!agent.EncryptionAlgorithm) { throw new Error('Encryption algorithm type must be present in the encryption data properties.'); } if (!mapDotNetFrameworkToOpenSslAlgorithm.get(agent.EncryptionAlgorithm)) { throw new Error(`Encryption agent value "${agent.EncryptionAgent}" is not recognized or tested with this library.`); } } function resolveKeyEncryptionKeyFromOptions(encryptionOptions, keyId, callback) { if (!encryptionOptions) { return callback(new Error('Encryption options must be specified.')); } if (!keyId) { throw new Error('No key encryption key ID provided.'); } if ((!encryptionOptions.keyEncryptionKeys || typeof encryptionOptions.keyEncryptionKeys !== 'object') && (!encryptionOptions.keyResolver || typeof encryptionOptions.keyResolver !== 'function')) { return callback(new Error('Encryption options must provide either a "keyResolver" function or "keyEncryptionKeys" object.')); } const resolver = encryptionOptions.keyResolver || function (keyId, callback) { const key = encryptionOptions.keyEncryptionKeys[keyId]; callback(null, key); }; resolver(keyId, (resolveError, key) => { if (resolveError) { return callback(resolveError); } if (!key) { return callback(new Error(`We were not able to retrieve a key with identifier "${keyId}".`)); } return callback(null, bufferFromBase64String(key)); }); } // ---------------------------------------------------------------------------- // Compute Truncated Column Hash: // Each encrypted entity (row) has its own content encryption key, init vector, // and then each column is encrypted using an IV that comes from key table // properties, the row identity and the column name. 
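// Roughly, for a given property the per-column IV is derived as:
//   columnIV = SHA256( contentEncryptionIV + utf8([rowKey, columnName].join(partitionKey)) )
// truncated to the first 16 bytes, matching the (quirky) .NET string.Join ordering
// described inside the function below.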
// ---------------------------------------------------------------------------- function computeTruncatedColumnHash(contentEncryptionIV, partitionKey, rowKey, columnName) { // IMPORTANT: // The .NET storage library (the reference implementation for Azure client-side // storage) has a likely bug in the ordering and concatenation of parameters // to generate the truncated column hash; it uses string.Join(partitionKey, rowKey, column) // instead of String.Concat. The likely original intention of the author seems // to be a concat in the order of partition key, row key, and then column, but // instead the resulting string is actually row key, partition key, column, // because string.Join treats the first parameter (the partition key in this // case) as the separator for joining an array of values. This code uses array // join to identically reproduce the .NET behavior here so that the two // implementations remain compatible. const columnIdentity = new Buffer([rowKey, columnName].join(partitionKey), 'utf8'); const combined = Buffer.concat([contentEncryptionIV, columnIdentity]); const hash = getSha256Hash(combined); return hash.slice(0, azureStorageContentEncryptionIVBytes); } // ---------------------------------------------------------------------------- // Buffer/string functions // ---------------------------------------------------------------------------- function base64StringFromBuffer(val) { return Buffer.isBuffer(val) ? val.toString('base64') : val; } function bufferFromBase64String(val) { return Buffer.isBuffer(val) ? val : new Buffer(val, 'base64'); } function translateBuffersToBase64(properties) { for (const key in properties) { if (Buffer.isBuffer(properties[key])) { properties[key] = base64StringFromBuffer(properties[key]); } } return properties; } // ---------------------------------------------------------------------------- // The default encryption resolver implementation: given a list of properties // to encrypt, return true when that property is being processed. // ---------------------------------------------------------------------------- function createDefaultEncryptionResolver(propertiesToEncrypt) { const encryptedKeySet = new Set(propertiesToEncrypt); // Default resolver does not use partition/row, but user could return (partition, row, name) => { return encryptedKeySet.has(name); }; } function encryptProperty(contentEncryptionKey, contentEncryptionIV, partitionKey, rowKey, property, value) { let columnIV = computeTruncatedColumnHash(contentEncryptionIV, partitionKey, rowKey, property); // Store the encrypted properties as binary values on the service instead of // base 64 encoded strings because strings are stored as a sequence of WCHARs // thereby further reducing the allowed size by half. During retrieve, it is // handled by the response parsers correctly even when the service does not // return the type for JSON no-metadata. 
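  // The value itself is encrypted with the row's content encryption key and the per-column IV
  // computed above, so every (row, column) pair is encrypted under a distinct IV.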
return encryptValue(contentEncryptionKey, columnIV, value); } function decryptProperty(aesAlgorithm, contentEncryptionKey, contentEncryptionIV, partitionKey, rowKey, propertyName, encryptedValue) { const columnIV = computeTruncatedColumnHash(contentEncryptionIV, partitionKey, rowKey, propertyName); return decryptValue(aesAlgorithm, contentEncryptionKey, columnIV, bufferFromBase64String(encryptedValue)); } function encryptProperties(encryptionResolver, contentEncryptionKey, contentEncryptionIV, partitionKey, rowKey, unencryptedProperties, callback) { const encryptedProperties = {}; const encryptedPropertiesList = []; if (!unencryptedProperties) { return callback(new Error('The entity properties are not set.')); } async.forEachOf(unencryptedProperties, (value, property, next) => { if (property === tableEncryptionKeyDetails || property === tableEncryptionPropertyDetails) { return next(new Error('A table encryption property is present in the entity properties to consider for encryption. The property must be removed.')); } if (property === 'PartitionKey' || property === 'RowKey') { encryptedProperties[property] = value; return next(); } if (property === 'Timestamp') { return next(); } if (encryptionResolver(partitionKey, rowKey, property) !== true) { encryptedProperties[property] = value; return next(); } if (value === undefined || value === null) { return next(new Error(`Null or undefined properties cannot be encrypted. Property in question: ${property}`)); } let type = typeof value; if (type !== 'string') { return next(new Error(`${type} properties cannot be encrypted; property in question: ${property}`)); } const encryptedValue = encryptProperty(contentEncryptionKey, contentEncryptionIV, partitionKey, rowKey, property, value); encryptedPropertiesList.push(property); encryptedProperties[property] = encryptedValue; next(); }, (asyncError) => { if (asyncError) { return callback(asyncError); } callback(null, encryptedProperties, encryptedPropertiesList); }); } function decryptProperties(allEntityProperties, encryptedPropertyNames, partitionKey, rowKey, contentEncryptionKey, encryptionData, contentEncryptionIV) { validateEncryptionData(encryptionData); const aesAlgorithm = openSslFromNetFrameworkAlgorithm(encryptionData.EncryptionAgent.EncryptionAlgorithm); const decryptedProperties = {}; for (const key in allEntityProperties) { if (key === tableEncryptionKeyDetails || key === tableEncryptionPropertyDetails) { continue; } if (!encryptedPropertyNames.has(key)) { decryptedProperties[key] = allEntityProperties[key]; continue; } const value = decryptProperty(aesAlgorithm, contentEncryptionKey, contentEncryptionIV, partitionKey, rowKey, key, allEntityProperties[key]); decryptedProperties[key] = value.toString('utf8'); } return decryptedProperties; } function encryptEntity(partitionKey, rowKey, properties, encryptionOptions, callback) { if (!partitionKey || !rowKey || !properties) { return callback(new Error('Must provide a partition key, row key and properties for the entity.')); } const returnBinaryProperties = encryptionOptions.binaryProperties || 'buffer'; if (returnBinaryProperties !== 'base64' && returnBinaryProperties !== 'buffer') { return callback(new Error('The binary properties value is not valid. 
Please provide "buffer" or "base64".')); } const keyEncryptionKeyId = encryptionOptions.keyEncryptionKeyId; resolveKeyEncryptionKeyFromOptions(encryptionOptions, keyEncryptionKeyId, (keyLocateError, keyEncryptionKey) => { if (keyLocateError) { return callback(keyLocateError); } let encryptionResolver = encryptionOptions.encryptionResolver; if (!encryptionResolver) { const propertiesToEncrypt = encryptionOptions.encryptedPropertyNames; if (!propertiesToEncrypt) { return callback(new Error('Encryption options must contain either a list of properties to encrypt or an encryption resolver.')); } encryptionResolver = createDefaultEncryptionResolver(propertiesToEncrypt); } generateContentEncryptionKey((generateKeyError, contentEncryptionIV, contentEncryptionKey) => { if (generateKeyError) { return callback(generateKeyError); } const keyWrappingAlgorithm = azureStorageKeyWrappingAlgorithm; wrapContentKey(keyWrappingAlgorithm, keyEncryptionKey, contentEncryptionKey, (wrapError, wrappedContentEncryptionKey) => { if (wrapError) { return callback(wrapError); } encryptProperties(encryptionResolver, contentEncryptionKey, contentEncryptionIV, partitionKey, rowKey, properties, (encryptError, encryptedProperties, encryptionPropertyDetailsSet) => { if (encryptError) { return callback(encryptError); } if (encryptionPropertyDetailsSet.length === 0) { return callback(null, encryptedProperties); } const metadataSerialized = JSON.stringify(encryptionPropertyDetailsSet); encryptedProperties[tableEncryptionPropertyDetails] = encryptProperty(contentEncryptionKey, contentEncryptionIV, partitionKey, rowKey, tableEncryptionPropertyDetails, metadataSerialized); encryptedProperties[tableEncryptionKeyDetails] = JSON.stringify(createEncryptionData(keyEncryptionKeyId, jose.util.asBuffer(wrappedContentEncryptionKey), contentEncryptionIV, keyWrappingAlgorithm)); if (returnBinaryProperties === 'base64') { translateBuffersToBase64(encryptedProperties); } return callback(null, encryptedProperties); }); }); }); }); } function decryptEntity(partitionKey, rowKey, properties, encryptionOptions, callback) { if (!partitionKey || !rowKey || !properties) { return callback(new Error('A partition key, row key and properties must be provided.')); } const returnBinaryProperties = encryptionOptions.binaryProperties || 'buffer'; if (returnBinaryProperties !== 'base64' && returnBinaryProperties !== 'buffer') { return callback(new Error('The binary properties value is not valid. 
Please provide "buffer" or "base64".')); } let detailsValue = properties[tableEncryptionKeyDetails]; if (detailsValue === undefined) { return callback(null, properties); } let tableEncryptionKey = null; try { tableEncryptionKey = JSON.parse(detailsValue); } catch (parseError) { return callback(parseError); } const iv = bufferFromBase64String(tableEncryptionKey.ContentEncryptionIV); const wrappedContentKey = tableEncryptionKey.WrappedContentKey; if (wrappedContentKey.Algorithm !== azureStorageKeyWrappingAlgorithm) { return callback(new Error(`The key wrapping algorithm "${wrappedContentKey.Algorithm}" is not tested or supported in this library.`)); } const keyWrappingAlgorithm = wrappedContentKey.Algorithm; const wrappedContentKeyIdentifier = wrappedContentKey.KeyId; const wrappedContentKeyEncryptedKey = bufferFromBase64String(wrappedContentKey.EncryptedKey); const aesAlgorithm = openSslFromNetFrameworkAlgorithm(tableEncryptionKey.EncryptionAgent.EncryptionAlgorithm); resolveKeyEncryptionKeyFromOptions(encryptionOptions, wrappedContentKeyIdentifier, (kvkLocateError, kvk) => { if (kvkLocateError) { return callback(kvkLocateError); } const keyEncryptionKeyValue = bufferFromBase64String(kvk); unwrapContentKey(keyWrappingAlgorithm, keyEncryptionKeyValue, wrappedContentKeyEncryptedKey, (unwrapError, contentEncryptionKey) => { if (unwrapError) { return callback(unwrapError); } const metadataIV = computeTruncatedColumnHash(iv, partitionKey, rowKey, tableEncryptionPropertyDetails); const tableEncryptionDetails = bufferFromBase64String(properties[tableEncryptionPropertyDetails]); try { const decryptedPropertiesSet = decryptValue(aesAlgorithm, contentEncryptionKey, metadataIV, tableEncryptionDetails); const listOfEncryptedProperties = JSON.parse(decryptedPropertiesSet.toString('utf8')); const decrypted = decryptProperties(properties, new Set(listOfEncryptedProperties), partitionKey, rowKey, contentEncryptionKey, tableEncryptionKey, iv); if (returnBinaryProperties === 'base64') { translateBuffersToBase64(decrypted); } return callback(null, decrypted); } catch (error) { return callback(error); } }); }); } module.exports = { decryptEntity: decryptEntity, encryptEntity: encryptEntity, }; <file_sep>/webhooks/tasks/repository.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // /*eslint no-console: ["error", { allow: ["dir", "log"] }] */ 'use strict'; module.exports = { filter: function (data) { let eventType = data.properties.event; return eventType === 'repository'; }, run: function (operations, organization, data, callback) { const event = data.body; const immediateRefreshOptions = { backgroundRefresh: false, maxAgeSeconds: 0.01, }; let update = false; if (event.action === 'created') { console.log(`repo created: ${event.repository.full_name} ${event.repository.private === 'private' ? 'private' : 'public'} by ${event.sender.login}`); update = true; } else if (event.action === 'deleted') { console.log(`repo DELETED: ${event.repository.full_name} ${event.repository.private === 'private' ? 'private' : 'public'} by ${event.sender.login}`); update = true; } else if (event.action === 'publicized') { console.log('a repo went public!'); // TODO: refresh repos list here, too // TODO: refresh the specific repo entry } else { console.log('other repo condition:'); console.dir(data); } if (update) { // CONSIDER: When to update the entire org list? 
operations.getRepos() would be cross-org organization.getRepositories(immediateRefreshOptions, () => { console.log('refreshed repos list after ADD'); const crossOrgRefreshOptions = { backgroundRefresh: false, maxAgeSeconds: 15, }; operations.getRepos(crossOrgRefreshOptions, () => { console.log('refreshed cross-org repos list with 15s buffer'); }); }); } // Immediately, to help delete the ticket callback(); }, }; <file_sep>/webhooks/tasks/index.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // 'use strict'; module.exports = [ require('./automaticTeams'), require('./membership'), require('./organization'), require('./repository'), ]; <file_sep>/data.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // 'use strict'; /*eslint no-console: ["error", { allow: ["warn"] }] */ // This file is very sad. :( // This is the original data interface for this portal. It uses Azure // table storage and its Node.js SDK. const _ = require('lodash'); const azure = require('azure-storage'); const async = require('async'); const uuid = require('node-uuid'); const os = require('os'); var staticHostname = os.hostname().toString(); function DataClient(options, callback) { if (options.config === undefined) { return callback(new Error('Configuration must be provided to the data client.')); } var storageAccountName = options.config.github.links.table.account; var storageAccountKey = options.config.github.links.table.key; var prefix = options.config.github.links.table.prefix; try { if (!storageAccountName || !storageAccountKey) { throw new Error('Storage account information is not configured.'); } this.table = azure.createTableService(storageAccountName, storageAccountKey); } catch (storageAccountError) { return callback(storageAccountError); } this.entGen = azure.TableUtilities.entityGenerator; if (prefix === undefined) { prefix = ''; } this.options = { partitionKey: prefix + 'pk', linksTableName: prefix + 'links', pendingApprovalsTableName: prefix + 'pending', errorsTableName: prefix + 'errors', settingsTableName: `${prefix}settings`, encryption: options.config.github.links.table.encryption, }; if (this.options.encryption === true) { const encryptColumns = new Set(['githubToken', 'githubTokenIncreasedScope']); const encryptionOptions = { keyEncryptionKeyId: options.config.github.links.table.encryptionKeyId, keyResolver: options.keyEncryptionKeyResolver, encryptedPropertyNames: encryptColumns, binaryProperties: 'buffer', tableDehydrator: reduceEntity, tableRehydrator: this.createEntity.bind(this), }; const tableClient = this.table; this.table = require('./lib/tableEncryption')(tableClient, encryptionOptions); } var dc = this; var tableNames = [ dc.options.linksTableName, dc.options.pendingApprovalsTableName, dc.options.errorsTableName, dc.options.settingsTableName, ]; async.each(tableNames, function (tableName, callback) { dc.table.createTableIfNotExists(tableName, callback); }, function (error) { if (callback) return callback(error, dc); }); } var reduceEntity = function reduceEntity(instance) { if (instance === undefined || instance === null) { return instance; } for (var column in instance) { if (instance[column] && instance[column]._ !== undefined) { instance[column] = instance[column]._; } } return instance; }; DataClient.prototype.reduceEntity = reduceEntity; 
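// Illustrative sketch (comments only, not executed): the wire format below is an
// assumption about how the azure-storage SDK returns entities, with each value wrapped
// in an object whose '_' member holds the raw payload (and '$' the EDM type).
// reduceEntity collapses those wrappers so callers can read plain properties:
//
//   const wire = { ghu: { _: 'octocat', $: 'Edm.String' }, ghid: { _: '583231' } };
//   const plain = reduceEntity(wire);
//   // plain.ghu === 'octocat'; plain.ghid === '583231'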
DataClient.prototype.requestToUserInformation = function rtui(req) { var info = { ghid: undefined, ghu: undefined, aad: undefined, }; if (req && req.user && req.user.github && req.user.github.id) { info.ghid = req.user.github.id; if (info.ghid.toString) { info.ghid = info.ghid.toString(); } if (req.user.github.username) { info.ghu = req.user.github.username; } } if (req && req.user && req.user.azure && req.user.azure.username) { info.aad = req.user.azure.username; } return info; }; DataClient.prototype.insertErrorLogEntry = function insertErrorEntry(version, req, err, meta, callback) { // generic configuration, should move out at some point... var storeUnknownUserErrors = false; var storeRequestInformation = true; var cbNoErrors = function (callback) { if (callback) { callback(); } }; var dc = this; var entity; // (PartitionKey, RowKey): (ghid || 0, new uuid) // (ghu, ghid, aad): user information // (t, cid): (time when method called, correlation ID) // (e, json, meta): (error message, JSON serialized err, JSON metadata) // (url, host, ...): various host and request informational fields try { var info = dc.requestToUserInformation(req); // We may encounter users without a session. In these cases, we could log with -1 ID for pkey (OR use correlation ID for the pkey... hmm.) if (info.ghid === undefined) { if (!storeUnknownUserErrors) { return cbNoErrors(callback); } info.ghid = -1; } info.v = version; if (req.headers && req.headers.referer) { info.referer = req.headers.referer; } var partitionKey = info.ghid; var uniqueErrorId = uuid.v4(); entity = dc.createEntity(partitionKey, uniqueErrorId, info); var errorMessage = 'The error object was undefined.'; var errorJson; var errorStack; var errorStatus = '200'; if (err) { // If err.meta is set, use that for the metadata up-level, and remove from err object. if (err.meta && !meta) { meta = err.meta; delete err.meta; } errorStack = err.stack; if (err.status) { errorStatus = err.status; // delete err.status; // ? may not want to do this... } if (err.message) { errorMessage = err.message; } else { if (err.toString) { errorMessage = err.toString(); } else { errorMessage = 'The provided error instance is not a string and has no toString method.'; } } try { errorJson = JSON.stringify(err); } catch (je) { // Ignore any serialization errors or circular reference problems, the rest will still be logged in this case. } } var metaJson; if (meta) { try { metaJson = JSON.stringify(meta); } catch (je) { // Ignore. } } var errorEntity = { t: new Date().getTime(), cid: (req && req.correlationId ? req.correlationId : undefined), e: errorMessage, stack: errorStack, json: errorJson, meta: metaJson, status: errorStatus, 'new': true }; dc.mergeIntoEntity(entity, errorEntity); if (storeRequestInformation) { var sri = { url: req.scrubbedUrl || req.originalUrl || req.url, ua: req.headers['user-agent'], host: staticHostname }; dc.mergeIntoEntity(entity, sri); } } catch (ex) { // Retry policy could be nice, OR log this separately if possible. 
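// Swallow the exception: error logging is best-effort and must never fail the original request.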
return cbNoErrors(callback); } if (entity) { dc.table.insertEntity(dc.options.errorsTableName, entity, function (/* ignoredError */) { cbNoErrors(callback); }); } else { cbNoErrors(callback); } }; DataClient.prototype.updateError = function (partitionKey, rowKey, mergeEntity, callback) { var dc = this; var entity = dc.createEntity(partitionKey, rowKey, mergeEntity); console.warn('This method does not work with encryption at this time.'); dc.table.mergeEntity(dc.options.errorsTableName, entity, callback); }; DataClient.prototype.removeError = function (partitionKey, rowKey, callback) { var dc = this; dc.table.deleteEntity(dc.options.errorsTableName, dc.createEntity(partitionKey, rowKey), callback); }; DataClient.prototype.getActiveErrors = function (correlationId, callback) { var dc = this; // Correlation ID is optional if (typeof (correlationId) === 'function') { callback = correlationId; correlationId = undefined; } var done = false; var continuationToken = null; var entries = []; async.whilst( function () { return !done; }, function (asyncCallback) { var query = new azure.TableQuery() .where('new eq ?', true); if (correlationId) { query.and.apply(query, ['cid eq ?', correlationId]); } dc.table.queryEntities(dc.options.errorsTableName, query, continuationToken, function (error, results) { if (error) { done = true; return asyncCallback(error); } if (results.continuationToken) { continuationToken = results.continuationToken; } else { done = true; } if (results && results.entries && results.entries.length) { for (var i = 0; i < results.entries.length; i++) { entries.push(reduceEntity(results.entries[i])); } } asyncCallback(); }); }, function (error) { if (error) { return callback(error); } async.sortBy(entries, function (entity, scb) { var t; var err = null; try { t = Math.round(entity.t) * -1; } catch (trx) { err = trx; } return scb(err, t); }, callback); }); }; DataClient.prototype.mergeIntoEntity = function mit(entity, obj, callback) { var dc = this; if (obj) { for (var key in obj) { // Currently stripping metadata if (key === '.metadata') { continue; } if (obj[key] === undefined || obj[key] === null) { // Skip undefined/null objects, including the key continue; } if (typeof obj[key] === 'string') { entity[key] = dc.entGen.String(obj[key]); } else if (obj[key] === true) { entity[key] = dc.entGen.Boolean(true); } else if (obj[key] === false) { entity[key] = dc.entGen.Boolean(false); } else if (Buffer.isBuffer(obj[key])) { entity[key] = dc.entGen.Binary(obj[key]); } else if (obj[key] instanceof Date) { entity[key] = dc.entGen.DateTime(obj[key]); } else if (typeof obj[key] === 'number') { // Opinionated entity processing: store all numbers as strings entity[key] = dc.entGen.String(obj[key].toString()); } else { console.warn('Consider whether a new entity merge clause is required for key ' + key + ' of type:' + typeof obj[key]); if (obj[key].toString) { entity[key] = dc.entGen.String(obj[key].toString()); } else { entity[key] = dc.entGen.String(obj[key]); } } } } if (callback) { callback(null, entity); } else { return entity; } }; DataClient.prototype.createEntity = function ce(partitionKey, rowKey, obj, callback) { var dc = this; if (typeof (obj) === 'function') { callback = obj; obj = undefined; } var entity = { PartitionKey: dc.entGen.String(partitionKey), RowKey: dc.entGen.String(rowKey) }; if (obj) { dc.mergeIntoEntity(entity, obj); } if (callback) { return callback(null, entity); } else { return entity; } }; // links // ----- // CONSIDER: Replace link calls with reduced entity 
"association" calls, then depre. & remove these funcs. DataClient.prototype.createLinkObjectFromRequest = function createLinkObject(req, callback) { if (req && req.user && req.user.github && req.user.azure && req.user.github.username && req.user.github.id && req.user.azure.username && req.user.azure.oid) { var link = { ghu: req.user.github.username, ghid: req.user.github.id.toString(), aadupn: req.user.azure.username, aadname: req.user.azure.displayName, aadoid: req.user.azure.oid, joined: new Date(), }; link.ghavatar = req.user.github.avatarUrl; if (req.user.github.accessToken) { link.githubToken = req.user.github.accessToken; link.githubTokenUpdated = new Date().getTime(); } if (req.user.githubIncreasedScope && req.user.githubIncreasedScope.accessToken) { link.githubTokenIncreasedScope = req.user.githubIncreasedScope.accessToken; link.githubTokenIncreasedScopeUpdated = new Date().getTime(); } return callback(null, link); } else { return callback(new Error('Not all fields needed for creating a link are available and authenticated. This may be a temporary problem or an implementation bug.')); } }; DataClient.prototype.getUserLinks = function gul(users, callback) { var dc = this; var query = new azure.TableQuery() .where('PartitionKey eq ?', this.options.partitionKey); if (!(users && users.length && users.length > 0)) { return callback(new Error('Must include an array of GitHub user IDs, and at least one in that array.')); } var clauses = []; if (users.length > 250) { // TODO: Write better code here to use continuation tokens and utilities to resolve any number from storage. return callback(new Error(`The application has queried for ${users.length} entities, which is too many for the current design.`)); } for (var i = 0; i < users.length; i++) { clauses.push('ghid eq ?string?'); } var args = [clauses.join(' or ')].concat(users); query.and.apply(query, args); dc.table.queryEntities(dc.options.linksTableName, query, null, function (error, results, headers) { if (error) { error.headers = headers; return callback(error); } var entries = []; if (results && results.entries && results.entries.length) { for (var i = 0; i < results.entries.length; i++) { entries.push(reduceEntity(results.entries[i])); } } async.sortBy(entries, function (user, sortCallback) { var value = user.aadupn || user.aadname || user.ghu || user.ghid; if (value.toLowerCase) { value = value.toLowerCase(); } sortCallback(null, value); }, callback); }); }; DataClient.prototype.getUserLinkByUsername = function gulbyu(githubUsername, callback) { this.getUserLinkByProperty('ghu', githubUsername, function (error, data) { if (error) return callback(error); if (data && data.length) { if (data.length === 1) { callback(null, data[0]); } else { if (data.length === 0) { callback(null, false); } else { callback(new Error('Multiple entries returned. The data may be consistent. 
Please file a bug.')); } } } else { callback(new Error('No results.')); } }); }; DataClient.prototype.updateLink = function updl(userid, replaceEntity, callback) { var dc = this; if (userid === undefined) { return callback(new Error('The GitHub ID is undefined.')); } if (typeof userid != 'string') { userid = userid.toString(); } var entity = dc.createEntity(dc.options.partitionKey, userid, replaceEntity); dc.table.replaceEntity(dc.options.linksTableName, entity, callback); }; DataClient.prototype.getUserByAadUpn = function gubauapn(employeeAlias, callback) { this.getUserLinkByProperty('aadupn', employeeAlias.toLowerCase(), callback); }; DataClient.prototype.getUserByAadOid = function getByOid(oid, callback) { this.getUserLinkByProperty('aadoid', oid, callback); }; function getUserLinkByPropertyOneAttempt(dc, propertyName, value, callback) { 'use strict'; const query = new azure.TableQuery() .where(propertyName + ' eq ?', value); dc.table.queryEntities(dc.options.linksTableName, query, null, function (error, results) { if (error) return callback(error); const entries = []; if (results && results.entries && results.entries.length) { for (let i = 0; i < results.entries.length; i++) { entries.push(reduceEntity(results.entries[i])); } } callback(null, entries); }); } function getUserLinkByPropertyRetryOnEmptyResults(dc, propertyName, value, callback) { 'use strict'; let mostRecentEntries = null; // Wrap the one-time query operation; local to this function an error is simulated // for empty results (which are valid) to reuse the async library's retry logic. const getAndEmptyAsError = (wrappedFunctionCallback) => { getUserLinkByPropertyOneAttempt(dc, propertyName, value, (error, results) => { if (!error && results && Array.isArray(results) && results.length === 0) { error = new Error('No results were returned from the link by property query. This message should not be seen in production environments.'); error.simulated = true; } mostRecentEntries = results; return wrappedFunctionCallback(error, results); }); }; async.retry({ times: 3, // Immediately return if it is an actual error; only simulated empty-result errors are retried errorFilter: function (err) { return err.simulated === true; }, // Exponential backoff interval: function (retryCount) { return 50 * Math.pow(2, retryCount); } }, getAndEmptyAsError, (retryError) => { if (retryError && retryError.simulated === true) { retryError = null; } return callback(retryError, retryError ? undefined : mostRecentEntries); }); } DataClient.prototype.getUserLinkByProperty = function gulbprop(propertyName, value, callback) { // This is an important function that calls Azure to retrieve the link // for a user. A query operation is used and sometimes returns an empty // result set, even though the link exists. This robustness improvement // is targeted for now; it will use a short exponential backoff retry // whenever an empty result set is returned. getUserLinkByPropertyRetryOnEmptyResults(this, propertyName, value, callback); }; DataClient.prototype.getLink = function getLink(githubId, callback) { var dc = this; if (githubId === undefined) { return callback(new Error('The GitHub ID is undefined.')); } if (typeof githubId != 'string') { githubId = githubId.toString(); } dc.table.retrieveEntity(dc.options.linksTableName, dc.options.partitionKey, githubId, function (error, result, response) { if (error && !result) { // This routine returns no error and a false 'link' when an entity is // missing, but we still want to return an error for anything else, // especially if there is encryption configured. 
if (error.statusCode == 404 && error.code === 'ResourceNotFound') { error = null; } return callback(error, false); } return callback(error, result, response); }); }; DataClient.prototype.getAllEmployees = function getAllEmployees(options, callback) { if (!callback && typeof(options) === 'function') { callback = options; options = {}; } let columns = ['aadupn', 'ghu', 'ghid', 'PartitionKey', 'RowKey']; if (options.includeNames) { columns.push('aadname'); } if (options.includeId) { columns.push('aadoid'); } if (options.includeServiceAccounts) { columns.push('serviceAccount'); columns.push('serviceAccountMail'); } if (options.all) { columns = undefined; } var dc = this; var pageSize = 500; var employees = []; var done = false; var continuationToken = null; async.whilst( function areWeDone() { return !done; }, function grabPage(cb) { var query = new azure.TableQuery() .select(columns) .top(pageSize); dc.table.queryEntities(dc.options.linksTableName, query, continuationToken, function (error, results) { if (error) { done = true; return cb(error); } if (results.continuationToken) { continuationToken = results.continuationToken; } else { done = true; } if (results && results.entries && results.entries.length) { for (var i = 0; i < results.entries.length; i++) { employees.push(reduceEntity(results.entries[i])); } } cb(); }); }, function (error) { if (error) return callback(error); employees.forEach(account => { if (account.aadupn) { account.aadupn = account.aadupn.toLowerCase(); } }); const sorted = _.sortBy(employees, ['aadupn', 'ghu']); callback(null, sorted); }); }; DataClient.prototype.insertLink = function insertLink(githubId, details, callback) { var dc = this; if (githubId === undefined) { return callback(new Error('The GitHub ID is undefined.')); } if (typeof githubId !== 'string') { githubId = githubId.toString(); } var entity = dc.createEntity(dc.options.partitionKey, githubId, details); dc.table.insertEntity(dc.options.linksTableName, entity, callback); }; DataClient.prototype.removeLink = function removeLink(githubId, callback) { var dc = this; if (githubId === undefined) { return callback(new Error('The GitHub ID is undefined.')); } if (typeof githubId != 'string') { githubId = githubId.toString(); } dc.table.deleteEntity(dc.options.linksTableName, dc.createEntity(dc.options.partitionKey, githubId), callback); }; // basic settings interface // ------------------------ DataClient.prototype.getSetting = function (partitionKey, rowKey, callback) { getReducedEntity(this, this.options.settingsTableName, partitionKey, rowKey, callback); }; DataClient.prototype.setSetting = function (partitionKey, rowKey, value, callback) { const entity = this.createEntity(partitionKey, rowKey, value); this.table.insertEntity(this.options.settingsTableName, entity, callback); }; DataClient.prototype.deleteSetting = function (partitionKey, rowKey, callback) { this.table.deleteEntity(this.options.settingsTableName, this.createEntity(partitionKey, rowKey), callback); }; // pending approvals workflow // -------------------------- DataClient.prototype.getPendingApprovals = function getPendingApprovals(teamsIn, callback) { var dc = this; var teams = null; var i; if (typeof teamsIn === 'number') { teams = [teamsIn.toString()]; } else if (typeof teamsIn === 'string') { teams = [teamsIn]; } else if (typeof teamsIn === 'function') { callback = teamsIn; teams = []; // Special case: empty list means all pending approvals } else { if (!(teamsIn && teamsIn.length)) { throw new Error('Unknown "teams" type for 
getPendingApprovals. Please file a bug.'); } // New permissions system refactoring... if (teamsIn.length > 0 && teamsIn[0] && teamsIn[0].id) { teams = []; for (i = 0; i < teamsIn.length; i++) { teams.push(teamsIn[i].id); } } } var query = new azure.TableQuery() .where('PartitionKey eq ?', this.options.partitionKey) .and('active eq ?', true); if (teams.length > 0) { var clauses = []; for (i = 0; i < teams.length; i++) { clauses.push('teamid eq ?string?'); } var args = [clauses.join(' or ')].concat(teams); query.and.apply(query, args); } dc.table.queryEntities(dc.options.pendingApprovalsTableName, query, null, function (error, results) { if (error) return callback(error); var entries = []; if (results && results.entries && results.entries.length) { for (var i = 0; i < results.entries.length; i++) { var r = results.entries[i]; if (r && r.active && r.active._) { entries.push(reduceEntity(r)); } } } callback(null, entries); }); }; DataClient.prototype.insertApprovalRequest = function iar(teamid, details, callback) { var dc = this; if (typeof teamid != 'string') { teamid = teamid.toString(); } details.teamid = teamid; dc.insertGeneralApprovalRequest('joinTeam', details, callback); }; DataClient.prototype.insertGeneralApprovalRequest = function igar(ticketType, details, callback) { var dc = this; var id = uuid.v4(); var entity = dc.createEntity(dc.options.partitionKey, id, { tickettype: ticketType }); dc.mergeIntoEntity(entity, details); dc.table.insertEntity(dc.options.pendingApprovalsTableName, entity, function (error, result, response) { if (error) { return callback(error); } // Pass back the generated request ID first. callback(null, id, result, response); }); }; function getReducedEntity(dc, tableName, partitionKey, rowKey, callback) { dc.table.retrieveEntity(tableName, partitionKey, rowKey, function (error, ent) { if (error) return callback(error); callback(null, reduceEntity(ent)); }); } DataClient.prototype.getRepositoryApproval = function (fieldName, repositoryValue, callback) { const dc = this; // Shortcoming: repoName is case sensitive const query = new azure.TableQuery() .where('PartitionKey eq ?', this.options.partitionKey) .and('tickettype eq ?', 'repo') .and(`${fieldName} eq ?`, repositoryValue); dc.table.queryEntities(dc.options.pendingApprovalsTableName, query, null, function (error, results) { if (error) return callback(error); const entries = []; if (results && results.entries && results.entries.length) { for (let i = 0; i < results.entries.length; i++) { const r = results.entries[i]; entries.push(reduceEntity(r)); } } callback(null, entries); }); }; DataClient.prototype.getApprovalRequest = function gar(requestId, callback) { getReducedEntity(this, this.options.pendingApprovalsTableName, this.options.partitionKey, requestId, callback); }; DataClient.prototype.getPendingApprovalsForUserId = function gpeaf(githubid, callback) { var dc = this; if (typeof githubid === 'number') { githubid = githubid.toString(); } var query = new azure.TableQuery() .where('PartitionKey eq ?', this.options.partitionKey) .and('active eq ?', true) .and('ghid eq ?', githubid); dc.table.queryEntities(dc.options.pendingApprovalsTableName, query, null, function (error, results) { if (error) return callback(error); var entries = []; if (results && results.entries && results.entries.length) { for (var i = 0; i < results.entries.length; i++) { var r = results.entries[i]; if (r && r.active && r.active._) { entries.push(reduceEntity(r)); } } } callback(null, entries); }); }; 
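// Note on the two mutation helpers below: replaceApprovalRequest overwrites the stored
// entity with exactly the properties provided, while updateApprovalRequest performs a
// read-modify-write in application code (fetch, merge, then replace) so that encrypted
// columns are rewritten as a whole rather than merged server-side.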
DataClient.prototype.replaceApprovalRequest = function uar(requestId, mergeEntity, callback) { var dc = this; var entity = dc.createEntity(dc.options.partitionKey, requestId, mergeEntity); dc.table.replaceEntity(dc.options.pendingApprovalsTableName, entity, callback); }; DataClient.prototype.updateApprovalRequest = function updatedVersion2(requestId, mergeEntity, callback) { // This is a less efficient implementation for now due to encryption work. var dc = this; dc.getApprovalRequest(requestId, (getError, currentVersion) => { if (getError) { return callback(getError); } var newObject = {}; Object.assign(newObject, currentVersion); Object.assign(newObject, mergeEntity); dc.replaceApprovalRequest(requestId, newObject, callback); }); }; module.exports = DataClient; <file_sep>/middleware/github/orgPermissions.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // 'use strict'; const utils = require('../../utils'); module.exports = function addOrgPermissionsToRequest(req, res, next) { // Only compute once per request if (req.orgPermissions) { return next(); } const oss = req.oss; const login = oss.usernames.github; const id = oss.id.github ? parseInt(oss.id.github, 10) : null; const organization = req.organization; const orgPermissions = { allowAdministration: false, owner: false, sudo: false, }; req.orgPermissions = orgPermissions; organization.isSudoer(login, (sudoCheckError, isSudoer) => { oss.isPortalAdministrator((portalSudoError, isPortalSudoer) => { if (portalSudoError) { return next(portalSudoError); } // Indicate that the user is has sudo rights if (isSudoer === true || isPortalSudoer === true) { orgPermissions.sudo = true; } // Get the organization owners organization.getOwners((getOwnersError, owners) => { if (getOwnersError) { return next(getOwnersError); } // +MIDDLEWARE: provide this later if it is needed elsewhere req.orgOwners = owners; const set = new Set(); for (let i = 0; i < owners.length; i++) { set.add(owners[i].id); } if (set.has(id)) { orgPermissions.owner = true; } req.orgOwnersSet = set; // Make a permission decision if (orgPermissions.owner || orgPermissions.sudo) { orgPermissions.allowAdministration = true; } // Are they even an organization member? const membershipCacheOptions = { maxAgeSeconds: 30, backgroundRefresh: false, }; organization.getMembership(login, membershipCacheOptions, (getMembershipError, membershipStatus) => { if (getMembershipError && getMembershipError.innerError && getMembershipError.innerError.code === 404) { getMembershipError = null; membershipStatus = false; } if (getMembershipError) { return next(utils.wrapError(getMembershipError, `Unable to successfully validate whether you are already a member of the ${organization.name} organization on GitHub`)); } if (membershipStatus && membershipStatus.state) { membershipStatus = membershipStatus.state; } orgPermissions.membershipStatus = membershipStatus; return next(); }); }); }); }); }; <file_sep>/routes/teamsPager.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. 
// 'use strict'; const _ = require('lodash'); const TeamSearch = require('../business/teamSearch'); function sortOrgs(orgs) { return _.sortBy(orgs, ['name']); } function getTeamsData(id, crossOrgOrOrgName, operations, callback) { const options = { backgroundRefresh: true, maxAgeSeconds: 60 * 10 /* 10 minutes */, individualMaxAgeSeconds: 60 * 30 /* 30 minutes */, }; operations.getTeams(crossOrgOrOrgName, options, (error, teams) => { if (error) { return callback(error); } const input = Array.isArray(teams) ? teams : Array.from(teams.values()); const list = []; input.forEach(team => { let entry = team; // Cross-organization entries need to be massaged if (team.orgs && !team.organization) { const orgs = Object.getOwnPropertyNames(team.orgs); const firstOrg = orgs[0]; entry = team.orgs[firstOrg]; entry.organization = { login: firstOrg, }; } list.push(entry); }); const yourTeamsMap = new Map(); operations.getUserContext(id).getAggregatedOverview((overviewWarning, overview) => { if (overviewWarning) { // TODO: What to show here? return callback(null, list, yourTeamsMap, null, null, overviewWarning /* warning */); } reduceTeams(overview.teams, 'member', yourTeamsMap); reduceTeams(overview.teams, 'maintainer', yourTeamsMap); return callback(null, list, yourTeamsMap, overview.teams && overview.teams.member ? overview.teams.member.length : 0, overview.teams && overview.teams.maintainer ? overview.teams.maintainer.length : 0); }); }); } function reduceTeams(collections, property, map) { if (!collections) { return; } const values = collections[property]; values.forEach(team => { map.set(team.id, property); }); } module.exports = (req, res, next) => { const operations = req.app.settings.operations; const isCrossOrg = req.teamsPagerMode === 'orgs'; const id = req.oss.id.github; const orgName = isCrossOrg ? null : req.org.name; getTeamsData(id, isCrossOrg ? null : orgName.toLowerCase(), operations, (error, teams, yourTeamsMap, totalMemberships, totalMaintainerships, warning) => { if (error) { return next(error); } const page = req.query.page_number ? req.query.page_number : 1; let phrase = req.query.q; let set = req.query.set; if (set !== 'all' && set !== 'available' && set !== 'your') { set = 'your'; } const filters = []; if (phrase) { filters.push({ type: 'phrase', value: phrase, displayPrefix: 'matching', }); } const search = new TeamSearch(teams, { phrase: phrase, set: set, yourTeamsMap: yourTeamsMap, }); search.search(null, page, req.query.sort).then(() => { const onboardingOrJoining = req.query.joining || req.query.onboarding; req.oss.render(req, res, 'teams/', 'Teams', { orgs: isCrossOrg ? sortOrgs(req.oss.orgs()) : undefined, organization: isCrossOrg ? undefined : req.org, search: search, filters: filters, query: { phrase: phrase, set: set, }, yourTeamsMap: yourTeamsMap, totalMemberships: totalMemberships, totalMaintainerships: totalMaintainerships, errorAsWarning: warning /* if an error occurs that is not fatal, we may want to display information about it */, onboardingOrJoining: onboardingOrJoining, }); }).catch(next); }); }; <file_sep>/business/operations.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. 
// 'use strict'; const async = require('async'); const Account = require('./account'); const GraphManager = require('./graphManager'); const Organization = require('./organization'); const UserContext = require('./user/context'); // defaults could move to configuration alternatively const defaults = { orgReposStaleSeconds: 60 * 15 /* 15m */, orgRepoTeamsStaleSeconds: 60 * 3 /* 3m */, orgRepoCollaboratorsStaleSeconds: 60 * 30 /* 30m */, orgRepoCollaboratorStaleSeconds: 30 /* half minute */, orgRepoDetailsStaleSeconds: 60 * 5 /* 5m */, orgTeamsStaleSeconds: 60 * 5 /* 5m */, orgTeamsSlugLookupStaleSeconds: 30 /* half a minute */, orgMembersStaleSeconds: 60 * 30 /* 30m */, teamMaintainersStaleSeconds: 60 * 2 /* 2m */, orgMembershipStaleSeconds: 60 * 5 /* 5m */, orgMembershipDirectStaleSeconds: 30 /* 30s */, crossOrgsReposStaleSecondsPerOrg: 60 * 60 * 2 /* 2 hours per org */, crossOrgsReposParallelCalls: 3, crossOrgsMembersStaleSecondsPerOrg: 60 * 60 * 2 /* 2 hours per org */, crossOrgsMembersParallelCalls: 5, corporateLinksStaleSeconds: 60 * 5 /* 5m */, repoBranchesStaleSeconds: 60 * 5 /* 5m */, accountDetailStaleSeconds: 60 * 60 * 24 /* 24h */, orgRepoWebhooksStaleSeconds: 60 * 60 * 8 /* 8h */, teamRepositoryPermissionStaleSeconds: 0 /* 0m */, }; class Operations { constructor(options) { setRequiredProperties(this, ['github', 'config', 'dataClient', 'insights', 'redis'], options); this.providers = options; this.defaults = Object.assign({}, defaults); this.mailAddressProvider = options.mailAddressProvider; this.mailProvider = options.mailProvider; this.graphManager = new GraphManager(this, options); return this; } get organizationNames() { if (!_private(this).organizationNames) { const names = []; for (let i = 0; i < this.config.github.organizations.length; i++) { names.push(this.config.github.organizations[i].name.toLowerCase()); } _private(this).organizationNames = names; } return _private(this).organizationNames; } get organizations() { if (!_private(this).organizations) { const orgs = {}; const names = this.organizationNames; for (let i = 0; i < names.length; i++) { const org = createOrganization(this, names[i]); orgs[names[i]] = org; } _private(this).organizations = orgs; } return _private(this).organizations; } getOrganizations(orgList) { if (!orgList) { return this.organizations; } const references = []; orgList.forEach(orgName => { const org = this.getOrganization(orgName); references.push(org); }); return references; } getOrganizationOriginalNames() { if (!_private(this).organizationOriginalNames) { const names = []; for (let i = 0; i < this.config.github.organizations.length; i++) { names.push(this.config.github.organizations[i].name); } _private(this).organizationOriginalNames = names; } return _private(this).organizationOriginalNames; } translateOrganizationNamesFromLowercase(object) { const orgs = this.getOrganizationOriginalNames(); orgs.forEach(name => { const lc = name.toLowerCase(); if (name !== lc && object[lc] !== undefined) { object[name] = object[lc]; delete object[lc]; } }); return object; } get organizationNamesWithTokens() { if (!_private(this).organizationNamesWithTokens) { const tokens = {}; for (let i = 0; i < this.config.github.organizations.length; i++) { const name = this.config.github.organizations[i].name.toLowerCase(); const token = this.config.github.organizations[i].ownerToken; tokens[name] = token; } _private(this).organizationNamesWithTokens = tokens; } return _private(this).organizationNamesWithTokens; } getOrganization(name, callback) { const lc = 
name.toLowerCase(); const org = this.organizations[lc]; if (!org) { throw new Error(`Could not find configuration for the "${name}" organization.`); } if (callback) { return callback(null, org); } return org; } getUserContext(userId) { // This will leak per user for the app runtime. Can use a LRU or limiting cache in the future if needed. if (!_private(this).userContext) { _private(this).userContext = new Map(); } userId = typeof(userId) === 'string' ? parseInt(userId, 10) : userId; const contexts = _private(this).userContext; let user = contexts.get(userId); if (!user) { user = new UserContext(this, userId); contexts.set(userId, user); } return user; } getRepos(callback) { const repos = []; const cacheOptions = { maxAgeSeconds: this.defaults.crossOrgsReposStaleSecondsPerOrg, }; // CONSIDER: Cross-org functionality might be best in the GitHub library itself const orgs = this.organizations; async.eachLimit( orgs, this.defaults.crossOrgsReposParallelCalls, (organization, next) => { organization.getRepositories(cacheOptions, (getReposError, orgRepos) => { if (!getReposError) { for (let i = 0; i < orgRepos.length; i++) { repos.push(orgRepos[i]); } } return next(getReposError); }); }, (error) => { return callback(error ? error : null, error ? null : repos); }); } getLinks(options, callback) { if (!callback && typeof (options) === 'function') { callback = options; options = null; } options = options || { includeNames: true, includeId: true, includeServiceAccounts: true, }; const caching = { maxAgeSeconds: options.maxAgeSeconds || this.defaults.corporateLinksStaleSeconds, backgroundRefresh: true, }; delete options.maxAgeSeconds; delete options.backgroundRefresh; return this.github.links.getLinks( options, caching, callback); } getTeamsWithMembers(orgName, options, callback) { const cacheOptions = {}; options = options || {}; cacheOptions.backgroundRefresh = options.backgroundRefresh !== undefined ? options.backgroundRefresh : true; cacheOptions.maxAgeSeconds = options.maxAgeSeconds || 60 * 10; cacheOptions.individualMaxAgeSeconds = options.individualMaxAgeSeconds; delete options.backgroundRefresh; delete options.maxAgeSeconds; delete options.individualMaxAgeSeconds; this.github.crossOrganization.teamMembers(this.organizationNamesWithTokens, options, cacheOptions, callback); } getRepoCollaborators(orgName, options, callback) { const cacheOptions = {}; options = options || {}; cacheOptions.backgroundRefresh = options.backgroundRefresh !== undefined ? options.backgroundRefresh : true; cacheOptions.maxAgeSeconds = options.maxAgeSeconds || 60 * 10; cacheOptions.individualMaxAgeSeconds = options.individualMaxAgeSeconds; delete options.backgroundRefresh; delete options.maxAgeSeconds; delete options.individualMaxAgeSeconds; this.github.crossOrganization.repoCollaborators(this.organizationNamesWithTokens, options, cacheOptions, callback); } getRepoTeams(orgName, options, callback) { const cacheOptions = {}; options = options || {}; cacheOptions.backgroundRefresh = options.backgroundRefresh !== undefined ? 
options.backgroundRefresh : true; cacheOptions.maxAgeSeconds = options.maxAgeSeconds || 60 * 10; cacheOptions.individualMaxAgeSeconds = options.individualMaxAgeSeconds; delete options.backgroundRefresh; delete options.maxAgeSeconds; delete options.individualMaxAgeSeconds; this.github.crossOrganization.repoTeams(this.organizationNamesWithTokens, options, cacheOptions, callback); } getTeams(orgName, options, callback) { if (!callback && typeof(options) === 'function') { callback = options; options = {}; } else if (!callback && !options && typeof(orgName) === 'function') { callback = orgName; options = {}; orgName = null; } if (!options.maxAgeSeconds) { options.maxAgeSeconds = this.defaults.crossOrgsMembersStaleSecondsPerOrg; } if (options.backgroundRefresh === undefined) { options.backgroundRefresh = true; } const cacheOptions = { maxAgeSeconds: options.maxAgeSeconds, backgroundRefresh: options.backgroundRefresh, }; delete options.maxAgeSeconds; delete options.backgroundRefresh; if (!orgName) { return this.github.crossOrganization.teams( this.organizationNamesWithTokens, options, cacheOptions, (error, values) => { return callback(error ? error : null, error ? null : crossOrganizationResults(this, values, 'id')); }); } this.getOrganization(orgName).getTeams(cacheOptions, callback); } getMembers(orgName, options, callback) { if (!callback && typeof(options) === 'function') { callback = options; options = {}; } else if (!callback && !options && typeof(orgName) === 'function') { callback = orgName; options = {}; orgName = null; } if (!options.maxAgeSeconds) { options.maxAgeSeconds = this.defaults.crossOrgsMembersStaleSecondsPerOrg; } if (options.backgroundRefresh === undefined) { options.backgroundRefresh = true; } const cacheOptions = { maxAgeSeconds: options.maxAgeSeconds, backgroundRefresh: options.backgroundRefresh, }; delete options.maxAgeSeconds; delete options.backgroundRefresh; if (!orgName) { return this.github.crossOrganization.orgMembers( this.organizationNamesWithTokens, options, cacheOptions, (error, values) => { return callback(error ? error : null, error ? null : crossOrganizationResults(this, values, 'id')); }); } const combinedOptions = Object.assign(options, cacheOptions); this.getOrganization(orgName).getMembers(combinedOptions, callback); } get systemAccountsByUsername() { return this.config.github && this.config.github.systemAccounts ? 
this.config.github.systemAccounts.logins : []; } isSystemAccountByUsername(username) { const lc = username.toLowerCase(); const usernames = this.systemAccountsByUsername; for (let i = 0; i < usernames.length; i++) { if (usernames[i].toLowerCase() === lc) { return true; } } return false; } getAccount(id) { // TODO: Centralized "accounts" local store const entity = { id: id }; return new Account(entity, this, getCentralOperationsToken.bind(null, this)); } } function getCentralOperationsToken(self) { if (self.config.github.organizations.length <= 0) { throw new Error('No organizations configured.'); } const firstOrg = self.config.github.organizations[0]; return firstOrg.ownerToken; } function createOrganization(self, name) { name = name.toLowerCase(); for (let i = 0; i < self.config.github.organizations.length; i++) { const settings = self.config.github.organizations[i]; if (settings.name.toLowerCase() === name) { return new Organization(self, name, settings); } } throw new Error(`This application is not configured for the "${name}" organization.`); } function setRequiredProperties(self, properties, options) { for (let i = 0; i < properties.length; i++) { const key = properties[i]; if (!options[key]) { throw new Error(`Required option with key "${key}" was not provided.`); } self[key] = options[key]; } } module.exports = Operations; function crossOrganizationResults(operations, results, keyProperty) { keyProperty = keyProperty || 'id'; const map = new Map(); operations.translateOrganizationNamesFromLowercase(results.orgs); for (const orgName of Object.getOwnPropertyNames(results.orgs)) { const orgValues = results.orgs[orgName]; for (let i = 0; i < orgValues.length; i++) { const val = orgValues[i]; const key = val[keyProperty]; if (!key) { throw new Error(`Entity missing property ${key} during consolidation processing.`); } let mapEntry = map.get(key); if (!mapEntry) { mapEntry = { orgs: {}, }; mapEntry[keyProperty] = key; map.set(key, mapEntry); } mapEntry.orgs[orgName] = val; } } map.meta = results.meta; map.cost = results.cost; return map; } const privateSymbol = Symbol(); function _private(self) { if (self[privateSymbol] === undefined) { self[privateSymbol] = {}; } return self[privateSymbol]; } <file_sep>/routes/link-cleanup.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // 'use strict'; const express = require('express'); const router = express.Router(); const utils = require('../utils'); const OpenSourceUserContext = require('../lib/context'); // Enforcing just a single GitHub account per Active Directory user. With // mild refactoring, this portal could easily support a session selecting // which link to work with, too. 
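// Overview of the middleware below: it loads every link stored for the signed-in AAD user;
// a single link tied to a different GitHub account triggers the "change account" page,
// a single matching link (or none) redirects home, and two or more links fall through to
// the cleanup routes defined later in this file.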
router.use((req, res, next) => { let config = req.app.settings.runtimeConfig; if (config.authentication.scheme !== 'aad') { return next(new Error('Link cleanup is only supported for certain types of authentication schemes.')); } let dc = req.app.settings.dataclient; dc.getUserByAadUpn(req.user.azure.username, function (findError, userLinks) { if (findError) { return next(new Error('Link cleanup is not available.')); } req.linksForCleanup = userLinks; if (userLinks.length === 1 && req.user && req.user.github && req.user.github.id !== userLinks[0].ghid) { if (req.body.unlink && req.body.confirm) { return unlink(req, userLinks[0], (unlinkError) => { if (unlinkError) { next(unlinkError); } else { res.redirect('/'); } }); } if (req.body.link && req.session.enableMultipleAccounts === true) { return link(req, req.body.link, (linkError, linkObject) => { if (linkError) { next(linkError); } else { req.session.selectedGithubId = linkObject.ghid; res.redirect('/?onboarding=yes'); } }); } return renderChangeAccountPage(req, res, userLinks[0]); } if (userLinks.length < 2) { return res.redirect('/'); } // CONSIDER: Make GitHub user calls to see if these users still exist. // EDGE: user renamed their GitHub account... so we may not have their latest GitHub ID, but // it would not create a duplicate link since the GHID fields would be the same. next(); }); }); function renderChangeAccountPage(req, res, link) { req.oss.render(req, res, 'removeothergithubaccount', 'Existing GitHub account found', { link: link, confirming: req.body.unlink, hideGitHubAccount: true, allowAdditionalAccountLink: req.session && req.session.enableMultipleAccounts ? req.session.enableMultipleAccounts : false, }); } function renderCleanupPage(req, res, idToConfirm, links) { links = links || req.linksForCleanup; let twoColumns = [[], []]; for (let i = 0; i < links.length; i++) { if (links[i].joined) { links[i].joinedDate = new Date(Math.round(links[i].joined)); } twoColumns[i % 2].push(links[i]); } req.oss.render(req, res, 'multiplegithubaccounts', 'GitHub Cleanup', { linksForCleanupByColumn: twoColumns, numberToRemove: req.linksForCleanup.length - 1, confirming: idToConfirm, hideGitHubAccount: true, }); } router.get('/', (req, res) => { renderCleanupPage(req, res); }); router.post('/', (req, res, next) => { let action = 'unlink'; let id = req.body.unlink; if (!req.body.unlink && req.session && req.session.enableMultipleAccounts === true && req.body.select) { id = req.body.select; action = 'select'; } let link = null; let remainingLinks = []; for (let i = 0; i < req.linksForCleanup.length; i++) { if (req.linksForCleanup[i].ghid === id) { link = req.linksForCleanup[i]; } else { remainingLinks.push(req.linksForCleanup[i]); } } if (!link) { return next(new Error(`Could not identify the link for GitHub user ${id}.`)); } if (action === 'select') { req.session.selectedGithubId = id; return res.redirect('/'); } let isConfirming = req.body.confirm === id; if (!isConfirming) { return renderCleanupPage(req, res, id); } unlink(req, link, (unlinkError) => { if (unlinkError) { return next(unlinkError); } if (remainingLinks.length > 1) { renderCleanupPage(req, res, null, remainingLinks); } else { req.oss.saveUserAlert(req, link.ghu + ' has been unlinked. 
You now have just one GitHub account link.', 'Link cleanup complete', 'success'); res.redirect('/'); } }); }); function unlink(req, link, callback) { const options = { config: req.app.settings.runtimeConfig, dataClient: req.app.settings.dataclient, redisClient: req.app.settings.dataclient.cleanupInTheFuture.redisClient, redisHelper: req.app.settings.redisHelper, githubLibrary: req.app.settings.githubLibrary, link: link, insights: req.insights, }; new OpenSourceUserContext(options, function (contextError, unlinkContext) { if (contextError) { return callback(contextError); } unlinkContext.modernUser().unlinkAndDrop(callback); }); } function invalidateCache(req, link, callback) { const options = { config: req.app.settings.runtimeConfig, dataClient: req.app.settings.dataclient, redisClient: req.app.settings.dataclient.cleanupInTheFuture.redisClient, redisHelper: req.app.settings.redisHelper, githubLibrary: req.app.settings.githubLibrary, link: link, insights: req.insights, }; new OpenSourceUserContext(options, function (contextError, unlinkContext) { if (contextError) { return callback(contextError); } unlinkContext.invalidateLinkCache(callback); }); } function link(req, id, callback) { const dc = req.app.settings.dataclient; dc.createLinkObjectFromRequest(req, function (createLinkError, linkObject) { if (createLinkError) { return callback(utils.wrapError(createLinkError, `We had trouble linking your corporate and GitHub accounts: ${createLinkError.message}`)); } dc.insertLink(req.user.github.id, linkObject, function (insertError) { req.insights.trackEvent('PortalUserLinkAdditionalAccount'); if (insertError) { return callback(insertError); } invalidateCache(req, linkObject, () => { callback(null, linkObject); }); }); }); } module.exports = router; <file_sep>/jobs/reports/consolidated.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // 'use strict'; // This JS file is shared between multiple projects/services. // // It helps take a minimum viable consolidated // report, containing issue type definitions and also the entities impacted // by that issue, and a simple recipient list, and explodes that into a set // of reports for a specific recipient. // // Any changes should be fully synchronized between the two repos. There is // a "schema version" saved inside the metadata that is validated by the // witness event digest processor before using a consolidated report. While // bug fixes to this file can be made without changing the schema version, // any changes to the consolidated format should take into account // compatibility. function addEntityToRecipientMap(map, providerName, recipient, entity, definitions, options) { const filterDefinitionCategories = options.filterDefinitionCategories ? 
new Set(options.filterDefinitionCategories) : false; const simplifiedRecipientName = `${recipient.type}:${recipient.value}`; let recipientView = map.get(simplifiedRecipientName); if (!recipientView) { recipientView = { reasons: new Set(), }; map.set(simplifiedRecipientName, recipientView); } const issueNames = Object.getOwnPropertyNames(entity.issues); for (let i = 0; i < issueNames.length; i++) { const issueName = issueNames[i]; if (recipient.specific && recipient.specific.issueNames && !recipient.specific.issueNames.has(issueName)) { continue; } const definition = definitions[issueName]; if (filterDefinitionCategories && definition.category && !filterDefinitionCategories.has(definition.category)) { continue; } if (recipient.reasons) { for (let i = 0; i < recipient.reasons.length; i++) { recipientView.reasons.add(recipient.reasons[i]); } } if (!recipientView[issueName]) { const entry = { definition: definition, }; if (definition.hasTable) { entry.table = { rows: [], }; Object.assign(entry.table, definition.table); } if (definition.hasList) { entry.list = { listItems: [], }; Object.assign(entry.list, definition.list); } recipientView[issueName] = entry; } let entry = recipientView[issueName]; const entityIssue = entity.issues[issueName]; const specificItems = entity.specific && entity.specific.issueItems ? entity.specific.issueItems : null; if (definition.hasTable) { fillFrom(entityIssue, 'rows', entry.table, entity, specificItems); } if (definition.hasList) { fillFrom(entityIssue, 'listItems', entry.list, entity, specificItems); } } } function fillFrom(object, property, target, entity, specificItems) { const source = object[property]; if (source && Array.isArray(source) && Array.isArray(target[property]) && source.length) { const targetArray = target[property]; for (let i = 0; i < source.length; i++) { const sourceItem = source[i]; if (specificItems && !specificItems.has(sourceItem)) { continue; } let lineItem = typeof(source[i]) === 'object' ? 
Object.assign({}, sourceItem) : { text: sourceItem }; if (!lineItem.entityName && entity.name) { lineItem.entityName = entity.name; } targetArray.push(lineItem); } } } function identifyAdditionalRecipients(entity, recipients) { const additionals = []; const additionalEntries = new Map(); const issues = entity.issues; if (!issues) { return additionals; } const issueNames = Object.getOwnPropertyNames(issues); for (let i = 0; i < issueNames.length; i++) { const issueName = issueNames[i]; const issue = entity.issues[issueNames[i]]; let items = null; if (issue.listItems && issue.listItems.length) { items = issue.listItems; } else if (issue.rows && issue.rows.length) { items = issue.rows; } if (items) { for (let j = 0; j < items.length; j++) { const item = items[j]; if (item.additionalRecipients) { for (let k = 0; k < item.additionalRecipients.length; k++) { const recipient = item.additionalRecipients[k]; let found = null; for (let l = 0; l < recipients.length; l++) { const existing = recipients[l]; if (existing.type === recipient.type && existing.value === recipient.value) { found = existing; break; } } if (found) { const reasonSet = new Set(found.reasons); for (let m = 0; m < recipient.reasons.length; m++) { reasonSet.add(recipient.reasons[m]); } found.reasons = Array.from(reasonSet.values()); } else { const combined = `:${recipient.type}:${recipient.value}:`; let entry = additionalEntries.get(combined); if (!entry) { entry = Object.assign({ specific: { issueNames: new Set(), issueItems: new Set(), }, }, item.additionalRecipients[k]); additionalEntries.set(combined, entry); additionals.push(entry); } entry.specific.issueNames.add(issueName); entry.specific.issueItems.add(item); } } } } } } return additionals; } function deduplicateRecipients(recipients) { const visited = new Map(); const r = []; for (let i = 0; i < recipients.length; i++) { const recipient = recipients[i]; const combined = `:${recipient.type}:${recipient.value}:`; let deduplicatedEntry = visited.get(combined); if (!deduplicatedEntry) { const clonedRecipient = Object.assign({}, recipient); delete clonedRecipient.reasons; r.push(clonedRecipient); deduplicatedEntry = { reasons: new Set(), clone: clonedRecipient, }; visited.set(combined, deduplicatedEntry); } if (recipient.reasons) { for (let j = 0; j < recipient.reasons.length; j++) { deduplicatedEntry.reasons.add(recipient.reasons[j]); } deduplicatedEntry.clone.reasons = Array.from(deduplicatedEntry.reasons.values()); } } return r; } function buildConsolidatedMap(consolidated, options) { options = options || {}; const byRecipient = new Map(); const providerNames = Object.getOwnPropertyNames(consolidated); for (let i = 0; i < providerNames.length; i++) { const providerName = providerNames[i]; const dataset = consolidated[providerName]; if (typeof (dataset) !== 'object' || providerName === 'metadata') { continue; } const definitions = {}; const providerByName = new Map(); for (let x = 0; x < dataset.definitions.length; x++) { const d = dataset.definitions[x]; definitions[d.name] = d; } if (dataset.entities && dataset.entities.length) { for (let j = 0; j < dataset.entities.length; j++) { const entity = dataset.entities[j]; const recipients = deduplicateRecipients(entity && entity.recipients ? 
entity.recipients : []); const additionalRecipients = identifyAdditionalRecipients(entity, recipients); const allRecipients = recipients.concat(additionalRecipients); const entityClone = Object.assign({}, entity); delete entityClone.recipients; for (let k = 0; k < allRecipients.length; k++) { const recipient = allRecipients[k]; addEntityToRecipientMap(providerByName, providerName, recipient, entityClone, definitions, options); } } } for (let recipient of providerByName.keys()) { const values = providerByName.get(recipient); if (!byRecipient.has(recipient)) { const recipientEntries = []; recipientEntries.reasons = new Set(); byRecipient.set(recipient, recipientEntries); } const entry = byRecipient.get(recipient); if (values.reasons && entry.reasons) { for (let reason of values.reasons) { entry.reasons.add(reason); } } for (let d = 0; d < dataset.definitions.length; d++) { const definition = dataset.definitions[d]; if (values[definition.name]) { entry.push(values[definition.name]); } } } } // Reduce the set of reasons down to an array; remove empty reports const keys = Array.from(byRecipient.keys()); for (let i = 0; i < keys.length; i++) { const key = keys[i]; const value = byRecipient.get(key); if (value.length === 0) { byRecipient.delete(key); continue; } if (value.reasons && value.reasons.add) { value.reasons = Array.from(value.reasons.values()); } } return byRecipient; } module.exports.buildRecipientMap = buildConsolidatedMap; <file_sep>/business/organizationMember.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // 'use strict'; const common = require('./common'); const githubEntityClassification = require('../data/github-entity-classification.json'); const memberPrimaryProperties = githubEntityClassification.member.keep; const memberSecondaryProperties = githubEntityClassification.member.strip; class Member { constructor(organization, entity, getToken, operations) { this.organization = organization; if (entity) { common.assignKnownFields(this, entity, 'member', memberPrimaryProperties, memberSecondaryProperties); } const privates = _private(this); privates.getToken = getToken; privates.operations = operations; } getMailAddress(callback) { if (!this.id) { return callback(new Error('No organization member ID')); } const operations = _private(this).operations; operations.graphManager.getCachedLink(this.id, (getLinkError, link) => { if (getLinkError || !link || !link.aadupn) { return callback(getLinkError); } const providers = operations.providers; if (!providers.mailAddressProvider) { return callback(new Error('No mailAddressProvider is available in this application instance')); } providers.mailAddressProvider.getAddressFromUpn(link.aadupn, callback); }); } } module.exports = Member; const privateSymbol = Symbol(); function _private(self) { if (self[privateSymbol] === undefined) { self[privateSymbol] = {}; } return self[privateSymbol]; } <file_sep>/routes/org/team/index.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. 
// const express = require('express'); const router = express.Router(); const async = require('async'); const emailRender = require('../../../lib/emailRender'); const lowercaser = require('../../../middleware/lowercaser'); const orgPermissions = require('../../../middleware/github/orgPermissions'); const teamMaintainerRoute = require('./index-maintainer'); const teamPermissionsMiddleware = require('../../../middleware/github/teamPermissions'); const utils = require('../../../utils'); router.use((req, res, next) => { const oss = req.oss; const login = oss.usernames.github; const team2 = req.team2; team2.getMembershipEfficiently(login, (getMembershipError, membership) => { if (getMembershipError) { return next(getMembershipError); } req.membershipStatus = membership; return next(); }); }); router.use('/join', orgPermissions, (req, res, next) => { const organization = req.organization; const team2 = req.team2; const orgPermissions = req.orgPermissions; // Are they already a team member? const currentMembershipStatus = req.membershipStatus; if (currentMembershipStatus) { return next(utils.wrapError(null, `You are already a ${currentMembershipStatus} of the ${team2.name} team`, true)); } // Have they joined the organization yet? const membershipStatus = orgPermissions.membershipStatus; let error = null; if (membershipStatus !== 'active') { error = new Error(`You are not a member of the ${organization.name} GitHub organization.`); error.title = 'Please join the organization before joining this team'; error.detailed = membershipStatus === 'pending' ? 'You have not accepted your membership yet, or do not have two-factor authentication enabled.' : 'After you join the organization, you can join this team.'; error.skipOops = true; error.skipLog = true; error.fancyLink = { link: `/${organization.name}`, title: `Join the ${organization.name} organization`, }; } return next(error); }); router.get('/join', function (req, res, next) { const team = req.team; const oss = req.oss; // The broad access "all members" team is always open for automatic joining without // approval. This short circuit is to show that option. if (team.org.getAllMembersTeam().id === team.id) { return oss.render(req, res, 'org/team/join', `Join ${team.name}`, { team: team, allowSelfJoin: true, }); } // This maintainer custom logic code is only in the legacy 'team' at this time team.getOfficialMaintainers((getMaintainersError, maintainers) => { if (getMaintainersError) { return next(getMaintainersError); } req.oss.render(req, res, 'org/team/join', `Join ${team.name}`, { team: team, teamMaintainers: maintainers, }); }); }); router.post('/join', function (req, res, next) { const config = req.app.settings.runtimeConfig; const oss = req.oss; const org = req.org; const team = req.team; if (org.getAllMembersTeam().id === team.id) { return team.addMembership('member', function (error) { if (error) { req.insights.trackEvent('GitHubJoinAllMembersTeamFailure', { org: org.name, username: req.oss.usernames.github, error: error.message, }); return next(utils.wrapError(error, `We had trouble adding you to the ${org.name} organization. 
${req.oss.usernames.github}`)); } req.oss.saveUserAlert(req, `You have joined ${team.name} team successfully`, 'Join Successfully', 'success'); req.insights.trackEvent('GitHubJoinAllMembersTeamSuccess', { org: org.name, username: req.oss.usernames.github }); return res.redirect(`${org.baseUrl}teams`); }); } const justification = req.body.justification; if (justification === undefined || justification === '') { return next(utils.wrapError(null, 'You must include justification for your request.', true)); } const approvalTypesValues = config.github.approvalTypes.repo; if (approvalTypesValues.length === 0) { return next(new Error('No team join approval providers configured.')); } const approvalTypes = new Set(approvalTypesValues); const mailProviderInUse = approvalTypes.has('mail'); let issueProviderInUse = approvalTypes.has('github'); if (!mailProviderInUse && !issueProviderInUse) { return next(new Error('No configured approval providers configured.')); } const mailProvider = req.app.settings.mailProvider; const approverMailAddresses = []; if (mailProviderInUse && !mailProvider) { return next(utils.wrapError(null, 'No mail provider is enabled, yet this application is configured to use a mail provider.')); } const mailAddressProvider = req.app.settings.mailAddressProvider; let notificationsRepo = null; try { notificationsRepo = issueProviderInUse ? org.getWorkflowRepository() : null; } catch (noWorkflowRepo) { notificationsRepo = false; issueProviderInUse = false; } const displayHostname = req.hostname; const approvalScheme = displayHostname === 'localhost' && config.webServer.allowHttp === true ? 'http' : 'https'; const reposSiteBaseUrl = `${approvalScheme}://${displayHostname}/`; const approvalBaseUrl = `${reposSiteBaseUrl}approvals/`; const personName = oss.modernUser().contactName(); let personMail = null; const dc = oss.dataClient(); let assignTo = null; let requestId = null; let allMaintainers = null; let issueNumber = null; let approvalRequest = null; async.waterfall([ function getRequesterEmailAddress(callback) { const upn = oss.modernUser().contactEmail(); mailAddressProvider.getAddressFromUpn(upn, (resolveError, mailAddress) => { if (resolveError) { return callback(resolveError); } personMail = mailAddress; callback(); }); }, function (callback) { team.isMember(callback); }, function (isMember, callback) { if (isMember === true) { return next(utils.wrapError(null, 'You are already a member of the team ' + team.name, true)); } team.getOfficialMaintainers(callback); }, (maintainers, callback) => { async.filter(maintainers, (maintainer, filterCallback) => { filterCallback(null, maintainer && maintainer.login && maintainer.link); }, callback); }, function (maintainers, callback) { approvalRequest = { ghu: oss.usernames.github, ghid: oss.id.github, justification: req.body.justification, requested: ((new Date()).getTime()).toString(), active: false, type: 'joinTeam', org: team.org.name, teamid: team.id, teamname: team.name, email: oss.modernUser().contactEmail(), name: oss.modernUser().contactName(), }; const randomMaintainer = maintainers[Math.floor(Math.random() * maintainers.length)]; assignTo = randomMaintainer ? randomMaintainer.login : ''; const mnt = []; async.each(maintainers, (maintainer, next) => { mnt.push('@' + maintainer.login); const approverUpn = maintainer && maintainer.link && maintainer.link.aadupn ? 
maintainer.link.aadupn : null; if (approverUpn) { mailAddressProvider.getAddressFromUpn(approverUpn, (getAddressError, mailAddress) => { if (getAddressError) { return next(getAddressError); } approverMailAddresses.push(mailAddress); next(); }); } else { next(); } }, (addressResolutionError) => { if (addressResolutionError) { return callback(addressResolutionError); } allMaintainers = mnt.join(', '); dc.insertApprovalRequest(team.id, approvalRequest, callback); }); }, function (newRequestId) { const callback = arguments[arguments.length - 1]; requestId = newRequestId; if (!issueProviderInUse) { return callback(); } const body = 'A team join request has been submitted by ' + oss.modernUser().contactName() + ' (' + oss.modernUser().contactEmail() + ', [' + oss.usernames.github + '](' + 'https://github.com/' + oss.usernames.github + ')) to join your "' + team.name + '" team ' + 'in the "' + team.org.name + '" organization.' + '\n\n' + allMaintainers + ': Can a team maintainer [review this request now](' + 'https://' + req.hostname + '/approvals/' + requestId + ')?\n\n' + '<em>If you use this issue to comment with the team maintainers, please understand that your comment will be visible by all members of the organization.</em>'; notificationsRepo.createIssue({ title: 'Request to join team "' + team.org.name + '/' + team.name + '" by ' + oss.usernames.github, body: body, }, callback); }, function (issue) { const callback = arguments[arguments.length - 1]; const itemUpdates = { active: true, }; if (issueProviderInUse) { if (issue.id && issue.number) { issueNumber = issue.number; itemUpdates.issueid = issue.id.toString(); itemUpdates.issue = issue.number.toString(); } else { return callback(new Error('An issue could not be created. The response object representing the issue was malformed.')); } } dc.updateApprovalRequest(requestId, itemUpdates, callback); }, function setAssignee() { req.oss.saveUserAlert(req, 'Your request to join ' + team.name + ' has been submitted and will be reviewed by a team maintainer.', 'Permission Request', 'success'); const callback = arguments[arguments.length - 1]; if (!issueProviderInUse) { return callback(); } notificationsRepo.updateIssue(issueNumber, { assignee: assignTo, }, function (error) { if (error) { // CONSIDER: Log. This error condition hits when a user has // been added to the org outside of the portal. Since they // are not associated with the workflow repo, they cannot // be assigned by GitHub - which throws a validation error. } callback(); }); }, function sendApproverMail() { const callback = arguments[arguments.length - 1]; if (!mailProviderInUse) { return callback(); } const approversAsString = approverMailAddresses.join(', '); const mail = { to: approverMailAddresses, subject: `${personName} wants to join your ${team.name} team in the ${team.org.name} GitHub org`, reason: (`You are receiving this e-mail because you are a team maintainer for the GitHub team "${team.name}" in the ${team.org.name} organization. To stop receiving these mails, you can remove your team maintainer status on GitHub. 
This mail was sent to: ${approversAsString}`), headline: `${team.name} permission request`, classification: 'action', service: 'Microsoft GitHub', correlationId: req.correlationId, }; const contentOptions = { correlationId: req.correlationId, version: config.logging.version, actionUrl: approvalBaseUrl + requestId, reposSiteUrl: reposSiteBaseUrl, approvalRequest: approvalRequest, team: team.name, org: team.org.name, personName: personName, personMail: personMail, }; emailRender.render(req.app.settings.basedir, 'membershipApprovals/pleaseApprove', contentOptions, (renderError, mailContent) => { if (renderError) { req.insights.trackException(renderError, { content: contentOptions, eventName: 'ReposTeamRequestPleaseApproveMailRenderFailure', }); return callback(renderError); } mail.content = mailContent; mailProvider.sendMail(mail, (mailError, mailResult) => { const customData = { content: contentOptions, receipt: mailResult, }; if (mailError) { customData.eventName = 'ReposTeamRequestPleaseApproveMailFailure'; req.insights.trackException(mailError, customData); return callback(mailError); } req.insights.trackEvent('ReposTeamRequestPleaseApproveMailSuccess', customData); dc.updateApprovalRequest(requestId, { mailSentToApprovers: approversAsString, mailSentTo: personMail, }, callback); }); }); }, function sendRequesterMail() { const callback = arguments[arguments.length - 1]; if (!mailProviderInUse) { return callback(); } // Let's send e-mail to the requester about this action const mail = { to: personMail, subject: `Your ${team.org.name} "${team.name}" permission request has been submitted`, reason: (`You are receiving this e-mail because you requested to join this team. This mail was sent to: ${personMail}`), headline: 'Team request submitted', classification: 'information', service: 'Microsoft GitHub', correlationId: req.correlationId, }; const contentOptions = { correlationId: req.correlationId, version: config.logging.version, actionUrl: approvalBaseUrl + requestId, reposSiteUrl: reposSiteBaseUrl, approvalRequest: approvalRequest, team: team.name, org: team.org.name, personName: personName, personMail: personMail, }; emailRender.render(req.app.settings.basedir, 'membershipApprovals/requestSubmitted', contentOptions, (renderError, mailContent) => { if (renderError) { req.insights.trackException(renderError, { content: contentOptions, eventName: 'ReposTeamRequestSubmittedMailRenderFailure', }); return callback(renderError); } mail.content = mailContent; mailProvider.sendMail(mail, (mailError, mailResult) => { const customData = { content: contentOptions, receipt: mailResult, }; if (mailError) { customData.eventName = 'ReposTeamRequestSubmittedMailFailure'; req.insights.trackException(mailError, customData); return callback(mailError); } req.insights.trackEvent('ReposTeamRequestSubmittedMailSuccess', customData); callback(); }); }); }, ], function (error) { if (error) { return next(error); } res.redirect(team.org.baseUrl); }); }); // Adds "req.teamPermissions", "req.teamMaintainers" middleware router.use(teamPermissionsMiddleware); // The view uses this information today to show the sudo banner router.use((req, res, next) => { if (req.teamPermissions.sudo === true) { req.sudoMode = true; } return next(); }); router.get('/', orgPermissions, (req, res, next) => { const oss = req.oss; const id = oss.id.github ? 
parseInt(oss.id.github, 10) : null; const teamPermissions = req.teamPermissions; const membershipStatus = req.membershipStatus; const team2 = req.team2; const legacyTeam = req.team; const operations = req.app.settings.operations; const organization = req.organization; const teamMaintainers = req.teamMaintainers; const maintainersSet = new Set(); for (let i = 0; i < teamMaintainers.length; i++) { maintainersSet.add(teamMaintainers[i].id); } let membersFirstPage = []; let teamDetails = null; let repositories = null; const isBroadAccessTeam = team2.isBroadAccessTeam; const isSystemTeam = team2.isSystemTeam; const orgOwnersSet = req.orgOwnersSet; let isOrgOwner = orgOwnersSet ? orgOwnersSet.has(id) : false; function renderPage() { oss.render(req, res, 'org/team/index', team2.name, { team: legacyTeam, teamUrl: req.teamUrl, // ? employees: [], // data.employees, pendingApprovals: [], // data.pendingApprovals, // changed implementation: maintainers: teamMaintainers, maintainersSet: maintainersSet, // new values: teamPermissions: teamPermissions, membershipStatus: membershipStatus, membersFirstPage: membersFirstPage, team2: team2, teamDetails: teamDetails, organization: organization, isBroadAccessTeam: isBroadAccessTeam, isSystemTeam: isSystemTeam, repositories: repositories, isOrgOwner: isOrgOwner, orgOwnersSet: orgOwnersSet, }); } // Get the first page (by 100) of members, we only show a subset const firstPageOptions = { pageLimit: 1, backgroundRefresh: true, maxAgeSeconds: 60, }; team2.getMembers(firstPageOptions, (getMembersError, membersSubset) => { if (getMembersError) { return next(getMembersError); } membersFirstPage = membersSubset; team2.getDetails((detailsError, details) => { if (detailsError) { return next(detailsError); } teamDetails = details; const onlySourceRepositories = { type: 'sources', }; team2.getRepositories(onlySourceRepositories, (reposError, reposWithPermissions) => { if (reposError) { return next(reposError); } repositories = reposWithPermissions.sort(sortByNameCaseInsensitive); operations.getLinks((getLinksError, links) => { if (getLinksError) { return next(getLinksError); } const map = new Map(); for (let i = 0; i < links.length; i++) { const id = links[i].ghid; if (id) { map.set(parseInt(id, 10), links[i]); } } async.parallel([ callback => { addLinkToList(teamMaintainers, map); return resolveMailAddresses(operations, teamMaintainers, callback); }, callback => { addLinkToList(membersFirstPage, map); return resolveMailAddresses(operations, membersFirstPage, callback); }, ], (parallelError) => { if (parallelError) { return next(parallelError); } return renderPage(); }); }); }); }); }); }); function addLinkToList(array, linksMap) { for (let i = 0; i < array.length; i++) { const entry = array[i]; const link = linksMap.get(entry.id); if (link) { entry.link = link; } } } function resolveMailAddresses(operations, array, callback) { const mailAddressProvider = operations.mailAddressProvider; if (!mailAddressProvider) { return callback(); } async.eachLimit(array, 5, (entry, next) => { const upn = entry && entry.link ? 
entry.link.aadupn : null; if (!upn) { return next(); } mailAddressProvider.getAddressFromUpn(upn, (resolveError, mailAddress) => { if (!resolveError && mailAddress) { entry.mailAddress = mailAddress; } return next(); }); }, callback); } function sortByNameCaseInsensitive(a, b) { let nameA = a.name.toLowerCase(); let nameB = b.name.toLowerCase(); if (nameA < nameB) { return -1; } if (nameA > nameB) { return 1; } return 0; } router.use('/members', require('./members')); router.get('/repos', lowercaser(['sort', 'language', 'type', 'tt']), require('../../reposPager')); router.use('/delete', require('./delete')); router.use('/properties', require('./properties')); router.use('/maintainers', require('./maintainers')); router.use(teamMaintainerRoute); module.exports = router; <file_sep>/webhooks/tasks/organization.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // /*eslint no-console: ["error", { allow: ["dir", "log"] }] */ 'use strict'; module.exports = { filter: function (data) { let eventType = data.properties.event; return eventType === 'organization'; }, run: function (operations, organization, data, callback) { const event = data.body; let refresh = false; if (event.action === 'member_invited') { console.log(`org member invite; ghu ${event.invitation.login} role ${event.invitation.role} ghid ${event.invitation.id} org: ${event.organization.login}`); } else if (event.action === 'member_added') { console.log(`org member added; ghu ${event.membership.user.login} role ${event.membership.role} state ${event.membership.state} ghid ${event.membership.user.id} org: ${event.organization.login}`); refresh = true; } else if (event.action === 'member_removed') { console.log(`org member REMOVED; ghu ${event.membership.user.login} role ${event.membership.role} state ${event.membership.state} ghid ${event.membership.user.id} org: ${event.organization.login}`); refresh = true; } else { console.dir(data); } if (refresh) { const orgName = organization.name; console.log(`refreshing ${orgName} org members list`); const immediateRefreshOptions = { backgroundRefresh: false, maxAgeSeconds: 0.01, }; operations.getMembers(orgName, immediateRefreshOptions, () => { console.log(`refreshed membership list for the org ${orgName}, will refresh x-org immediately`); operations.getMembers(null, { backgroundRefresh: false, maxAgeSeconds: 0.01, }, () => { console.log('refreshed x-org memberships'); }); }); } callback(); }, }; <file_sep>/config/userAgent.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // 'use strict'; function set(config) { config.userAgent = 'env://REPOS_USER_AGENT'; } set.evaluate = (config) => { if (!config.userAgent) { const pkg = require('../package.json'); config.userAgent = `${pkg.name}/${pkg.version}`; } }; module.exports = set; <file_sep>/views/index.pug //- //- Copyright (c) Microsoft. All rights reserved. //- Licensed under the MIT license. See LICENSE file in the project root for full license information. //- extends layout block js_doc_ready | try { | if (initializeManageFilter !== undefined) { initializeManageFilter() } | if (initializeMembershipFilter !== undefined) { initializeMembershipFilter() } | } catch(ex) {}; block content - var overview = accountInfo ? 
accountInfo.overview : null div.container if onboarding === true if user && user.azure h1.huge Congratulations, #{user.azure.displayName || user.azure.username} else h1.huge Congratulations else //h1 | Open Source Portal&nbsp; small for GitHub div.col-md-8 if accountInfo && onboarding !== true if accountInfo.isLinkedUser !== false && accountInfo.twoFactorOn === true // No longer showing this UI on the homepage for good states else div.container div.row(style='margin-top:16px') // linked membership div.col-md-4.col-lg-4.col-sm-4 if accountInfo.isLinkedUser !== false div.metro-box.ms-blue a(href='/unlink') h3 Linked Identity p= config.brand.companyName.toUpperCase() else div.metro-box.ms-light-gray a(href='/link') h3 Not linked p= config.brand.companyName.toUpperCase() // 2-factor authentication if accountInfo.twoFactorOff === true || accountInfo.twoFactorOn === true div.col-md-4.col-lg-4.col-sm-4 if accountInfo.twoFactorOff === true div.metro-box.ms-red a(href=activeOrgUrl + 'security-check') h3 2FA p AT RISK else if accountInfo.twoFactorOn === true div.metro-box.ms-green a(href=activeOrgUrl + 'security-check') h3 2-factor auth p PROTECTED if onboarding !== true && overview && overview.organizations.member && overview.organizations.member.length h1 a.a-unstyled(name='orgs') Your #{config.brand.companyName} GitHub organizations else h3 You've successfully linked your #{config.brand.companyName} and GitHub accounts. - var currentPriority = '' each o in overview.organizations.member div.link-box a(href=o + onboardingPostfixUrl) h2: strong= o //-else if o.membershipStateTemporary === 'pending' small span.label.label-danger Membership pending - var oo = getOrg(o) if oo.description p.lead= oo.description if onboarding === true h3 Your onboarding progress h5 | Sign in to your GitHub &amp; #{config.brand.companyName} accounts&nbsp; i.glyphicon.glyphicon-ok h5 | Link your accounts&nbsp; i.glyphicon.glyphicon-ok h5.text-primary | Join your first GitHub organization h5 | Multifactor security checkup h5 | Profile review h5 | Publish your membership <em>(optional)</em> h5 | Join a team <em>(optional)</em> //-if accountInfo.isSudoer h1 Organization Administration p Your account is a delegate administrator for the organization. You have additional capabilities enabled to help ensure the health of the organization. p a.btn.btn-default(href='/organization') Organization Delegate Dashboard if overview.teams.maintainer && overview.teams.maintainer.length if accountInfo.pendingApprovals && accountInfo.pendingApprovals.length && accountInfo.pendingApprovals.length > 0 h1 Approvals: Please Review p a.btn.btn-default(href='/approvals/') See all pending approvals (#{accountInfo.pendingApprovals.length}) if overview.teams.maintainer h1 Teams you maintain script(type='text/javascript'). 
function initializeManageFilter() { var inputManageFilter = $('#manage-filter'); if (inputManageFilter) { inputManageFilter.keyup(function () {; $.uiTableFilter($('table#manage-table'), this.value, ['Title', 'Organization', 'GitHub name']); }); } } //-div.container div table.table#manage-table thead tr th(colspan='2') form#manage-filter-form input.form-control#manage-filter(name='filter', placeholder='Filter teams I manage', type='text') th p i.glyphicon.glyphicon-search tr th GitHub name th Organization th Actions tbody each team in overview.teams.maintainer tr td a.btn.btn-sm.btn-muted(href='/' + team.organization.login + '/teams/' + team.slug)= team.name td.twentypercent= team.organization.login td.thirtypercent ul.list-unstyled li: a.btn.btn-sm.btn-default(href='/' + team.organization.login + '/teams/' + team.slug) Manage //-li: a.btn.btn-sm.btn-muted(href='https://github.com/orgs/' + team.organization.login + '/teams/' + team.slug, target='_new') GitHub if accountInfo.isLinkedUser && onboarding !== true if overview.teams && overview.teams.member && overview.teams.member.length h1 Team memberships p Here are teams that you are a member of but not a maintainer of. script(type='text/javascript'). function initializeMembershipFilter() { var inputMembershipFilter = $('#membership-filter'); if (inputMembershipFilter) { inputMembershipFilter.keyup(function () {; $.uiTableFilter($('table#membership-table'), this.value, ['Team', 'Organization']); }); } } table.table#membership-table thead tr th(colspan='2') form#membership-filter-form input.form-control#membership-filter(name='filter', placeholder='Filter my teams', type='text') th p i.glyphicon.glyphicon-search tr th Team th Organization th Actions tbody each team in overview.teams.member if team.id && accountInfo.teamsMaintainedHash && accountInfo.teamsMaintainedHash[team.id] !== undefined // Skipping this team since they are already maintaining it else tr td a.btn.btn-sm.btn-muted(href='/' + team.organization.login + '/teams/' + team.slug, target='_new')= team.name //-a.btn.btn-sm.btn-muted(href='https://github.com/orgs/' + team.organization.login + '/teams/' + team.slug, target='_new')= team.name td.twentypercent= team.organization.login td.twentypercent p if config.github.teamEveryoneId == team.id a.btn.btn-default.btn-sm(href='/' + team.organization.login + '/leave') Leave Organization else //-a.btn.btn-default.btn-sm(href='https://github.com/orgs/' + team.organization.login + '/teams/' + team.slug, target='_new') a.btn.btn-default.btn-sm(href='/' + team.organization.login + '/teams/' + team.slug, target='_new') | Open on GitHub //- The user will get a better experience joining teams if they go through the organization onboarding first. //-p a.btn.btn-default(href='/teams')= (accountInfo.userTeamMemberships && accountInfo.userTeamMemberships.length && accountInfo.userTeamMemberships.length > 0) ? 'Join another team' : 'Join a team' else //-p You are not currently a member of any GitHub teams that grant you permission to specific repositories. Note that this display is cached and may not be up-to-date. div.col-md-4 if overview.organizations.available && overview.organizations.available.length h2 Available #{config.brand.companyName} GitHub organizations .right if onboarding === true p Join these #{config.brand.companyName} organizations to see private repos and get elevated privileges. 
- var currentPriority = '' if groupedAvailableOrganizations each list, groupType in groupedAvailableOrganizations if groupType === 'secondary' hr h5 Additional Organizations p.small The following organizations are specialized for specific teams and projects, but not general-purpose membership. Please only join if you have a business need. each o in list //-if o.membershipStateTemporary !== 'active' && o.membershipStateTemporary !== 'pending' if o.priority !== currentPriority - currentPriority = o.priority //-if currentPriority == 'secondary' //-if o.locked === true // o.membershipStateTemporary !== 'active' if o.locked === true //- Do not show by invitation only orgs at this time else div.link-box a(href='/' + o.name + '/join' + onboardingPostfixUrl) if groupType === 'secondary' h6.text-muted strong= o.name + ' ' small: span.label.label-muted Join p.small.text-mute= o.description else h4 = o.name + ' ' small: span.label.label-primary Join p.small(style='color:#333')= o.description <file_sep>/views/repos/repo.pug //- //- Copyright (c) Microsoft. All rights reserved. //- Licensed under the MIT license. See LICENSE file in the project root for full license information. //- extends ../layout block additional_head link(href='/css/c3.min.css', rel='stylesheet') script(src='/js/d3.min.js') script(src='/js/c3.min.js') block js_doc_ready include ../js/metrics.js | displayRepoMetrics(#{repo.id}, #{organization.id}); mixin simplePersonView(account) - var shorthandName = account.login div.horizontal-space-right.vertical-space.pull-left(style='width:36px;height:36px;display:block;vertical-align:middle') if account.avatar_url img(src=account.avatar_url + '&s=96', style='width:36px;height:36px', title='Avatar of ' + shorthandName) div(style='display:block;vertical-align:middle') h4 a(href='/people?q=' + account.login)= shorthandName //-ul.list-inline if shorthandName && shorthandName !== account.login li span(title=account.login + ' is the GitHub username for ' + shorthandName)= account.login if corporateIdentity if link && !corporate li.text-warning!= octicon('link', 16) li span(title=link.aadoid)= link.aadupn else li!= octicon('link', 16) li span(title=corporateIdentity + ' is the corporate identity for ' + shorthandName)= corporateIdentity //- just corporate e-mails here, not user emails if email li a(href='mailto:' + email, title='Send corporate email to ' + email) != octicon('mail', 16) if serviceAccount li!= octicon('hubot', 16) li Service account mixin teamsList(teams, hideJoinOption) if teams && teams.length > 0 table(style='width:100%') tbody each permission in teams - var team = permission.team tr td ul.list-inline - var isMember = teamSets && teamSets.member.has(team.id) //- formerly: permission.relativeJoinLink if hideJoinOption li ul.list-unstyled li: strong= team.name li: p= team.description else li a.btn.btn-sm.btn-muted-more(href=permission.relativePortalLink, title=team.description)= team.name if teamSets && teamSets.maintained.has(team.id) li.text-primary You're a maintainer else if teamSets && teamSets.member.has(team.id) li.text-primary You're a member if team.description && !hideJoinOption p= team.description else p No teams with this permission. 
block content //- Services - var languageColor = viewServices.languageColor - var octicon = viewServices.octicon - var fileSize = viewServices.fileSize //- View constants - var maxReaders = 10 //- Variables - var githubUrl = 'https://github.com/' + repo.full_name - var cloneUrl = repo.clone_url || repo.otherFields.clone_url - var sshUrl = repo.ssh_url || repo.otherFields.ssh_url - var admin = repoPermissions && repoPermissions.allowAdministration .container .row: .col-md-12 if fromReposPage .nav ul.pager.zero-pad-bottom li.previous a(href='javascript:window.history.back()') span(aria-hidden=true) &larr; = ' Back' - var hugeHeading = repo.name.length < 33 h1(class={huge: hugeHeading}) a(href='https://github.com/' + repo.full_name, target="_blank")= repo.name if repo.private === true | &nbsp; .label.label-warning(class={shrink66: !hugeHeading, shrink50: hugeHeading}) Private h6= repo.full_name.replace('/' + repo.name, '') + ' organization' if repo.description p.lead=repo.description .row .col-md-8 small ul.list-inline.list-horizontal-space //- NOTES: //- * Can minimize some; 10,827 becomes 10.8k, for example if repo.language li span(style={color: languageColor(repo.language)}) != octicon('primitive-dot', 10) = ' ' + repo.language if repo.license && repo.license.featured && repo.license.name li != octicon('law', 15) = ' ' + repo.license.name if repo.stargazers_count li != octicon('star', 15) = ' ' + repo.stargazers_count.toLocaleString() if repo.forks_count li != octicon('repo-forked', 12) = ' ' + repo.forks_count.toLocaleString() if repo.size li != octicon('database', 13) = ' ' + fileSize(repo.size * 1024) ul.list-inline(style='margin-top:24px;margin-bottom:48px') li: a.btn.btn-sm.btn-muted-more(href=githubUrl, target="_blank", title='View ' + repo.full_name + ' on GitHub.com') = 'Open on GitHub.com ' != octicon('mark-github', 18) if cloneUrl li: a.btn.btn-sm.btn-muted-more(href='https://github.com/' + repo.full_name, target="_blank") HTTPS clone if sshUrl li: a.btn.btn-sm.btn-muted-more(href='https://github.com/' + repo.full_name, target="_blank") SSH clone if repo.homepage li: a.btn.btn-sm.btn-muted-more(href=repo.homepage, target='_new', title=repo.homepage) Homepage if repo.moment //-h2 Timeline ul.list-inline.list-horizontal-space if repo.moment.pushed_at li | Commits pushed br strong= repo.moment.pushed_at if repo.moment.updated_at li | Updated br strong= repo.moment.updated_at if repo.moment.created_at li | Created br strong= repo.moment.created_at if extensions //- Extensions if extensions.cla - var cla = extensions.cla hr h2 Contribution license agreements if !extensions.cla.supported p. The #{organization.name} organization is not currently configured for CLA. Please reach out if you believe this is incorrect. It is important for all organizations to have CLA support enabled. else if !cla.enabled .alert.alert-warning p. #[strong This repository does not currently have the CLA bot enabled] #[br] The repository administrators and writers should not accept pull requests from unknown GitHub users until the CLA is enabled. p else if cla.legalEntity p The CLA is enabled for the #{cla.legalEntity} legal entity. 
ul.list-inline.list-horizontal-space if cla.mails li | Notifying br strong= cla.mails if cla.legalEntity li | Legal entity br strong= cla.legalEntity if cla.updatedOn && cla.updatedOn.fromNow li | Configuration updated br strong= cla.updatedOn.fromNow() ul.list-inline if admin li: a.btn.btn-sm.btn-default(href='/' + repo.organization.name + '/repos/' + repo.name + '/extensions/cla') Configure the #{cla.legalEntity} CLA if cla.learnMoreUrl li: a.btn.btn-sm.btn-muted-more(href=cla.learnMoreUrl, target='_blank') Learn more .col-md-4 if admin h3 Repository management ul.list-unstyled.list-vspace li a.btn.btn-sm( class=repoPermissions.admin ? 'btn-primary' : 'btn-muted-more', href=githubUrl, target="_blank", title='View ' + repo.full_name + ' on GitHub.com') = 'Manage on GitHub.com ' != octicon('mark-github', 18) if !repoPermissions.admin li: p. You are not a GitHub repo administrator for this repo and may not be able to manage all settings directly on GitHub. li: a.btn.btn-sm.btn-muted-more( href='https://github.com/' + repo.full_name + '/settings', target='_blank' ) GitHub Settings li: a.btn.btn-sm.btn-muted-more( href='https://github.com/' + repo.full_name + '/settings/collaboration', target='_blank' ) Outside collaborators li: a.btn.btn-sm.btn-muted-more( href='https://github.com/' + repo.full_name + '/settings/collaboration', target='_blank' ) Team permissions li: a.btn.btn-sm.btn-muted-more( href='https://github.com/' + repo.full_name + '/settings/hooks', target='_blank' ) Webhooks if extensions h4 Services ul.list-unstyled.list-vspace if extensions.cla && extensions.cla.supported li: a.btn.btn-sm.btn-muted-more(href='/' + repo.organization.name + '/repos/' + repo.name + '/extensions/cla') Configure the #{cla.legalEntity} CLA if !repo.private && extensions.npm && extensions.npm.supported li: a.btn.btn-sm.btn-muted-more(href='/' + repo.organization.name + '/repos/' + repo.name + '/extensions/npm') NPM publishing #metrics hr h3 Metrics .row .col-md-6 ul.list-unstyled.list-vspace li a(href='https://github.com/' + repo.full_name + '/graphs/contributors', target='_new') != octicon('organization', 20) = ' Contributors: ' span#contributors li a(href='https://github.com/' + repo.full_name + '/watchers', target='_new') != octicon('eye', 20) = ' Subscribers: ' span#subscribers li a(href='https://github.com/' + repo.full_name + '/issues', target='_new') != octicon('issue-opened', 20) = ' Open issues: ' span#openIssues li a(href='https://github.com/' + repo.full_name + '/issues?q=is%3Aissue+is%3Aclosed', target='_new') != octicon('issue-closed', 20) = ' Closed issues: ' span#closedIssues li Pull requests:&nbsp; strong: span#pullRequests li Closed pull requests:&nbsp; strong: span#closedPullRequests li Open pull requests:&nbsp; strong: span#openPullRequests li Average days taken to close pull requests:&nbsp; strong: span#avgDaysToClosePRs li Average days taken for first response to pull requests:&nbsp; strong: span#avgDaysForFirstResponseToPRs li Pull requests opened today:&nbsp; strong: span#prsOpenedToday li Pull requests closed today:&nbsp; strong: span#prsClosedToday li Average days taken to close issues:&nbsp; strong: span#avgDaysToCloseIssues li Average days taken for first response to open issues:&nbsp; strong: span#avgDaysForFirstResponseToIssues li Issues opened today:&nbsp; strong: span#openedIssuesToday li Issues closed today:&nbsp; strong: span#closedIssuesToday li Commits:&nbsp; strong: span#commits li Lines committed:&nbsp; strong: span#linesCommitted li Commits made today:&nbsp; strong: 
span#commitsToday li Lines committed today:&nbsp; strong: span#linesCommittedToday .col-md-6 #metricsChart .row: .col-md-12: hr if permissions - var zeroTeams = !permissions.pull && !permissions.push && !permissions.admin .row: .col-md-12 h2 Team permissions if zeroTeams p. #[span.text-danger There are no teams defined for this repo.] Teams are the GitHub-recommended way to manage repository permissions for organizations. Please work with your administrators to define a team experience for this repo. .row if permissions.pull .col-md-4 h4 Read #[small Pull] //-if repo.private p This is a private repository, so only the following teams have read access. +teamsList(permissions.pull) if permissions.push .col-md-4 h4 Write #[small Accept pull requests] +teamsList(permissions.push) if !zeroTeams .col-md-4 h4 Admin #[small Manage settings] if permissions.admin +teamsList(permissions.admin) else p.text-danger This repository has no admin teams defined. if teamBasedPermissions .row: .col-md-12: h2 Users granted permissions by teams .row if teamBasedPermissions.readers && teamBasedPermissions.readers.length .col-md-4 h3 Readers #[span.badge=teamBasedPermissions.readers.length.toLocaleString()] if teamBasedPermissions.readers.length > maxReaders p. Accounts that can clone and view the repository. #[span.text-muted Only #{maxReaders} of #{teamBasedPermissions.readers.length.toLocaleString()} accounts with read access are being displayed] ul.list-unstyled - var k = 0 each readerCollaborator in teamBasedPermissions.readers - k++ if k < maxReaders li div.clearfix +simplePersonView(readerCollaborator.user) if teamBasedPermissions.writers && teamBasedPermissions.writers.length .col-md-4 h3 Writers #[span.badge=teamBasedPermissions.writers.length.toLocaleString()] p These accounts have the ability to accept pull requests and directly commit to the repo ul.list-unstyled each writerCollaborator in teamBasedPermissions.writers li div.clearfix +simplePersonView(writerCollaborator.user) if teamBasedPermissions.administrators && teamBasedPermissions.administrators.length .col-md-4 h3 Administrators #[span.badge=teamBasedPermissions.administrators.length.toLocaleString()] p Owners of the repo, able to manage repo and team settings ul.list-unstyled each adminCollaborator in teamBasedPermissions.administrators li div.clearfix +simplePersonView(adminCollaborator.user) if outsideCollaborators && outsideCollaborators.length .row: .col-md-12: h2 Outside collaborators .row if outsideCollaboratorsSlice.administrators && outsideCollaboratorsSlice.administrators.length .col-md-4 h3 Administrators #[span.badge=outsideCollaboratorsSlice.administrators.length.toLocaleString()] p Owners of the repo, able to manage repo and team settings ul.list-unstyled each adminCollaborator in outsideCollaboratorsSlice.administrators li div.clearfix +simplePersonView(adminCollaborator) if outsideCollaboratorsSlice.writers && outsideCollaboratorsSlice.writers.length .col-md-4 h3 Writers #[span.badge=outsideCollaboratorsSlice.writers.length.toLocaleString()] p These accounts have the ability to accept pull requests and directly commit to the repo ul.list-unstyled each writerCollaborator in outsideCollaboratorsSlice.writers li div.clearfix +simplePersonView(writerCollaborator) //- We ignore outsideCollaboratorsSlice.readers for this view if outsideCollaboratorsSlice.readers && outsideCollaboratorsSlice.readers.length .col-md-4 h3 Readers #[span.badge=outsideCollaboratorsSlice.readers.length.toLocaleString()] if 
outsideCollaboratorsSlice.readers.length > maxReaders p. Accounts that can clone and view the repository. #[span.text-muted Only #{maxReaders} of #{outsideCollaboratorsSlice.readers.length.toLocaleString()} accounts with read access are being displayed] ul.list-unstyled - var k = 0 each readerCollaborator in outsideCollaboratorsSlice.readers - k++ if k < maxReaders li div.clearfix +simplePersonView(readerCollaborator) if collaboratorsArray && collaboratorsArray.length .row .col-md-12 h2 Individual permissions p. These are collaborators that have been directly added to this repository. For open source projects, Outside Collaborators are the GitHub-recommended way to grant contributor rights to repositories. p. In general, members of the organization should not be added as collaborators, as teams on GitHub are much more flexible. .row if collaborators.administrators && collaborators.administrators.length .col-md-4 h3 Administrators #[span.badge=collaborators.administrators.length.toLocaleString()] p Owners of the repo, able to manage repo and team settings ul.list-unstyled each adminCollaborator in collaborators.administrators li div.clearfix +simplePersonView(adminCollaborator) if collaborators.writers && collaborators.writers.length .col-md-4 h3 Writers #[span.badge=collaborators.writers.length.toLocaleString()] p These accounts have the ability to accept pull requests and directly commit to the repo ul.list-unstyled each writerCollaborator in collaborators.writers li div.clearfix +simplePersonView(writerCollaborator) //- We ignore collaborators.readers for this view if collaborators.readers && collaborators.readers.length .col-md-4 h3 Readers #[span.badge=collaborators.readers.length.toLocaleString()] if collaborators.readers.length > maxReaders p. Accounts that can clone and view the repository. #[span.text-muted Only #{maxReaders} of #{collaborators.readers.length.toLocaleString()} accounts with read access are being displayed] ul.list-unstyled - var k = 0 each readerCollaborator in collaborators.readers - k++ if k < maxReaders li div.clearfix +simplePersonView(readerCollaborator) if systemPermissions && (systemPermissions.pull || systemPermissions.push || systemPermissions.admin) .row: .col-md-12 h2 System team permissions p. System teams are used by corporate open source automation systems including Contribution License Agreements and compliance needs. These teams are not intended for general use but are provided here to help repo admins understand how GitHub permissions are configured. .row if systemPermissions.pull .col-md-4 h4 Read +teamsList(systemPermissions.pull, true) if systemPermissions.push .col-md-4 h4 Write +teamsList(systemPermissions.push, true) if systemPermissions.admin .col-md-4 h4 Admin +teamsList(systemPermissions.admin, true) hr if repo.id p small if repo.id = 'GitHub repository ID: ' + repo.id if repoMetrics br | This page contains historical metrics that may be delayed if reposDataAgeInformation p.text-primary(style='margin-bottom:24px') if reposDataAgeInformation.changed = 'Updated ' + reposDataAgeInformation.changed if reposDataAgeInformation.updated && reposDataAgeInformation.changed |, refreshed else | Refreshed if reposDataAgeInformation.updated = ' ' + reposDataAgeInformation.updated <file_sep>/views/org/publicMembershipStatus.pug //- //- Copyright (c) Microsoft. All rights reserved. //- Licensed under the MIT license. See LICENSE file in the project root for full license information. 
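//- View overview: shown during the organization membership flow; lets a member publicize or conceal their membership, either by granting the site temporary write:org scope or by following the manual steps on GitHub.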
//- extends ../layout block content div.container if publicMembership h1 | Thanks for supporting #{org.name} p.lead Your membership is currently public p If you conceal your membership, you won't appear in the #{org.name} member list or count. else h1 Want to publish your #{org.name} org membership? p.lead Show your support for #{config.brand.companyName} open source! p Publicizing your org memberships is great for building community and reputation. If you choose to make your membership public, GitHub shows the "#{org.name}" avatar on <a href="https://github.com/#{user.github.username}" target="_new">your public GitHub profile</a> and shows you on the <a href="https://github.com/orgs/#{org.name}/people" target="_new">members page for #{org.name}</a>. if user && user.github.increasedScope === true p &nbsp; form(method='post') p input.btn.btn-lg.btn-primary(type='submit', value=publicMembership ? 'Conceal my membership' : 'Make my membership public', name=publicMembership ? 'conceal' : 'publicize') | &nbsp; &nbsp; if onboarding || joining a.btn.btn-default.btn-lg(href=org.baseUrl + 'teams' + teamPostfix) if publicMembership | Keep it public else | Keep it hidden else a.btn.btn-default.btn-lg(href=org.baseUrl) Cancel else p.lead Quick way: Authorize us to publish on your behalf p. Temporarily authorize this site to have <a href="https://developer.github.com/v3/oauth/#scopes" target="_new"><code>write:org</code></a> permissions and immediately publish your org membership. p a.btn.btn-primary.btn-lg(href='/signin/github/increased-scope') Publish Membership if (!(onboarding || joining)) | &nbsp; &nbsp; a.btn.btn-default(href=org.baseUrl) Cancel if onboarding || joining p.lead Manual way: Follow this process ul li Head over to the <a href="https://github.com/orgs/#{org.name}/people">#{org.name} list of people</a>. li Find yourself in the list and change the Public/Private setting. p a.btn.btn-default.btn-lg(href=org.baseUrl + 'teams' + (onboarding ? '?onboarding=' + onboarding : '')) Continue without publishing if onboarding p &nbsp; hr h3 Your onboarding progress h5 | Sign in to your GitHub &amp; #{config.brand.companyName} accounts&nbsp; i.glyphicon.glyphicon-ok h5 | Link your accounts&nbsp; i.glyphicon.glyphicon-ok h5 | Join your first GitHub organization&nbsp; i.glyphicon.glyphicon-ok h5 | Multifactor security checkup&nbsp; i.glyphicon.glyphicon-ok h5 | Profile review&nbsp; i.glyphicon.glyphicon-ok h5.text-primary | Publish your membership <em>(optional)</em> h5 | Join a team <em>(optional)</em> <file_sep>/views/email/repoApprovals/requestSubmitted.pug //- //- Copyright (c) Microsoft. All rights reserved. //- Licensed under the MIT license. See LICENSE file in the project root for full license information. //- extends ../body block content h2 | #{approvalRequest.org}/#{approvalRequest.repoName} | &nbsp; small= approvalRequest.repoVisibility p. Your request has been submitted. if approvalRequest.repoVisibility === 'public' p This request is for a public repo, so if your request is approved, the repo will be visible immediately. If you did not intend for this to go public, please cancel this request. else if approvalRequest.repoVisibility === 'private' p This request is for a private repo. Please note that there may be a limit to the number of private repos for the organization.
p Per Microsoft policy, GitHub is not a permitted source control system for MBI source code and is only intended for open source projects, or those currently in the release approval process pending going public as open source. This request may be denied if there is reason to believe that it is a private engineering effort. Please consider VSTS/1ES for any internal projects. p a(href=reposSiteUrl + 'approvals/', style='display:inline-block;background-color:#eee;font-size:18px;padding:12px') Review your requests here table.technical(style='width:80%') thead tr th(colspan=2) Requested by tbody tr td p a(href='https://github.com/' + approvalRequest.ghu)= approvalRequest.ghu td= approvalRequest.email thead tr th(colspan=2) Repo information tbody tr td GitHub organization td= approvalRequest.org tr td Repository name td= approvalRequest.repoName if approvalRequest.repoDescription tr td Repo description td= approvalRequest.repoDescription tr td Initial visibility td= approvalRequest.repoVisibility === 'public' ? 'Public' : 'Private' if approvalRequest.justification thead tr th(colspan=2) Justification tr td(colspan=2)= approvalRequest.justification thead tr th(colspan=2) Permissions if approvalRequest.teamsCount tr td Authorized teams td= approvalRequest.teamsCount p If you prefer not to click on email links, you can find your requests by: ul li Navigating to the open source repos site at #{reposSiteUrl} li Go to the <em>/approvals</em> URL li Review your request details <file_sep>/views/organization/errorsList.pug //- //- Copyright (c) Microsoft. All rights reserved. //- Licensed under the MIT license. See LICENSE file in the project root for full license information. //- extends ../layout block content div.container p a.btn.btn-sm.btn-default(href='/organization/') Back h1 #{errors.length} Active (Untriaged) Errors div.container if errors && errors.length && errors.length > 0 each error in errors form(method='post', action='/organization/errors/' + error.PartitionKey + '/' + error.RowKey + '/') input.btn.btn-sm.btn-primary(type='submit', name='action', value='Delete') | &nbsp; &nbsp; input.btn.btn-sm.btn-default(type='submit', name='action', value='Archive') | &nbsp; &nbsp; if error.t - var seconds = (error.t * 1) time.lead(datetime=(new Date(seconds)).toISOString())= (new Date(seconds)).toDateString() if error.url h3 = error.url if error.status | &nbsp; HTTP #{error.status} if error.e h2= error.e if error.json - var json = JSON.parse(error.json) if json h5 METADATA ul.list-inline each js, val in json if val == 'innerError' // Painful but important look into objects while avoiding circular JSON references li <strong>innerError</strong>: ul each js2, val2 in json.innerError if js2 && js2.toString() == '[object Object]' li <strong>#{val2}</strong>: ul each js3, val3 in js2 if js3 && js3.toString() == '[object Object]' li <strong>#{val3}</strong>: ul each js4, val4 in js3 if js4 && js4.toString() == '[object Object]' li <strong>#{val4}</strong>: ul each js5, val5 in js4 li <strong>#{val5}:</strong>: #{js5} else li <strong>#{val4}</strong>: #{js4} else li <strong>#{val3}</strong>: #{js3} else li <strong>#{val2}</strong>: #{js2} else li <strong>#{val}</strong>: #{js} div.row div.col-sm-6.col-md-6.col-lg-6 h5 CORRELATION ID p= error.cid div.col-sm-6.col-md-6.col-lg-6 h5 USERNAME p a.btn.btn-muted.btn-sm(href='/organization/whois/github/' + error.ghu, target='_new')= error.ghu | &nbsp; a.btn.btn-muted.btn-sm(href='/organization/whois/github/' + error.ghu, target='_new') Whois div.row 
div.col-sm-6.col-md-6.col-lg-6 h5 PARTITION KEY p= error.PartitionKey div.col-sm-6.col-md-6.col-lg-6 h5 ROW KEY p= error.RowKey div.row div.col-sm-6.col-md-6.col-lg-6 h5 HOST p= error.host div.col-sm-6.col-md-6.col-lg-6 h5 USER AGENT p= error.ua div.row div.col-sm-6.col-md-6.col-lg-6 if error.referer h5 REFERER p= error.referer div.col-sm-6.col-md-6.col-lg-6 if error.stack pre= error.stack hr else p No active errors found. <file_sep>/lib/tableEncryption.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // 'use strict'; // Status: Partial Functionality // This module wraps a table service client to provide encryption and decryption trying // to reuse existing helper code. It only wraps what is needed for this library to work. // ---------------------------------------------------------------------------- // This is a Node.js implementation of client-side table entity encryption, // compatible with the official Azure storage .NET library. // ---------------------------------------------------------------------------- const async = require('async'); const encryption = require('./encryption'); function retrieveEntity() { const args = Array.prototype.slice.call(arguments); const tableClient = args.shift(); const encryptionOptions = args.shift(); // args[0] is the tableName; const partitionKey = args[1]; const rowKey = args[2]; const callback = args.pop(); args.push((error, result, response) => { if (error) { return callback(error); } const reducedEntity = encryptionOptions.tableDehydrator(result); encryption.decryptEntity(partitionKey, rowKey, reducedEntity, encryptionOptions, (decryptError, entity) => { if (decryptError) { return callback(decryptError); } const hydrated = encryptionOptions.tableRehydrator(partitionKey, rowKey, entity); return callback(null, hydrated, response); }); }); tableClient.retrieveEntity.apply(tableClient, args); } function queryEntitiesCallback(encryptionOptions, callback, error, results, headers) { if (error) { error.headers = headers; return callback(error); } if (!(results && results.entries && results.entries.length > 0)) { return callback(null, results, headers); } const entities = results.entries; async.map(entities, (row, next) => { if (row === undefined || row.PartitionKey === undefined || row.PartitionKey._ === undefined) { return next(new Error('Entity does not have a PartitionKey.')); } if (row === undefined || row.RowKey === undefined || row.RowKey._ === undefined) { return next(new Error('Entity does not have a RowKey.')); } const partitionKey = row.PartitionKey._; const rowKey = row.RowKey._; let reducedEntity = null; try { reducedEntity = encryptionOptions.tableDehydrator(row); } catch (rex) { return next(rex); } encryption.decryptEntity(partitionKey, rowKey, reducedEntity, encryptionOptions, (decryptError, entity) => { if (decryptError) { return next(decryptError); } const hydrated = encryptionOptions.tableRehydrator(partitionKey, rowKey, entity); return next(null, hydrated); }); }, (asyncError, decryptedRows) => { if (asyncError) { return callback(asyncError); } results.entries = decryptedRows; return callback(null, results); }); } function queryEntities() { const args = Array.prototype.slice.call(arguments); const tableClient = args.shift(); const encryptionOptions = args.shift(); const callback = args.pop(); args.push(queryEntitiesCallback.bind(undefined, encryptionOptions, callback)); tableClient.queryEntities.apply(tableClient, args); } 
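// Illustrative usage (a sketch, not part of the module): the wrapper returned below is used like a
// subset of an azure-storage TableService. The table name, keys and the exact encryptionOptions
// fields beyond tableDehydrator/tableRehydrator (which this file uses directly) depend on what
// lib/encryption.js expects, so treat the names here as assumptions:
//
//   const azure = require('azure-storage');
//   const wrapTableClient = require('./tableEncryption');
//   const encryptedTable = wrapTableClient(azure.createTableService(account, key), encryptionOptions);
//   encryptedTable.retrieveEntity('links', partitionKey, rowKey, (error, entity) => {
//     // entity is decrypted and rehydrated before it reaches this callback
//   });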
function insertEntity() { const args = Array.prototype.slice.call(arguments); const tableClient = args.shift(); const encryptionOptions = args.shift(); const entity = args[1]; const partitionKey = entity.PartitionKey._; const rowKey = entity.RowKey._; const reducedEntity = encryptionOptions.tableDehydrator(entity); const callback = args[args.length - 1]; encryption.encryptEntity(partitionKey, rowKey, reducedEntity, encryptionOptions, (encryptError, encryptedEntity) => { if (encryptError) { return callback(encryptError); } args[1] /* entity */ = encryptionOptions.tableRehydrator(partitionKey, rowKey, encryptedEntity); tableClient.insertEntity.apply(tableClient, args); }); } function replaceEntity() { const args = Array.prototype.slice.call(arguments); const tableClient = args.shift(); const encryptionOptions = args.shift(); const entity = args[1]; const partitionKey = entity.PartitionKey._; const rowKey = entity.RowKey._; const reducedEntity = encryptionOptions.tableDehydrator(entity); const callback = args[args.length - 1]; encryption.encryptEntity(partitionKey, rowKey, reducedEntity, encryptionOptions, (encryptError, encryptedEntity) => { if (encryptError) { return callback(encryptError); } args[1] /* entity */ = encryptionOptions.tableRehydrator(partitionKey, rowKey, encryptedEntity); tableClient.replaceEntity.apply(tableClient, args); }); } function mergeEntity() { const args = Array.prototype.slice.call(arguments); const callback = args.pop(); return callback(new Error('Entity merge operations are not supported when using table encryption.')); } module.exports = function wrapTableClient(tableClient, encryptionOptions) { const wrapped = { insertEntity: insertEntity.bind(undefined, tableClient, encryptionOptions), mergeEntity: mergeEntity.bind(undefined, tableClient, encryptionOptions), queryEntities: queryEntities.bind(undefined, tableClient, encryptionOptions), replaceEntity: replaceEntity.bind(undefined, tableClient, encryptionOptions), retrieveEntity: retrieveEntity.bind(undefined, tableClient, encryptionOptions), }; const passthru = [ 'createTableIfNotExists', 'deleteEntity', ]; for (let i = 0; i < passthru.length; i++) { const name = passthru[i]; wrapped[name] = tableClient[name].bind(tableClient); } return wrapped; }; <file_sep>/views/email/body.pug //- //- Copyright (c) Microsoft. All rights reserved. //- block content != content <file_sep>/routes/org/team/approval/index.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. 
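// Overview: this router lets team maintainers review a pending approval request, add a note, and
// approve or deny it. A decision updates the stored request, comments on and closes any tracking
// issue opened for the request, runs the engine's approval and secondary tasks, and mails the
// requester the outcome.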
// const express = require('express'); const router = express.Router(); const async = require('async'); const emailRender = require('../../../../lib/emailRender'); const utils = require('../../../../utils'); function teamsInfoFromRequest(team, approvalRequest, callback) { var oss = team.oss; if (approvalRequest.teamsCount) { var count = parseInt(approvalRequest.teamsCount, 10); var detailedTeams = []; for (var i = 0; i < count; i++) { var key = 'teamid' + i; if (approvalRequest[key] && approvalRequest[key + 'p']) { detailedTeams.push({ id: approvalRequest[key], permission: approvalRequest[key + 'p'], }); } } async.map(detailedTeams, function (basic, cb) { var permission = basic.permission; oss.getTeam(basic.id, function (error, teamInstance) { if (teamInstance) { teamInstance._temporary_permission = permission; } cb(error, teamInstance); }); }, callback); } else { callback(); } } router.get('/', function (req, res) { var approvalRequest = req.approvalEngine.request; var oss = req.oss; var team = req.team; teamsInfoFromRequest(team, approvalRequest, function (error, expandedTeamInfo) { // Ignoring any errors for now. if (approvalRequest.requested) { var asInt = parseInt(approvalRequest.requested, 10); approvalRequest.requestedTime = new Date(asInt); } if (approvalRequest.decisionTime) { approvalRequest.decisionTime = new Date(parseInt(approvalRequest.decisionTime, 10)); } oss.render(req, res, 'org/team/approveStatus', 'Request Status', { entry: approvalRequest, requestingUser: req.approvalEngine.user, expandedTeamInfo: expandedTeamInfo, team: team, teamUrl: req.teamUrl, }); }); }); router.get('/edit', function (req, res, next) { var approvalEngine = req.approvalEngine; if (approvalEngine.editGet) { return approvalEngine.editGet(req, res, next); } next(new Error('Editing is not supported for this request type.')); }); router.post('/edit', function (req, res, next) { var approvalEngine = req.approvalEngine; if (approvalEngine.editPost) { return approvalEngine.editPost(req, res, next); } next(new Error('Editing is not supported for this request type.')); }); router.get('/setNote/:action', function (req, res) { var engine = req.approvalEngine; var action = req.params.action; if (action == 'approveWithComment') { action = 'approve'; } engine.team.oss.render(req, res, 'org/team/approveStatusWithNote', 'Record your comment for request ' + engine.id + ' (' + action + ')', { entry: engine.request, action: action, requestingUser: engine.user, team: req.team, teamUrl: req.teamUrl, }); }); router.post('/', function (req, res, next) { var engine = req.approvalEngine; var requestid = engine.id; var team = engine.team; var org = req.org; var dc = req.app.settings.dataclient; const config = req.app.settings.runtimeConfig; if (!req.body.text && req.body.deny) { return res.redirect(req.teamUrl + 'approvals/' + requestid + '/setNote/deny'); } if (req.body.reopen) { req.oss.saveUserAlert(req, 'Request re-opened.', engine.typeName, 'success'); return dc.updateApprovalRequest(requestid, { active: true }, function () { res.redirect(req.teamUrl + 'approvals/' + requestid); }); } if (!req.body.text && req.body.approveWithComment) { return res.redirect(req.teamUrl + 'approvals/' + requestid + '/setNote/approveWithComment'); } const repoApprovalTypesValues = config.github.approvalTypes.repo; if (repoApprovalTypesValues.length === 0) { return next(new Error('No repo approval providers configured.')); } const repoApprovalTypes = new Set(repoApprovalTypesValues); const mailProviderInUse = repoApprovalTypes.has('mail'); 
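// Note: config.github.approvalTypes.repo is treated here as an array of provider names (for
// example ['mail', 'github']); 'mail' sends decision mail through the configured mailProvider,
// while 'github' tracks the request with an issue in the organization's workflow repository.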
var issueProviderInUse = repoApprovalTypes.has('github'); if (!mailProviderInUse && !issueProviderInUse) { return next(new Error('No configured approval providers configured.')); } const mailProvider = req.app.settings.mailProvider; if (!mailProvider) { return next(new Error('A mail provider has been requested but a provider instance could not be found.')); } const mailAddressProvider = req.app.settings.mailAddressProvider; // Approval workflow note: although the configuration may specify just a mail // provider today, there may actually be an issue that was opened at the time // of the request. So we will attempt to close any issues if the request has // an issue ID. var action = req.body.approveWithComment || req.body.approve ? 'approve' : 'deny'; var bodyText = req.body.text; var oss = req.oss; var friendlyErrorMessage = 'Whoa? What happened?'; var pendingRequest = engine.request; var notificationRepo = null; var issueId = pendingRequest.issue; var userMailAddress = null; try { if (issueId) { notificationRepo = org.getWorkflowRepository(); } } catch (noWorkflowRepoError) { // No provider configured issueId = undefined; issueProviderInUse = false; } var issue = null; async.waterfall([ function getMailAddressForUser(callback) { const upn = pendingRequest.email; mailAddressProvider.getAddressFromUpn(upn, (resolveError, mailAddress) => { if (resolveError) { return callback(resolveError); } userMailAddress = mailAddress; callback(); }); }, function commentOnIssue(callback) { if (!issueId) { return callback(); } issue = notificationRepo.issue(issueId); var bodyAddition = engine.messageForAction(action); if (bodyText !== undefined) { bodyAddition += '\n\nA note was included with the decision and can be viewed by team maintainers and the requesting user.'; } var comment = bodyAddition + '\n\n<small>This was generated by the Open Source Portal on behalf of ' + oss.usernames.github + '.</small>'; if (pendingRequest.ghu) { comment += '\n\n' + 'FYI, @' + pendingRequest.ghu + '\n'; } friendlyErrorMessage = 'While trying to comment on issue #' + issue.number + ', an error occurred.'; issue.createComment(comment, (commentError) => { if (commentError && mailProviderInUse) { issue = null; issueProviderInUse = false; } callback(commentError); }); }, function updateRequest() { var callback = arguments[arguments.length - 1]; var requestUpdates = { decision: action, active: false, decisionTime: (new Date().getTime()).toString(), decisionBy: oss.usernames.github, decisionNote: bodyText, decisionEmail: oss.modernUser().contactEmail(), }; var updatedRequest = Object.assign({}, pendingRequest); Object.assign(updatedRequest, requestUpdates); friendlyErrorMessage = 'The approval request information could not be updated, indicating a data store problem potentially. 
The decision may not have been recorded.'; dc.replaceApprovalRequest(requestid, updatedRequest, callback); }, function performApprovalOperations() { var callback = arguments[arguments.length - 1]; if (action == 'approve') { engine.performApprovalOperation(callback); } else { callback(); } }, function closeIssue() { var callback = arguments[arguments.length - 1]; if (!issue) { return callback(); } friendlyErrorMessage = 'The issue #' + issue.number + ' that tracks the request could not be closed.'; issue.close(callback); }, function () { friendlyErrorMessage = null; var callback = arguments[arguments.length - 1]; if (action == 'approve' && engine.generateSecondaryTasks) { engine.generateSecondaryTasks(callback); } else { callback(); } }, // Secondary tasks run after the primary and in general will not // fail the approval operation. By sending an empty error callback // but then an object with an error property set, the operation // that failed can report status. Whether an error or not, a // message property will be shown for each task result. function () { friendlyErrorMessage = null; var tasks = arguments.length > 1 ? arguments[0] : []; var callback = arguments[arguments.length - 1]; async.series(tasks, callback); }, ], function (error, output) { if (error) { if (friendlyErrorMessage) { error = utils.wrapError(error, friendlyErrorMessage); } return next(error); } var secondaryErrors = false; if (output && output.length) { output.forEach((secondaryResult) => { if (secondaryResult.error) { secondaryErrors = true; try { var extraInfo = { eventName: 'ReposRequestSecondaryTaskError', }; if (secondaryResult.error.data) { Object.assign(extraInfo, secondaryResult.error.data); } if (secondaryResult.error.headers) { Object.assign(extraInfo, secondaryResult.error.headers); } req.insights.trackException(secondaryResult.error, extraInfo); } catch (unusedError) { // never want this to fail } } }); } req.oss.saveUserAlert(req, 'Thanks for processing the request with your ' + action.toUpperCase() + ' decision.', engine.typeName, 'success'); function sendDecisionMail() { const wasApproved = action == 'approve'; const contentOptions = { correlationId: req.correlationId, pendingRequest: pendingRequest, version: config.logging.version, results: output, wasApproved: wasApproved, decisionBy: oss.usernames.github, decisionNote: bodyText, decisionEmail: oss.modernUser().contactEmail(), }; if (!engine.getDecisionEmailViewName || !engine.getDecisionEmailSubject) { return req.insights.trackException(new Error('No getDecisionEmailViewName available with the engine.'), Object.assign({ eventName: 'ReposRequestDecisionMailRenderFailure', }, contentOptions)); } const getDecisionEmailViewName = engine.getDecisionEmailViewName(); emailRender.render(req.app.settings.basedir, getDecisionEmailViewName, contentOptions, (renderError, mailContent) => { if (renderError) { return req.insights.trackException(renderError, Object.assign({ eventName: 'ReposRequestDecisionMailRenderFailure', }, contentOptions)); } // TODO: remove spike: adding the GitHub admin alias if there is a secondary failure var recipients = [userMailAddress]; if (secondaryErrors) { recipients.push('<EMAIL>'); } const mail = { to: recipients, subject: engine.getDecisionEmailSubject(wasApproved, pendingRequest), reason: (`You are receiving this e-mail because of a request that you created, and a decision has been made. 
This mail was sent to: ${pendingRequest.email}`), content: mailContent, headline: engine.getDecisionEmailHeadline(wasApproved, pendingRequest), classification: wasApproved ? 'information' : 'warning', service: 'Microsoft GitHub', correlationId: req.correlationId, }; mailProvider.sendMail(mail, (mailError, mailResult) => { var customData = Object.assign({ receipt: mailResult, }, contentOptions); if (mailError) { customData.eventName = 'ReposRequestDecisionMailFailure'; req.insights.trackException(mailError, customData); } else { req.insights.trackEvent('ReposRequestDecisionMailSuccess', customData); } }); }); } if (mailProviderInUse) { sendDecisionMail(); } if (action !== 'approve' || !engine.getApprovedViewName) { return res.redirect(req.teamUrl); } oss.render(req, res, engine.getApprovedViewName(), 'Approved', { pendingRequest: pendingRequest, results: output, team: team, teamUrl: req.teamUrl, }); }); }); module.exports = router; <file_sep>/lib/mailAddressProvider/mockMailAddressProvider.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // 'use strict'; module.exports = function createMailAddressProvider() { const upnToEmails = new Map(); return { getAddressFromUpn: (upn, callback) => { if (upnToEmails.has(upn)) { return callback(null, upnToEmails.get(upn)); } callback(new Error(`No e-mail address known for "${upn}".`)); }, // testability: getUpnToEmails: function () { return upnToEmails; } }; }; <file_sep>/views/settings/layout.pug //- //- Copyright (c) Microsoft. All rights reserved. //- Licensed under the MIT license. See LICENSE file in the project root for full license information. //- extends ../layout.pug block content .container .row .col-md-3.col-lg-3 .panel.panel-default .panel-heading Personal settings .list-group a.list-group-item(href='/settings', class={active: view === 'settings'}) Profile .panel.panel-default .panel-heading Authorizations & accounts .list-group a.list-group-item(href='/settings/authorizations', class={active: view === 'settings/authorizations'}) GitHub a.list-group-item(href='/settings/npm', class={active: view === 'settings/npm'}) NPM .panel.panel-default .panel-heading Reports .list-group a.list-group-item(href='/settings/digestReports', class={active: view === 'settings/digestReports'}) Administrator reports .col-md-7.col-lg-7 block content <file_sep>/lib/ossManagementDb.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. 
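// Usage sketch (illustrative, not part of the original source; assumes an open `mssql`
// connection object and a GitHub repository id string — the id value is made up):
//   const ossDb = require('./ossManagementDb');
//   ossDb.getClaRepositorySettings(connection, '123456', (error, row) => {
//     // `row` is the matching dbo.Repositories record, or null when no row exists.
//   });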
// 'use strict'; const sql = require('mssql'); exports.getClaRepositorySettings = function getClaRepositorySettings(dbConection, repoId, callback) { const query = `SELECT * FROM dbo.Repositories WHERE ObjectId = '${repoId}'`; const request = new sql.Request(dbConection); request.query(query, (queryError, results) => { if (queryError) { return callback(queryError); } var result = null; if (results && results.length > 0) { result = results[0]; } return callback(null, result); }); }; exports.upsertClaRepositoryData = function upsertClaRepositoryData(dbConection, claData, callback) { const selectQuery = `SELECT Id FROM dbo.Repositories WHERE ObjectId = '${claData.repoGitHubId}'`; const request = new sql.Request(dbConection); request.query(selectQuery, (err, results) => { if (err) { return callback(err); } if (results.length === 0) { // Insert new entry const insertQuery = 'INSERT INTO dbo.Repositories(Name, OrganizationId, Description, IsPrivate, Provider, ObjectId, AutomateCLA, CLAHookId, ' + 'NotifierEmails, LicenseId, CreatedOn, CreatedAtProvider, UpdatedAtProvider, SourceUrl, IsFork, IsRepoIgnored, CreatedBy, TransferRequested) ' + 'VALUES(@name, @organizationId, @description, @isPrivate, 0, @repoGitHubId, \'True\', @webHookId, ' + '@emails, @licenseId, GETDATE(), @createdAtProvider, @updatedAtProvider, @sourceUrl, @isFork, \'False\', \'ospo-repos\', \'False\')'; new sql.Request(dbConection) .input('name', claData.repoName) .input('organizationId', claData.organizationId) .input('description', claData.description) .input('isPrivate', claData.isPrivate) .input('repoGitHubId', claData.repoGitHubId) .input('webHookId', claData.webHookId) .input('emails', claData.emails) .input('licenseId', claData.licenseId) .input('createdAtProvider', claData.createdAt) .input('updatedAtProvider', claData.updatedAt) .input('sourceUrl', claData.sourceUrl) .input('isFork', claData.isFork) .query(insertQuery, (error) => { return callback(error ? error : null); }); } else { // Update existing entry const id = results[0].Id; const updateQuery = 'UPDATE dbo.Repositories SET Name=@name, OrganizationId=@organizationId, Description=@description, ' + 'IsPrivate=@isPrivate, Provider=0, ObjectId=@repoGitHubId, AutomateCLA=\'True\', CLAHookId=@webHookId, NotifierEmails=@emails, ' + 'LicenseId=@licenseId, UpdatedOn=GETDATE(), CreatedAtProvider=@createdAtProvider, UpdatedAtProvider=@updatedAtProvider, ' + 'SourceUrl=@sourceUrl, IsFork=@isFork, IsRepoIgnored=\'False\', LastUpdatedBy=\'ospo-repos\', TransferRequested=\'False\' WHERE Id=@id'; new sql.Request(dbConection) .input('name', claData.repoName) .input('organizationId', claData.organizationId) .input('description', claData.description) .input('isPrivate', claData.isPrivate) .input('repoGitHubId', claData.repoGitHubId) .input('webHookId', claData.webHookId) .input('emails', claData.emails) .input('licenseId', claData.licenseId) .input('createdAtProvider', claData.createdAt) .input('updatedAtProvider', claData.updatedAt) .input('sourceUrl', claData.sourceUrl) .input('isFork', claData.isFork) .input('id', id) .query(updateQuery, (error) => { return callback(error ? error : null); }); } }); };<file_sep>/views/org/userApprovalStatus.pug //- //- Copyright (c) Microsoft. All rights reserved. //- Licensed under the MIT license. See LICENSE file in the project root for full license information. 
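//- Descriptive note (added; inferred from the markup below): this view renders one approval
//- record passed in as `entry` — its active flag, justification, RowKey and decision fields —
//- and, while the request is still open, posts a cancel form to /approvals/<RowKey>/cancel.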
//- extends ../layout block content div.container if entry.active === true h1 Awaiting action by an approver else if entry.active === false h1 Issue closed or resolved h4 REQUEST GUID p= entry.RowKey h4 STATUS p if entry.active === true | Awaiting team maintainer action else if entry.active === false | Closed / Resolved else | Unknown if entry.justification h4 | BUSINESS JUSTIFICATION small written by #{entry.ghu} blockquote= entry.justification if entry.active === true h4 DETAILS p //-if team && entry.issue //- NOTE: Commenting out GitHub links with approval repos, since they are being deprecated internally and this is a quick fix a.btn.btn-sm.btn-default(href='https://github.com/' + team.org.name + '/' + team.org.getWorkflowRepository().name + '/issues/' + entry.issue, target='_new') GitHub Tracking Issue# #{entry.issue} p small This is the issue created on GitHub and assigned to a team maintainer to handle the request. Note that the issue is visible by everyone who is a member of the organization on GitHub, so it is not the best place to provide specific justification information... but if you bump the issue, the contact should hopefully get a reminder to take a look. h4 ACTIONS form(method='post', action='/approvals/' + entry.RowKey + '/cancel') input.btn.btn-primary.btn-sm(type='submit', value='Cancel My Request') else table.table thead tr th Decision th Decision by th Actions th Decision note th Decision made tbody tr td= (entry.decision && entry.decision.toUpperCase) ? entry.decision.toUpperCase() : 'n/a' td= entry.decisionBy || 'Unknown Decisionmaker' td if entry.decisionEmail a.btn.btn-sm.btn-default(href='mailto:' + entry.decisionEmail) Email the maintainer td= entry.decisionNote || 'No notes left regarding this decision by the decisionmaker.' td p if entry.decisionTime && entry.decisionTime.toISOString time(datetime=entry.decisionTime.toISOString())= entry.decisionTime.toDateString() else | n/a <file_sep>/lib/graphProvider/microsoftGraphProvider.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // 'use strict'; // This code adopted from our existing jobs code const cache = require('memory-cache'); const request = require('request'); module.exports = function createMicrosoftGraphProvider(graphOptions) { const secondsString = graphOptions.tokenCacheSeconds || '60'; const tokenCacheMilliseconds = parseInt(secondsString) * 1000; function getGraphAccessToken(callback) { const clientId = graphOptions.clientId; const clientSecret = graphOptions.clientSecret; if (!clientId || !clientSecret) { return callback(null, new Error('The graph provider requires an AAD clientId and clientSecret.')); } const tokenEndpoint = 'https://login.microsoftonline.com/microsoft.com/oauth2/token'; // These are the parameters necessary for the OAuth 2.0 Client Credentials Grant Flow. // For more information, see Service to Service Calls Using Client Credentials (https://msdn.microsoft.com/library/azure/dn645543.aspx). 
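// Illustrative note (added): a successful token response from this endpoint is JSON roughly
// shaped like { "token_type": "Bearer", "expires_in": "3599", "access_token": "eyJ0..." };
// only `access_token` is read below, and failures surface through the parsed body's `error` field.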
const requestParams = { 'grant_type': 'client_credentials', 'client_id': clientId, 'client_secret': clientSecret, 'resource': 'https://graph.microsoft.com' }; request.post({ url: tokenEndpoint, form: requestParams }, function (err, response, body) { if (err) { return callback(err, null); } const parsedBody = JSON.parse(body); if (parsedBody.error) { return callback(new Error(parsedBody.error.message), null); } else { return callback(null, parsedBody.access_token); } }); } function getGraphOptions(accessToken) { return { headers: { Authorization: `Bearer ${accessToken}`, }, json: true, }; } function getToken(callback) { const tokenKey = graphOptions.clientId; const token = cache.get(tokenKey); if (token) { return callback(null, token); } getGraphAccessToken((error, t) => { if (error) { return callback(error); } cache.put(tokenKey, t, tokenCacheMilliseconds); return callback(null, t); }); } function getUserById(aadId, options, subResource, callback) { if (!callback && typeof(subResource) === 'function') { callback = subResource; subResource = null; } const extraPath = subResource ? `/${subResource}` : ''; const url = `https://graph.microsoft.com/v1.0/users/${aadId}${extraPath}?$select=id,displayName,givenName,mail,userPrincipalName`; request.get(url, options, (err, response, body) => { if (err) { return callback(err, null); } else if (response.statusCode >= 400) { return callback(new Error(`Invalid status code: ${response.statusCode}`), null); } else if (body === undefined) { return callback(new Error('user not found'), null); } else if (body.error) { return callback(new Error(body.error.message), null); } else { return callback(null, body); } }); } function getTokenThenEntity(aadId, resource, callback) { getToken((error, token) => { if (error) { return callback(error); } getUserById(aadId, getGraphOptions(token), resource, callback); }); } return { getUserById: (aadId, callback) => { getTokenThenEntity(aadId, null, callback); }, getManagerById: (aadId, callback) => { getTokenThenEntity(aadId, 'manager', callback); }, getUserAndManagerById: (aadId, callback) => { getTokenThenEntity(aadId, null, (error, user) => { if (error) { return callback(error); } getTokenThenEntity(aadId, 'manager', (noManager, manager) => { if (!error && manager) { user.manager = manager; } callback(null, user); }); }); }, }; }; <file_sep>/routes/api/client/newRepo.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // 'use strict'; const express = require('express'); const jsonError = require('../jsonError'); const newOrgRepo = require('./newOrgRepo'); const router = express.Router(); router.use('/org/:org', (req, res, next) => { const orgName = req.params.org; const operations = req.app.settings.providers.operations; try { req.organization = operations.getOrganization(orgName); } catch (noOrganization) { return next(jsonError(new Error('This API endpoint is not configured for the provided organization name.'))); } return next(); }); router.use('/org/:org', newOrgRepo); module.exports = router; <file_sep>/Gruntfile.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. 
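// Build note (added, illustrative): the default task registered below runs `copy` (vendor
// assets from bower_components/ and node_modules/ into public/) followed by `build_less`
// (compiles resources/less into public/css/bootstrap.css plus a minified variant).
// Assuming grunt-cli is installed locally, running `grunt` from the repo root executes this chain.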
// module.exports = function (grunt) { require('load-grunt-tasks')(grunt); grunt.initConfig({ pkg: grunt.file.readJSON('package.json'), builddir: 'public/css', buildscriptdir: 'public/js/', buildrootdir: 'public/', banner: '/*!\n' + ' * <%= pkg.name %> v<%= pkg.version %>\n' + ' * Homepage: <%= pkg.homepage %>\n' + ' * Copyright 2012-<%= grunt.template.today("yyyy") %> <%= pkg.author %>\n' + ' * Licensed under <%= pkg.license %>\n' + ' * Based on Bootstrap\n' + '*/\n', less: { dist: { options: { compress: false, strictMath: true }, files: {} } }, concat: { options: { banner: '<%= banner %>', stripBanners: false }, dist: { src: [], dest: '' } }, copy: { bootstrap: { files: [ { expand: true, src: '**', cwd: 'bower_components/bootstrap/dist/', dest: '<%= buildrootdir %>', }, ] }, typeaheadjs: { files: [ { expand: true, src: '**', cwd: 'bower_components/typeahead.js/dist/', dest: '<%= buildscriptdir %>', }, ] }, typeaheadjsBootstrapPatch: { files: [ { expand: true, src: '*.less', cwd: 'bower_components/typeahead.js-bootstrap3.less/', dest: 'resources/less/', }, ] }, octicons: { files: [ { expand: true, src: '**', cwd: 'node_modules/octicons/build/', dest: '<%= builddir %>', }, ] }, jquery: { files: [ { expand: true, src: '**', cwd: 'bower_components/jQuery/dist/', dest: '<%= buildscriptdir %>', }, ] }, resources: { files: [ { expand: true, src: '**', cwd: 'resources/', dest: '<%= buildrootdir %>', }, ] }, timeago: { files: [ { expand: true, src: 'jquery.timeago.js', cwd: 'bower_components/jquery-timeago/', dest: '<%= buildscriptdir %>', }, ] }, uitablefilter: { files: [ { expand: true, src: 'jquery.uitablefilter.js', cwd: 'bower_components/jquery-uitablefilter/', dest: '<%= buildscriptdir %>', }, ] }, d3: { files: [ { expand: true, src: 'd3.min.js', cwd: 'bower_components/d3/', dest: '<%= buildscriptdir %>', }, ] }, c3: { files: [ { expand: true, src: 'c3.min.css', cwd: 'bower_components/c3/', dest: '<%= builddir %>', }, { expand: true, src: 'c3.min.js', cwd: 'bower_components/c3/', dest: '<%= buildscriptdir %>', }, ] }, }, clean: { build: { src: ['*/build.scss', '*/build.less'] } } }); grunt.registerTask('none', function () { }); grunt.registerTask('build_less', 'build a regular theme from less', function() { var theme = 'resources/less'; var compress = true; var concatSrc; var concatDest; var lessDest; var lessSrc; var files = {}; var dist = {}; concatSrc = theme + '/_build.less'; concatDest = theme + '/build.less'; lessDest = '<%=builddir%>/bootstrap.css'; lessSrc = [ theme + '/' + 'build.less' ]; dist = {src: concatSrc, dest: concatDest}; grunt.config('concat.dist', dist); files = {}; files[lessDest] = lessSrc; grunt.config('less.dist.files', files); grunt.config('less.dist.options.compress', false); grunt.task.run(['concat', 'less:dist', /*'prefix:' + lessDest,*/ 'clean:build', compress ? 'compress:'+lessDest+':'+'<%=builddir%>/bootstrap.min.css':'none']); }); grunt.registerTask('compress', 'compress a generic css', function(fileSrc, fileDst) { var files = {}; files[fileDst] = fileSrc; grunt.log.writeln('compressing file ' + fileSrc); grunt.config('less.dist.files', files); grunt.config('less.dist.options.compress', true); grunt.task.run(['less:dist']); }); grunt.registerTask('default', ['copy', 'build_less']); }; <file_sep>/views/reposToolbar.pug //- //- Copyright (c) Microsoft. All rights reserved. //- Licensed under the MIT license. See LICENSE file in the project root for full license information. 
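//- Descriptive note (added; inferred from the markup below): this navbar is driven by a
//- `reposContext` local — its `section`, `org`, `availableOrganizations`, and optional
//- `pivotDirectlyToOtherOrg` suffix decide which organization links and pivots are shown.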
//- //- THIS PAGE IS MICROSOFT-SPECIFIC //- DO NOT MERGE THIS PAGE UP TO 'PUBLIC-DEVELOP' div.navbar.navbar-inverse div.container-fluid div.navbar-header button.navbar-toggle.collapsed(type='button', data-toggle='collapse', data-target='#repos-section-navbar-collapse', aria-expanded='false') span.sr-only Toggle navigation span.icon-bar span.icon-bar span.icon-bar div.collapse.navbar-collapse#repos-section-navbar-collapse ul.nav.navbar-nav(style='margin-left:-30px') li.dropdown(class={ active: /*reposContext.section === 'org' || */ reposContext.section === 'orgs' }) a.dropbown-toggle( href=reposContext.org && reposContext.org.name ? '#' : '/', data-toggle='dropdown', role='button', aria-haspopup='true', aria-expanded='false') //= reposContext.org && reposContext.org.name ? reposContext.org.name : 'Organizations' = 'Organizations' | &nbsp; span.caret ul.dropdown-menu if reposContext.org li a(href='/' + reposContext.org.name)= reposContext.org.name if reposContext.availableOrganizations && reposContext.availableOrganizations.length > 0 if reposContext.org li.divider(role='separator') li.dropdown-header My organizations each availableOrg in reposContext.availableOrganizations - var directPivot = reposContext.pivotDirectlyToOtherOrg ? reposContext.pivotDirectlyToOtherOrg : '' li a(href='/' + availableOrg.name + directPivot)= availableOrg.name if reposContext.section !== 'orgs' li.divider(role='separator') li a(href='/') Join another organization else if reposContext.org === undefined li a(href='/') Join your first org //- commenting out work-in-progress to get up to production if reposContext.org li(class={ active: reposContext.section === 'org' }) a(href=(reposContext.org ? '/' + reposContext.org.name : ''))= reposContext.org.name li(class={ active: reposContext.section === 'repos' }) a(href=(reposContext.org ? '/' + reposContext.org.name : '') + '/repos') Repos li(class={ active: reposContext.section === 'teams' }) a(href=(reposContext.org ? '/' + reposContext.org.name : '') + '/teams') Teams li(class={ active: reposContext.section === 'people' }) a(href=(reposContext.org ? '/' + reposContext.org.name : '') + '/people') People //-ul.nav.navbar-nav.navbar-right <file_sep>/views/extensions/npm/published.pug //- //- Copyright (c) Microsoft. All rights reserved. //- Licensed under the MIT license. See LICENSE file in the project root for full license information. //- extends ../../layout block content .container .row: .col-sm-12 .nav ul.pager.zero-pad-bottom li.previous a(href='/' + organization.name + '/repos/' + repository.name) span(aria-hidden=true) &larr; = ' Back to the ' + repository.name + ' repository' .row: .col-sm-12 h1 Published #{context.packageVersionedName} if log ul each l in log if l li= l if context && context.package && context.package.name p: a.btn.btn-sm.btn-muted(href='https://npmjs.org/package/' + context.package.name)= 'View ' + context.package.name <file_sep>/views/repos/index.pug //- //- Copyright (c) Microsoft. All rights reserved. //- Licensed under the MIT license. See LICENSE file in the project root for full license information. //- extends ../layout //-block append js script(type='text/javascript', src='/js/typeahead.bundle.min.js') block append js_doc_ready include ../js/search.js //-include autocomplete.js block content //- View services - var languageColor = viewServices.languageColor - var octicon = viewServices.octicon - var fileSize = viewServices.fileSize //- View variables and options - var specificTeamId = specificTeam ? 
specificTeam.id : null - var specificTeamView = specificTeamPermissions && specificTeamId .container //-form#search-container input.typeahead.search-input.typeahead-form-control(type='text', placeholder='Search repositories', spellcheck='false', autocomplete='off') if specificTeam .nav ul.pager.zero-pad-bottom li.previous a(href='/' + organization.name + '/teams/' + specificTeam.slug) span(aria-hidden=true) &larr; = ' Back to the ' + specificTeam.name + ' team' //- Scenario-based heading if organization && specificTeam h3 = specificTeam.name + ' team repositories' if specificTeamPermissions && specificTeamPermissions.allowAdministration = ' ' .label.label-warning.shrink66( title=specificTeamPermissions.sudo ? 'As a sudo maintainer, you can administer team settings in this application but not directly on GitHub.com' : 'As a team maintainer you can administer team settings in this application and directly on GitHub.com' )= specificTeamPermissions.sudo ? 'Sudo maintainer' : 'Team maintainer' h5= organization.name + ' organization' else if organization h3= organization.name + ' repositories' else h1 Repositories p.lead Across all officially managed #{config.brand.companyName} organizations //- If the age of the data is known if reposDataAgeInformation p.text-primary(style='margin-bottom:24px') if reposDataAgeInformation.changed = 'Updated ' + reposDataAgeInformation.changed if reposDataAgeInformation.updated && reposDataAgeInformation.changed |, refreshed else | Refreshed if reposDataAgeInformation.updated = ' ' + reposDataAgeInformation.updated .row .col-md-8 ul.nav.nav-pills li(class=(search.sort === 'Pushed' ? 'active' : '')) a(href='?sort=Pushed&tag=' + (specificTeamId ? '&teamRepos=' + specificTeamId : '') + (tag ? tag : '') + (query.tt ? '&tt=' + query.tt : '') + (query.phrase ? '&q=' + query.phrase : ''), title='Sort by commit pushed date/time') != octicon('git-commit', 20) | &nbsp;Recent li(class=(search.sort === 'Stars' ? 'active' : '')) a(href='?sort=Stars&tag=' + (specificTeamId ? '&teamRepos=' + specificTeamId : '') + (tag ? tag : '') + (query.tt ? '&tt=' + query.tt : '') + (query.phrase ? '&q=' + query.phrase : ''), title='Sort by stars') //- i.glyphicon.glyphicon-star != octicon('star', 16) | Stars li(class=(search.sort === 'Forks' ? 'active' : '')) a(href='?sort=Forks&tag=' + (specificTeamId ? '&teamRepos=' + specificTeamId : '') + (tag ? tag : '') + (query.tt ? '&tt=' + query.tt : '') + (query.phrase ? '&q=' + query.phrase : ''), title='Sort by forks') != octicon('repo-forked', 14) | Forks li(class=(search.sort === 'Alphabet' ? 'active' : ''), title='Sort by repo name') a(href='?sort=Alphabet&tag=' + (specificTeamId ? '&teamRepos=' + specificTeamId : '') + (tag ? tag : '') + (query.tt ? '&tt=' + query.tt : '') + (query.phrase ? '&q=' + query.phrase : '')) //- i.glyphicon.glyphicon-sort-by-alphabet != octicon('text-size', 20) | Name //-i.glyphicon.glyphicon-triangle-bottom li(class=(search.sort === 'Updated' ? 'active' : '')) a(href='?sort=Updated&tag=' + (specificTeamId ? '&teamRepos=' + specificTeamId : '') + (tag ? tag : '') + (query.tt ? '&tt=' + query.tt : '') + (query.phrase ? '&q=' + query.phrase : ''), title='Sort by updated date') != octicon('calendar', 20) | Updated li(class=(search.sort === 'Created' ? 'active' : '')) a(href='?sort=Created&tag=' + (specificTeamId ? '&teamRepos=' + specificTeamId : '') + (tag ? tag : '') + (query.tt ? '&tt=' + query.tt : '') + (query.phrase ? 
'&q=' + query.phrase : ''), title='Sort by created date') != octicon('zap', 12) | Created li(class=(search.sort === 'Size' ? 'active' : '')) a(href='?sort=Size&tag=' + (specificTeamId ? '&teamRepos=' + specificTeamId : '') + (tag ? tag : '') + (query.tt ? '&tt=' + query.tt : '') + (query.phrase ? '&q=' + query.phrase : ''), title='Sort by approximate repo size') != octicon('database', 16) | Size form.form-horizontal#entitySearch(style='margin-top:24px') .form-group .col-md-5 div.input-group input.form-control#inputQuery( placeholder='Search repositories...', type='text', value=query && query.phrase ? query.phrase : null, style='max-width:400px') span.input-group-btn button( class='btn btn-muted' type='submit' style='border-width: 1px') Search .col-md-7 ul.nav.nav-pills li.dropdown(role='presentation') a.dropdown-toggle#typeLabel(data-toggle='dropdown', href='#', role='button', aria-haspopup='true', aria-expanded='false') = 'Type: ' if query && query.type strong= query.type else strong All span.caret ul.dropdown-menu.border-1px-primary(aria-labelledby='typeLabel', style='border-top:0;margin-top:0;padding-top:0;padding-bottom:0') - var currentType = query && query.type ? query.type : 'all' li(class={ active: currentType === 'all' }) a(href='?page_number=' + (search.page) + (specificTeamId ? '&teamRepos=' + specificTeamId : '') + (tag ? '&tag=' + tag : '') + (search.sort ? '&sort=' + search.sort : '') + (query.tt ? '&tt=' + query.tt : '') + (query.language ? '&language=' + search.observedLanguagesEncoded.get(query.language) : '') + ('&type=all') + (query.phrase ? '&q=' + query.phrase : '')) All li(class={ active: currentType === 'public' }) a(href='?page_number=' + (search.page) + (specificTeamId ? '&teamRepos=' + specificTeamId : '') + (tag ? '&tag=' + tag : '') + (search.sort ? '&sort=' + search.sort : '') + (query.tt ? '&tt=' + query.tt : '') + (query.language ? '&language=' + search.observedLanguagesEncoded.get(query.language) : '') + ('&type=public') + (query.phrase ? '&q=' + query.phrase : '')) Public li(class={ active: currentType === 'private' }) a(href=href='?page_number=' + (search.page) + (specificTeamId ? '&teamRepos=' + specificTeamId : '') + (tag ? '&tag=' + tag : '') + (search.sort ? '&sort=' + search.sort : '') + (query.tt ? '&tt=' + query.tt : '') + (query.language ? '&language=' + search.observedLanguagesEncoded.get(query.language) : '') + ('&type=private') + (query.phrase ? '&q=' + query.phrase : '')) Private li(class={ active: currentType === 'source' }) a(href=href='?page_number=' + (search.page) + (specificTeamId ? '&teamRepos=' + specificTeamId : '') + (tag ? '&tag=' + tag : '') + (search.sort ? '&sort=' + search.sort : '') + (query.tt ? '&tt=' + query.tt : '') + (query.language ? '&language=' + search.observedLanguagesEncoded.get(query.language): '') + ('&type=source') + (query.phrase ? '&q=' + query.phrase : '')) Sources li(class={ active: currentType === 'fork' }) a(href=href='?page_number=' + (search.page) + (specificTeamId ? '&teamRepos=' + specificTeamId : '') + (tag ? '&tag=' + tag : '') + (search.sort ? '&sort=' + search.sort : '') + (query.tt ? '&tt=' + query.tt : '') + (query.language ? '&language=' + search.observedLanguagesEncoded.get(query.language) : '') + ('&type=fork') + (query.phrase ? 
'&q=' + query.phrase : '')) Forks li.dropdown(role='presentation') a.dropdown-toggle#languageLabel(data-toggle='dropdown', href='#', role='button', aria-haspopup='true', aria-expanded='false') = 'Language: ' if query && query.language strong= query.language else strong All span.caret ul.dropdown-menu.border-1px-primary(aria-labelledby='languageLabel', style='border-top:0;margin-top:0;padding-top:0;padding-bottom:0') - var currentLanguage = query && query.language ? query.language : 'all' li(class={ active: currentLanguage === 'all' }) a(href='?page_number=' + (search.page) + (specificTeamId ? '&teamRepos=' + specificTeamId : '') + (tag ? '&tag=' + tag : '') + (search.sort ? '&sort=' + search.sort : '') + ('&language=all') + (query.type ? '&type=' + query.type : '') + (query.tt ? '&tt=' + query.tt : '') + (query.phrase ? '&q=' + query.phrase : '')) All if search && search.observedLanguages each language in Array.from(search.observedLanguages).sort() li(class={ active: currentLanguage === language.toLowerCase() }) a(href='?page_number=' + (search.page) + (specificTeamId ? '&teamRepos=' + specificTeamId : '') + (tag ? '&tag=' + tag : '') + (search.sort ? '&sort=' + search.sort : '') + ('&language=' + search.observedLanguagesEncoded.get(language)) + (query.type ? '&type=' + query.type : '') + (query.tt ? '&tt=' + query.tt : '') + (query.phrase ? '&q=' + query.phrase : ''))= language //- Let the user filter by the type of permission they have, unless this is a team's specific repos if !specificTeamId li.dropdown(role='presentation') a.dropdown-toggle#teamTypeLabel(data-toggle='dropdown', href='#', role='button', aria-haspopup='true', aria-expanded='false') = 'Teams: ' if query && query.tt strong= query.tt else strong Any / all span.caret ul.dropdown-menu.border-1px-primary(aria-labelledby='teamTypeLabel', style='border-top:0;margin-top:0;padding-top:0;padding-bottom:0') - var currentTeamType = query && query.tt ? query.tt : 'all' li(class={ active: currentTeamType === 'all' }) a(href='?page_number=' + (search.page) + (specificTeamId ? '&teamRepos=' + specificTeamId : '') + (tag ? '&tag=' + tag : '') + (search.sort ? '&sort=' + search.sort : '') + (query.language ? '&language=' + search.observedLanguagesEncoded.get(query.language) : '') + ('&tt=all') + (query.type ? '&type=' + query.type : '') + (query.phrase ? '&q=' + query.phrase : '')) All li(class={ active: currentTeamType === 'myadmin' }) a(href='?page_number=' + (search.page) + (specificTeamId ? '&teamRepos=' + specificTeamId : '') + (tag ? '&tag=' + tag : '') + (search.sort ? '&sort=' + search.sort : '') + (query.language ? '&language=' + search.observedLanguagesEncoded.get(query.language) : '') + ('&tt=myadmin') + (query.type ? '&type=' + query.type : '') + (query.phrase ? '&q=' + query.phrase : '')) Teams with admin li(class={ active: currentTeamType === 'mywrite' }) a(href=href='?page_number=' + (search.page) + (specificTeamId ? '&teamRepos=' + specificTeamId : '') + (tag ? '&tag=' + tag : '') + (search.sort ? '&sort=' + search.sort : '') + (query.language ? '&language=' + search.observedLanguagesEncoded.get(query.language) : '') + ('&tt=mywrite') + (query.type ? '&type=' + query.type : '') + (query.phrase ? '&q=' + query.phrase : '')) Teams you can write with li(class={ active: currentTeamType === 'myread' }) a(href=href='?page_number=' + (search.page) + (specificTeamId ? '&teamRepos=' + specificTeamId : '') + (tag ? '&tag=' + tag : '') + (search.sort ? '&sort=' + search.sort : '') + (query.language ? 
'&language=' + search.observedLanguagesEncoded.get(query.language): '') + ('&tt=myread') + (query.type ? '&type=' + query.type : '') + (query.phrase ? '&q=' + query.phrase : '')) Teams you can pull via li(class={ active: currentTeamType === 'teamless' }) a(href=href='?page_number=' + (search.page) + (specificTeamId ? '&teamRepos=' + specificTeamId : '') + (tag ? '&tag=' + tag : '') + (search.sort ? '&sort=' + search.sort : '') + (query.language ? '&language=' + search.observedLanguagesEncoded.get(query.language) : '') + ('&tt=teamless') + (query.type ? '&type=' + query.type : '') + (query.phrase ? '&q=' + query.phrase : '')) No teams if filters.length p(style='margin-top:24px') if search.totalRepos > 1 strong= search.totalRepos.toLocaleString() | results else if search.totalRepos === 1 strong 1 | result else | No results = ' for ' each filter in filters = filter.displayPrefix ? filter.displayPrefix + ' ' : '' strong= filter.displayValue || filter.value = ' ' = filter.displaySuffix ? filter.displaySuffix + ' ' : '' a.pull-right.btn.btn-sm.btn-muted-more(href='?') != octicon('x', 14) = ' Clear filter' hr if search.totalRepos === 0 .well.well-lg div.text-center p != octicon('repo', 24) if organization p.lead This organization doesn't have any repositories that match. else p.lead No repositories match across all Microsoft organizations. else nav(style='margin-bottom:48px') ul.pager li.previous(class=(search.page > 1 ? '' : 'disabled')) a(href='?page_number=' + (search.page-1) + (specificTeamId ? '&teamRepos=' + specificTeamId : '') + (tag ? '&tag=' + tag : '') + (query.tt ? '&tt=' + query.tt : '') + (search.sort ? '&sort=' + search.sort : '') + (query.language ? '&language=' + search.observedLanguagesEncoded.get(query.language) : '') + (query.type ? '&type=' + query.type : '') + (query.phrase ? '&q=' + query.phrase : '')) span(aria-hidden="true") &larr; Previous li | #{search.pageFirstRepo.toLocaleString()} - #{search.pageLastRepo.toLocaleString()} of #{search.totalRepos.toLocaleString()} li.next(class=(search.page < search.totalPages ? '' : 'disabled')) a(href='?page_number=' + (search.page+1) + (specificTeamId ? '&teamRepos=' + specificTeamId : '') + (tag ? '&tag=' + tag : '') + (query.tt ? '&tt=' + query.tt : '') + (search.sort ? '&sort=' + search.sort : '') + (query.language ? '&language=' + search.observedLanguagesEncoded.get(query.language) : '') + (query.type ? '&type=' + query.type : '') + (query.phrase ? '&q=' + query.phrase : '')) span(aria-hidden="true") Next &rarr; each repo in search.repos - var quickOrgName = repo.full_name.replace('/' + repo.name, '') - var repoOrgName = repo.organization ? repo.organization.name : quickOrgName - var localOrgName = organization ? 
organization.name : repoOrgName .repo(id=repo.name, style='padding-bottom:36px;display:block') h3 if showIds = repo.id + ' ' a(href='/' + localOrgName + '/repos/' + repo.name)= repo.name if repo.private === true | &nbsp; .label.label-warning.shrink66(title='This is a private repository and not open source.') Private if repo.permissions && specificTeam = ' ' if repo.permissions.admin .label.label-danger.shrink66(title='The ' + specificTeam.name + ' team grants permission to administer the repo') Admin else if repo.permissions.push .label.label-warning.shrink66(title='The ' + specificTeam.name + ' team grants permission to commit directly to the repo and accept pull requests') Write else if repo.permissions.pull .label.label-success.shrink66(title='The ' + specificTeam.name + ' team grants permission to clone/read/see the repo') Read if repo.description p=repo.description small ul.list-inline.list-horizontal-space if repo.language li span(style={color: languageColor(repo.language)}) != octicon('primitive-dot', 10) = ' ' + repo.language if repo.stargazers_count li != octicon('star', 15) = ' ' + repo.stargazers_count.toLocaleString() if repo.forks_count li != octicon('repo-forked', 12) = ' ' + repo.forks_count.toLocaleString() //- I'm using "pushed" as the "updated" to try and match what //- it feels like GH does on their homepage... if repo.momentDisplay.pushed li= 'Updated ' + repo.momentDisplay.pushed //- Looks like GitHub doesn't really show this value //-if repo.momentDisplay.updated li small= 'Updated ' + repo.momentDisplay.updated li &nbsp; if repo.momentDisplay.created li= 'Created ' + repo.momentDisplay.created if repo.size li != octicon('database', 13) = ' ' + fileSize(repo.size * 1024) if specificTeam && teamUrl && specificTeamPermissions && specificTeamPermissions.allowAdministration ul.list-inline li form(method='post', action=teamUrl + 'repos/' + repo.name + '/remove') p: input.btn.btn-sm.btn-muted( type='submit', onclick='return confirm(\'Are you sure that you want to remove access to this repo?\');' value='Remove team access to repo') li: a.btn.btn-sm.btn-muted( href='#', onclick='alert(\'To edit the permission level, a repo admin must perform this operation directly on GitHub.\'); return false;') Edit permission if !organization h6= repo.full_name.replace('/' + repo.name, '') + ' organization' nav ul.pager li.previous(class=(search.page > 1 ? '' : 'disabled')) a(href='?page_number=' + (search.page-1) + (specificTeamId ? '&teamRepos=' + specificTeamId : '') + (tag ? '&tag=' + tag : '') + (query.tt ? '&tt=' + query.tt : '') + (search.sort ? '&sort=' + search.sort : '') + (query.language ? '&language=' + search.observedLanguagesEncoded.get(query.language) : '') + (query.type ? '&type=' + query.type : '') + (query.phrase ? '&q=' + query.phrase : '')) span(aria-hidden="true") &larr; Previous li h4(style="display:inline") | Page #{search.page} of #{search.totalPages} li.next(class=(search.page < search.totalPages ? '' : 'disabled')) a(href='?page_number=' + (search.page+1) + (specificTeamId ? '&teamRepos=' + specificTeamId : '') + (tag ? '&tag=' + tag : '') + (query.tt ? '&tt=' + query.tt : '') + (search.sort ? '&sort=' + search.sort : '') + (query.language ? '&language=' + search.observedLanguagesEncoded.get(query.language) : '') + (query.type ? '&type=' + query.type : '') + (query.phrase ? '&q=' + query.phrase : '')) span(aria-hidden="true") Next &rarr; .col-md-3.col-md-offset-1 div(style='margin-left:12px') if orgs h4 Need to create a repo? 
p To create a new repo, first you need to select which organization will host it. hr h4 Organizations ul.list-unstyled each org in orgs li(style='margin-bottom:8px') a.btn.btn-muted-more.btn-sm( href='/' + org.name + '/repos' )= org.name <file_sep>/views/organization/index.pug //- //- Copyright (c) Microsoft. All rights reserved. //- Licensed under the MIT license. See LICENSE file in the project root for full license information. //- extends ../layout block content div.container h1 Organization Dashboard p a.btn.btn-default(href='/organization/errors/active') Review untriaged errors <file_sep>/middleware/uptime.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // // Every minute, send an application insights metric indicating how long this // has been running. const moment = require('moment'); function everyMinute(insights, started) { const now = moment(); const minutes = now.diff(started, 'minutes'); insights.trackMetric('NodeApplicationUptime', minutes); } function initialize(insights) { if (insights) { const started = moment(); const report = everyMinute.bind(null, insights, started); setInterval(report, 1000 * 60); } } module.exports = { initialize: initialize, }; <file_sep>/routes/org/repoWorkflowEngine.js var utils = require('../../utils'); const async = require('async'); const fs = require('fs'); const path = require('path'); var repoWorkFlowEngine = RepoWorkflowEngine.prototype; function RepoWorkflowEngine(team, org, approvalPackage) { this.team = team; this.request = approvalPackage.request; this.user = approvalPackage.requestingUser; this.id = approvalPackage.id; this.org = org; this.typeName = 'Repository Create'; } repoWorkFlowEngine.messageForAction = function (action) { var message = null; if (action == 'deny') { message = 'The repo was not approved at this time.'; } else if (action == 'approve') { message = 'The repo has been created.'; } return message; }; repoWorkFlowEngine.editGet = function (req, res) { var self = this; var oss = self.team.oss; oss.render(req, res, 'org/team/approvals/editRepo', 'Edit Repo Request', { entry: this.request, teamUrl: req.teamUrl, team: req.team, }); }; repoWorkFlowEngine.editPost = function (req, res, next) { var self = this; var dc = self.team.oss.dataClient(); var visibility = req.body.repoVisibility; if (!(visibility == 'public' || visibility == 'private')) { return next(new Error('Visibility for the repo request must be provided.')); } var updates = { repoName: req.body.repoName, repoVisibility: visibility, repoUrl: req.body.repoUrl, repoDescription: req.body.repoDescription, }; dc.updateApprovalRequest(self.id, updates, function (error) { if (error) { return next(utils.wrapError(error, 'There was a problem updating the request.')); } res.redirect(req.teamUrl + 'approvals/' + self.id); }); }; repoWorkFlowEngine.getApprovedViewName = function () { return 'org/team/repos/repoCreated'; }; repoWorkFlowEngine.getDecisionEmailViewName = function () { return 'repoApprovals/decision'; }; repoWorkFlowEngine.getDecisionEmailSubject = function (approved, request) { return approved ? `Your ${request.repoName} repo is ready` : `Your ${request.repoName} repo request was not approved`; }; repoWorkFlowEngine.getDecisionEmailHeadline = function (approved/*, request*/) { return approved ? 
'Repo ready' : 'Request returned'; }; repoWorkFlowEngine.generateSecondaryTasks = function (callback) { var self = this; var pendingRequest = self.request; var tasks = []; var org = self.org; var repoName = pendingRequest.repoName; var teamsCount = Math.floor(pendingRequest.teamsCount); for (var i = 0; i < teamsCount; i++) { var key = 'teamid' + i; var teamId = pendingRequest[key]; var permission = pendingRequest[key + 'p']; if (teamId && permission) { tasks.push(createAddRepositoryTask(org, repoName, teamId, permission)); } } if (pendingRequest.claEntity) { tasks.push(createSetLegacyClaTask(org, repoName, pendingRequest.claEntity, pendingRequest.claMail)); } if (pendingRequest.template) { tasks.push(createAddTemplateFilesTask(org, repoName, pendingRequest.template)); } callback(null, tasks); }; repoWorkFlowEngine.performApprovalOperation = function (callback) { var self = this; var properties = { description: self.request.repoDescription, homepage: self.request.repoUrl, 'private': self.request.repoVisibility == 'public' ? false : true, gitignore_template: self.request.gitignore_template, }; var org = self.org; org.createRepository(self.request.repoName, properties, function (error, newRepoDetails) { if (error) { error = utils.wrapError(error, `The GitHub API did not allow the creation of the new repo. ${error.message}`); } callback(error, newRepoDetails); }); }; function createSetLegacyClaTask(org, repoName, legalEntity, claMail) { 'use strict'; return function setLegacyClaTask(callback) { const repo = org.repo(repoName); repo.enableLegacyClaAutomation({ emails: claMail, legalEntity: legalEntity, }, (enableClaError) => { // Don't propagate as an error, just record the issue... let message = claMail ? `Successfully enabled the ${legalEntity} CLA for ${repoName}, notifying ${claMail}.` : `Successfully enabled the ${legalEntity} CLA for ${repoName}`; if (enableClaError) { message = `The CLA could not be enabled for the repo ${repoName} using the notification e-mail address(es) ${claMail} (${enableClaError})`; } const result = { error: enableClaError, message: message, }; callback(undefined, result); }); }; } var createAddRepositoryTask = function createAddRepoTask(org, repoName, id, permission) { return function (cb) { async.retry({ times: 3, interval: function (retryCount) { return 500 * Math.pow(2, retryCount); } }, function (callback) { org.team(id).addRepository(repoName, permission, function (error) { if (error) { return callback(error); } return callback(); }); }, function (error) { // Don't propagate as an error, just record the issue... var message = `Successfully added the "${repoName}" repo to GitHub team ID "${id}" with permission level ${permission.toUpperCase()}.`; if (error) { message = `The addition of the repo "${repoName}" to GitHub team ID "${id}" failed. 
The GitHub API returned an error: ${error.message}.`; } var result = { error: error, message: message, }; cb(null, result); }); }; }; function createAddTemplateFilesTask(org, repoName, templateName) { 'use strict'; const templatePath = path.join(__dirname, '../../data/templates/'); const userName = org.oss.configuration.github.user.initialCommit.username; const token = org.oss.configuration.github.user.initialCommit.token; const repo = org.repo(repoName); let files = []; return (taskCallback) => { async.waterfall([ function addCollaborator(callback) { repo.addCollaborator(userName, 'push', callback); }, function createDatasource(callback) { fs.readdir(path.join(templatePath, templateName), (error, fileNames) => { async.parallel(fileNames.map(fileName => { return (cb) => { fs.readFile(path.join(templatePath, templateName, fileName), 'utf8', (error, file) => { cb(error, { path: fileName, content: file }); }); }; }), callback); }); }, function addTemplateFiles(datasource, callback) { const message = 'Initial commit'; async.series(datasource.map(item => { return (cb) => { repo.createContents(token, item.path, message, item.content, cb); }; }), (error, result) => { if (!error) { files = datasource.map((item) => { return item.path; }); } callback(error, result); }); }, function removeCollaborator(result, callback) { repo.removeCollaborator(userName, callback); }, ], (error) => { var message = `Initial commit of ${files.join(', ')} files to the ${repoName} repo succeeded.`; if (error) { message = `Initial commit of template file(s) to the ${repoName} repo failed. An error: ${error.message}.`; } var result = { error: error, message: message, }; taskCallback(null, result); }); }; } module.exports = RepoWorkflowEngine;<file_sep>/routes/thanks.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // var express = require('express'); var router = express.Router(); var cachedPackageInformation = null; // Super-synchronous but rarely used page... function getPackageInfo(config) { if (cachedPackageInformation) { return cachedPackageInformation; } var thisPackage = require('../package.json'); cachedPackageInformation = {}; const privateFeedScope = config && config.npm && config.npm.privateFeedScope ? config.npm.privateFeedScope : 'no-configured-private-feed-scope'; for (var dependency in thisPackage.dependencies) { var componentPackage = require('../node_modules/' + dependency + '/package.json'); if (componentPackage && componentPackage.name && !componentPackage.name.includes(`@${privateFeedScope}`)) { cachedPackageInformation[dependency] = { homepage: componentPackage.homepage, description: componentPackage.description, }; } } return cachedPackageInformation; } router.get('/', function (req, res) { var config = req.app.settings.runtimeConfig.obfuscatedConfig; var components = getPackageInfo(config); res.render('thanks', { config: config, components: components, serviceBanner: config && config.serviceMessage ? config.serviceMessage.banner : undefined, title: 'Open Source Components', }); }); module.exports = router; <file_sep>/views/org/index.pug //- //- Copyright (c) Microsoft. All rights reserved. //- Licensed under the MIT license. See LICENSE file in the project root for full license information. 
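//- Descriptive note (added; inferred from the markup below): this dashboard expects `org`
//- (name, baseUrl, inner.settings) and `accountInfo` (membership status and visibility,
//- sudoer flag, pending approvals, organizationOverview.teams, teamsMaintainedHash, orgUser).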
//- extends ../layout block js_doc_ready | if (typeof initializeManageFilter == 'function') { initializeManageFilter(); } | if (typeof initializeMembershipFilter == 'function') { initializeMembershipFilter(); } block content div.container h1 span.capitalize= org.name + ' ' small Organization div.container div.row(style='margin-top:16px') div.col-md-3.col-lg-3 div.metro-box.ms-dark-blue a(href=org.baseUrl + 'new-repo') h3 Create a repo p NEW REPO div.col-md-3.col-lg-3 div.metro-box.ms-blue a(href=org.baseUrl + 'teams?set=available') h3 Join a team p VIEW TEAMS div.col-md-3.col-lg-3 div.metro-box.ms-purple a(href='https://github.com/orgs/' + org.name + '/new-team', target='_new') h3 Add a team p NEW TEAM div.col-md-3.col-lg-3 div.metro-box(class=accountInfo.isMembershipPublic === true ? 'ms-green' : 'ms-yellow') a(href=org.baseUrl + 'membership') if accountInfo.isMembershipPublic === true h3 Public member else h3 Concealed p= 'PUBLIC MEMBERSHIP' if accountInfo.isSudoer h2 SUDO p Your account has sudoer rights for this organization. You have additional capabilities enabled to help ensure the health of the organization, its teams and repos. Please use care. if accountInfo.organizationOverview.teams.maintainer && accountInfo.organizationOverview.teams.maintainer.length if accountInfo.pendingApprovals && accountInfo.pendingApprovals.length && accountInfo.pendingApprovals.length > 0 h2 Pending Approvals p a.btn.btn-default(href='./approvals/') See all pending #{org.name} approvals (#{accountInfo.pendingApprovals.length}) if accountInfo.organizationOverview.teams.maintainer h2.capitalize #{org.name} Teams You Maintain script(type='text/javascript'). function initializeManageFilter() { var inputManageFilter = $('#manage-filter'); if (inputManageFilter) { inputManageFilter.keyup(function () {; $.uiTableFilter($('table#manage-table'), this.value, ['Title', 'Organization', 'GitHub Name']); }); } } div.container table.table#manage-table thead tr th(colspan='1') form#manage-filter-form input.form-control#manage-filter(name='filter', placeholder='Filter teams I manage', type='text') th p i.glyphicon.glyphicon-search tr th GitHub Name // th Organization th.thirtypercent Manage tbody each team in accountInfo.organizationOverview.teams.maintainer tr td a.btn.btn-sm.btn-muted(href=org.baseUrl + 'teams/' + team.slug + '/')= team.name // td= team.org.name td.thirtypercent p a.btn.btn-sm.btn-default(href=org.baseUrl + 'teams/' + team.slug + '/') Manage Team if accountInfo && accountInfo.membershipStatus === 'active' && accountInfo.isMembershipPublic !== true h1 Go public with your support of the #{org.name} org p Your profile on GitHub currently does not list your participation in the #{org.name} organization. By making your association public, others in the community will see you listed on the page for #{org.name} and your personal GitHub profile page will show the logo, too. p a.btn.btn-default.btn-sm(href=org.baseUrl + 'membership') Learn more h2 Team Memberships if accountInfo.teamsMaintainedHash p Here are teams that you are a member of but do not maintain. if accountInfo.organizationOverview.teams.member && accountInfo.organizationOverview.teams.member.length script(type='text/javascript'). 
function initializeMembershipFilter() { var inputMembershipFilter = $('#membership-filter'); if (inputMembershipFilter) { inputMembershipFilter.keyup(function () {; $.uiTableFilter($('table#membership-table'), this.value, ['Team']); }); } } table.table#membership-table thead tr th(colspan='1') form#membership-filter-form input.form-control#membership-filter(name='filter', placeholder='Filter my teams', type='text') th p i.glyphicon.glyphicon-search tr th Team th.thirtypercent View tbody - var everyoneTeamId = org.inner.settings.teamAllMembers each team in accountInfo.organizationOverview.teams.member if team.id && accountInfo.teamsMaintainedHash && accountInfo.teamsMaintainedHash[team.id] !== undefined // Skipping this team since they are already maintaining it else tr td a.capitalize.btn.btn-sm.btn-muted(href='/' + team.organization.login + '/teams/' + team.slug, target='_new')= team.name td.thirtypercent p a.btn.btn-default.btn-sm(href='/' + team.organization.login + '/teams/' + team.slug, target='_new') | Open on GitHub else //-p You are not currently a member of any GitHub teams that grant you permission to specific repositories. You may be pre-approved to join teams. p You are not currently a member of any GitHub teams for #{org.name}. <em>This view is cached.</em> p a.btn.btn-default(href=org.baseUrl + 'teams')= (accountInfo.organizationOverview.teams.member && accountInfo.organizationOverview.teams.member.length) ? 'Join another team' : 'Join a team' if accountInfo.orgUser hr - var orgUser = accountInfo.orgUser h1(style='margin:36px 0') About the #{org.name} Organization div.row div.col-md-3.col-lg-3 p img.img-thumbnail.img-responsive(src=orgUser.avatar(400), alt=(orgUser.name || orgUser.login)) h3= orgUser.name h4= orgUser.login p(style='margin-top:18px') a.btn.btn-sm.btn-muted(href='https://github.com/' + org.name, target='_new') | Open on GitHub div.col-md-8.col-lg-8.col-md-offset-1.col-lg-offset-1 div.row div.col-md-6.col-lg-6 if orgUser.company h6 Company p= orgUser.company if orgUser.location h6 Location p= orgUser.location if orgUser.email h6 E-mail p= orgUser.email if orgUser.otherFields.blog h6 On the Web p a(href=orgUser.otherFields.blog, target='_new') = orgUser.otherFields.blog + ' ' i.glyphicon.glyphicon-share-alt if orgUser.getProfileCreatedDate() h6 Created p time(datetime=orgUser.getProfileCreatedDate().toISOString())= orgUser.getProfileCreatedDate().toDateString() if orgUser.getProfileCreatedDate() h6 Updated p time(datetime=orgUser.getProfileUpdatedDate().toISOString())= orgUser.getProfileUpdatedDate().toDateString() hr if org.inner.settings.organizationPurpose h6 How we use this organization p= org.inner.settings.organizationPurpose if org.inner.settings.type h6 Supported Repository Types ul.list-unstyled li Public if org.inner.settings.type == 'publicprivate' || org.inner.settings.type == 'private' li Private div.col-md-6.col-lg-h6 h6 Repositories if orgUser.otherFields.public_repos h2 = orgUser.otherFields.public_repos + ' ' small Public if orgUser.otherFields.total_private_repos h2 = orgUser.otherFields.total_private_repos + ' ' small Private hr h6 Remaining Repositories if orgUser.otherFields.plan && orgUser.otherFields.plan.private_repos h2.capitalize = orgUser.otherFields.plan.name + ' ' small Plan h2 = (orgUser.otherFields.plan.private_repos - orgUser.otherFields.total_private_repos) + ' ' small Private h2 | &infin;&nbsp; small Public if org.inner.settings.trainingResources - var tr = org.inner.settings.trainingResources if tr.organization && 
tr.organization.length && tr.organization.length > 0 hr h3 Organization Resource#{tr.organization.length > 1 ? 's' : ''} ul.list-unstyled each resource in tr.organization li p a(href=resource.link, target='_new') = resource.title + ' ' i.glyphicon.glyphicon-share-alt if resource.text br small= resource.text hr p a.btn.btn-default(href=org.baseUrl + 'leave') Leave #{org.name}<file_sep>/views/organization/whois/result.pug //- //- Copyright (c) Microsoft. All rights reserved. //- Licensed under the MIT license. See LICENSE file in the project root for full license information. //- extends ../../layout block content div.container p a.btn.btn-sm.btn-default(href='/') Back if info && info.renamedUserMessage .alert.alert-warning h3 Renamed user p.lead= info.renamedUserMessage if info && info.ghu && info.ghid h1 = info.ghu if info.serviceAccount = ' ' .label.label-warning Service account p Here is the information we have on this user. This user has a #[strong link]. if personEntry && personEntry.serviceAccount .alert.alert-success h3 Marked as a Service Account .row .col-md-6 h5 REDIS PEOPLE COLLECTION p small Daily snapshot from people feed if personEntry .alert.alert-success h3 | Known person &nbsp; small= upn if personEntry && personEntry.alias p a.btn.btn-sm.btn-default(target='_new', href='http://who/is/' + personEntry.alias)= 'http://who/is/' + personEntry.alias p Based on the most recent Redis cache of the employee directory, this employee is known and active. pre(style='font-size:8pt;width:50%')= JSON.stringify(personEntry, undefined, 2) else - var whatWeKnow = upn || info.aadupn - var strippedAlias = whatWeKnow && whatWeKnow.includes('@') ? whatWeKnow.substr(0, whatWeKnow.indexOf('@')) : null .alert(class=info && info.serviceAccount ? 'alert-success' : 'alert-danger') h3 | Unknown person &nbsp; small= whatWeKnow p Based on the most recent Redis cache of the employee directory, there is no entry for this UPN. if strippedAlias p We have stripped a potential alias from the e-mail address, #{whatWeKnow}, to get to #{strippedAlias}... p a.btn.btn-sm.btn-default(target='_new', href='http://who/is/' + strippedAlias)= 'http://who/is/' + strippedAlias .col-md-6 h5 REAL-TIME MICROSOFT GRAPH p small Includes service accounts if realtimeGraph .alert.alert-success h3 Account exists by ID if realtimeGraph.displayName h4= realtimeGraph.displayName ul li AAD ID: #{realtimeGraph.id} if realtimeGraph.mail li Corporate mail: #{realtimeGraph.mail} if realtimeGraph.userPrincipalName li UPN: #{realtimeGraph.userPrincipalName} if realtimeGraph.manager h4 Manager: #{realtimeGraph.manager.displayName} ul li AAD ID: #{realtimeGraph.manager.id} if realtimeGraph.manager.mail li Corporate mail: #{realtimeGraph.manager.mail} if realtimeGraph.manager.userPrincipalName li UPN: #{realtimeGraph.manager.userPrincipalName} else if info && info.serviceAccount .alert.alert-success h4 No manager in the graph p This account has no manager. It is properly marked as a "Service Account" else .alert.alert-warning h4 No manager in the graph p This account has no manager. It is likely a Service Account. else p No results. 
The real-time lookup requires the following conditions: if info && info.aadoid .alert.alert-danger h3 Former employee h5 Known ID #{info.aadoid} is missing else .alert.alert-warning h3 Potential former employee h5 Validate whether they are still employed p No AAD ID was previously known, so they never linked their account using the portal when we were able to retrieve their ID ul li That the account is linked li That the linked account has an Active Directory ID (AAD ID). An account may not have the AAD ID if they have never used the portal while signed in. if info && info.aadoid li This account #[strong did] have an AAD ID that failed the lookup: #{info.aadoid} table.table thead tr th GitHub Username th GitHub User ID tbody tr td p a.btn.btn-muted-more.btn-sm(href='https://github.com/' + info.ghu, target='_new')= info.ghu td= info.ghid if info.aadupn h2 #{config.brand.companyName} Directory table.table thead tr th Name th Email tbody tr td= info.aadname td a.btn.btn-sm.btn-muted-more(href='mailto:' + info.aadupn)= info.aadupn hr //- not used if info.foundGitHubId pre= JSON.stringify(info.foundGitHubId, undefined, 2) if info && info.orgs h1 Active GitHub Organization Memberships .container each org in info.orgs .row .col-md-4 p.lead= org.name .col-md-4 p a.btn.btn-sm.btn-muted(target='_new', href='https://github.com/orgs/' + org.name + '/people?utf8=%E2%9C%93&query=' + info.ghu) View #{info.ghu} in GitHub people list - var ghLogin = info.ghu || (info.githubInfoButNoLink ? info.githubInfoButNoLink.login : 'UNKNOWN THIS IS A BUG') form(method='post', action=postUrl || '/organization/whois/github/' + ghLogin) if info.orgs.length && info.orgs.length > 0 h1 Actions p.lead DANGER ZONE - NO CONFIRMATION ul.list-inline li input.btn.btn-danger(type='submit', value=info.ghid && info.ghu ? 'Remove link + drop from ' + info.orgs.length + ' orgs' : 'Drop from ' + info.orgs.length + ' orgs', name='remove-all') if info.aadupn && !info.serviceAccount li input.btn.btn-default(type='submit', value='Mark as Service Account', name='mark-as-service-account') if info.aadupn && info.serviceAccount li input.btn.btn-default(type='submit', value='Remove Service Account designation', name='unmark-service-account') else if info.ghid && info.ghu p <strong>NOTE:</strong> This user is not currently a member of any organizations. However, the "link" with their previous account still exists. Consider removing this. h1 Actions p.lead DANGER ZONE - NO CONFIRMATION p input.btn.btn-danger(type='submit', value='Remove link', name='remove-link-only') if info && info.githubInfoButNoLink h1 WHOIS #{info.githubInfoButNoLink.login} p.lead We could not find a link for this user. This is a GitHub user. p This person is not in any of the organizations managed by this site. //-form(method='post', action=postUrl || '/organization/whois/github/' + info.ghu) p input.btn.btn-default(type='submit', value='Drop from org', name='remove-primary-org') h3 GitHub Profile ul each value, property in info.githubInfoButNoLink li strong= property | :&nbsp; = value else if info && info.aadupn p Their UPN was #{info.aadupn} else h1 We don't know who that is in relation to this system! ul li No "link" present currently li Not a member of any of the official organizations at this time based on GitHub API data <file_sep>/business/account.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. 
//

'use strict';

const common = require('./common');
const wrapError = require('../utils').wrapError;

const githubEntityClassification = require('../data/github-entity-classification.json');
const primaryAccountProperties = githubEntityClassification.account.keep;
const secondaryAccountProperties = githubEntityClassification.account.strip;

class Account {
  constructor(entity, operations, getCentralOperationsToken) {
    common.assignKnownFields(this, entity, 'account', primaryAccountProperties, secondaryAccountProperties);

    const privates = _private(this);
    privates.operations = operations;
    privates.getCentralOperationsToken = getCentralOperationsToken;
  }

  getDetails(options, callback) {
    if (!callback && typeof(options) === 'function') {
      callback = options;
      options = null;
    }
    options = options || {};
    const self = this;
    const token = _private(this).getCentralOperationsToken();
    const operations = _private(this).operations;
    const id = this.id;
    if (!id) {
      return callback(new Error('Must provide a GitHub user ID to retrieve account information.'));
    }
    const parameters = {
      id: id,
    };
    const cacheOptions = {
      maxAgeSeconds: options.maxAgeSeconds || operations.defaults.accountDetailStaleSeconds,
    };
    if (options.backgroundRefresh !== undefined) {
      cacheOptions.backgroundRefresh = options.backgroundRefresh;
    }
    return operations.github.call(token, 'users.getById', parameters, cacheOptions, (error, entity) => {
      if (error) {
        return callback(wrapError(error, `Could not get details about account "${id}".`));
      }
      common.assignKnownFields(self, entity, 'account', primaryAccountProperties, secondaryAccountProperties);
      callback(null, entity);
    });
  }
}

module.exports = Account;

const privateSymbol = Symbol();
function _private(self) {
  if (self[privateSymbol] === undefined) {
    self[privateSymbol] = {};
  }
  return self[privateSymbol];
}
<file_sep>/middleware/errorHandler.js
//
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
//

/*eslint no-console: ["error", { allow: ["error", "log"] }] */

const querystring = require('querystring');
const utils = require('../utils');

function redactRootPathsFromString(string, path) {
  if (typeof string === 'string' && string.includes && string.split) {
    return string.split(path).join('[app]');
  }
  return string;
}

function redactRootPaths(view) {
  const path = process.cwd();
  if (typeof view === 'object') {
    for (var property in view) {
      if (view.hasOwnProperty(property)) {
        var value = view[property];
        if (typeof value === 'string') {
          view[property] = redactRootPathsFromString(value, path);
        }
      }
    }
  } else if (typeof view === 'string') {
    return redactRootPathsFromString(view, path);
  }
  return view;
}

function containsNewlinesNotHtml(error) {
  if (error && error.message && error.message.includes && error.message.split) {
    var newlines = error.message.split('\n');
    return newlines.length > 3 && !error.message.includes('</');
  }
  return false;
}

module.exports = function (err, req, res, next) {
  // CONSIDER: Let's eventually decouple all of our error message improvements to another area to keep the error handler intact.
  var config = null;
  var correlationId = req.correlationId;
  var errorStatus = err ? (err.status || err.statusCode) : undefined;
  // Per GitHub: https://developer.github.com/v3/oauth/#bad-verification-code
  // When they offer a code that another GitHub auth server interprets as invalid,
  // the app should retry.
  if ((err.message === 'The code passed is incorrect or expired.'
|| (err.message === 'Failed to obtain access token' && err.oauthError.message === 'The code passed is incorrect or expired.')) && req.scrubbedUrl.startsWith('/auth/github/')) { req.insights.trackMetric('GitHubInvalidExpiredCodeRedirect', 1); req.insights.trackEvent('GitHubInvalidExpiredCodeRetry'); return res.redirect(req.scrubbedUrl === '/auth/github/callback/increased-scope?code=*****' ? '/auth/github/increased-scope' : '/auth/github'); } if (err.message && err.message.includes && err.message.includes('ETIMEDOUT') && (err.message.includes('192.168.127.12') || err.message.includes('172.16.17.32'))) { req.insights.trackMetric('GitHubApiTimeout', 1); req.insights.trackEvent('GitHubApiTimeout'); err = utils.wrapError(err, 'The GitHub API is temporarily down. Please try again soon.'); } var primaryUserInstance = req.user ? req.user.github : null; if (req && req.app && req.app.settings && req.app.settings.dataclient && req.app.settings.runtimeConfig) { config = req.app.settings.runtimeConfig; if (config.authentication.scheme !== 'github') { primaryUserInstance = req.user ? req.user.azure : null; } var version = config && config.logging && config.logging.version ? config.logging.version : '?'; var dc = req.app.settings.dataclient; if (config.logging.errors && err.status !== 403 && err.skipLog !== true) { dc.insertErrorLogEntry(version, req, err); var insightsProperties = { url: req.scrubbedUrl || req.originalUrl || req.url, }; if (errorStatus) { insightsProperties.statusCode = errorStatus.toString(); } req.insights.trackException(err, insightsProperties); } } if (err !== undefined && err.skipLog !== true) { console.log('Error: ' + (err && err.message ? err.message : 'Error is undefined.')); if (err.stack) { console.error(err.stack); } if (err.innerError) { var inner = err.innerError; console.log('Inner: ' + inner.message); if (inner.stack) { console.log(inner.stack); } } } // Bubble OAuth errors to the forefront... this is the rate limit scenario. if (err && err.oauthError && err.oauthError.statusCode && err.oauthError.statusCode && err.oauthError.data) { var detailed = err.message; err = err.oauthError; err.status = err.statusCode; var data = JSON.parse(err.data); if (data && data.message) { err.message = err.statusCode + ': ' + data.message; } else { err.message = err.statusCode + ' Unauthorized received. You may have exceeded your GitHub API rate limit or have an invalid auth token at this time.'; } err.detailed = detailed; } // Don't leak the Redis connection information. if (err && err.message && err.message.indexOf('Redis connection') >= 0 && err.message.indexOf('ETIMEDOUT')) { err.message = 'The session store was temporarily unavailable. Please try again.'; err.detailed = 'Azure Redis Cache'; } if (res.headersSent) { console.error('Headers were already sent.'); return next(err); } if (err && err.forceSignOut === true && req && req.logout) { req.logout(); } var safeMessage = redactRootPaths(err.message); const view = { message: safeMessage, encodedMessage: querystring.escape(safeMessage), messageHasNonHtmlNewlines: containsNewlinesNotHtml(err), serviceBanner: config && config.serviceMessage ? config.serviceMessage.banner : undefined, detailed: err && err.detailed ? redactRootPaths(err.detailed) : undefined, encodedDetailed: err && err.detailed ? querystring.escape(redactRootPaths(err.detailed)) : undefined, errorFancyLink: err && err.fancyLink ? err.fancyLink : undefined, errorStatus: errorStatus, skipLog: err.skipLog, skipOops: err && err.skipOops ? 
err.skipOops : false, error: {}, title: err.title || (err.status === 404 ? 'Not Found' : 'Oops'), primaryUser: primaryUserInstance, user: req.user, config: config && config.obfuscatedConfig ? config.obfuscatedConfig : null, }; // Depending on the library in use, we get everything from non-numeric textual status // descriptions to status codes as strings and more. Set the status code found in // the error if we have it. var errStatusAsNumber = null; if (err.status) { errStatusAsNumber = parseInt(err.status); } const resCode = errStatusAsNumber || err.code || err.statusCode || 500; res.status(resCode); // Support JSON-based error display for the API route, showing just a small // subset of typical view properties to share from the error instance. if (err && err.json === true) { const safeError = { message: safeMessage, correlationId: correlationId, }; if (err.documentation_url) { safeError.documentation_url = err.documentation_url; } const fieldsOfInterest = ['serviceBanner', 'detailed']; fieldsOfInterest.forEach((fieldName) => { if (view[fieldName]) { safeError[fieldName] = view[fieldName]; } }); res.json(safeError); } else { res.render('error', view); } }; <file_sep>/lib/mailProvider/index.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // 'use strict'; const providers = [ 'customMailService', 'mockMailService', ]; // Providers contract: // - sendMail function(message, callback): sends mail // - html property: whether or not the provider sends HTML mail // - info property: version and name info to use in any logging function patchOverride(provider, newToAddress, htmlOrNot) { const sendMail = provider.sendMail; provider.sendMail = (mailOptions, callback) => { let originalTo = mailOptions.to; if (typeof originalTo !== 'string' && originalTo.join) { originalTo = originalTo.join(', '); } if (!mailOptions.content) { mailOptions.content = ''; } mailOptions.to = newToAddress; const initialContent = mailOptions.content; const redirectMessage = `This mail was intended for "${originalTo}" but was instead sent to "${newToAddress}" per a configuration override.\n`; mailOptions.content = htmlOrNot ? `<p><em>${redirectMessage}</em></p>\n${initialContent}` : `${redirectMessage}\n${initialContent}`; sendMail(mailOptions, callback); }; return provider; } module.exports = function createMailProviderInstance(config, callback) { const mailConfig = config.mail; if (mailConfig === undefined) { return callback(); } const provider = mailConfig.provider; if (!provider) { return callback(); } let found = false; providers.forEach((supportedProvider) => { if (supportedProvider === provider) { found = true; const providerInstance = require(`./${supportedProvider}`)(config); if (mailConfig.overrideRecipient) { patchOverride(providerInstance, mailConfig.overrideRecipient, providerInstance.html); } return callback(null, providerInstance); } }); if (found === false) { return callback(new Error(`The mail provider "${mailConfig.provider}" is not implemented or configured at this time.`)); } }; <file_sep>/views/reconnectGitHub.pug //- //- Copyright (c) Microsoft. All rights reserved. //- Licensed under the MIT license. See LICENSE file in the project root for full license information. 
//- extends layout //- Conditions for this page: // - linked // - using AAD-primary authentication // - no stored GitHub token block content div.container if migratedOpenSourceHubUser !== undefined h1 Welcome else h1 Please sign in with GitHub if migratedOpenSourceHubUser !== undefined p.lead Please sign in with GitHub p Your account has been migrated from the Open Source Hub to the new GitHub management experience for Microsoft. p Since this is the first time you are using this new app, we need you to authenticate with your "#{expectedUsername}" GitHub account. else p.lead This application needs to connect to your GitHub account to continue. .vertical-space p a.btn.btn-lg.btn-primary(href='/signin/github') Authenticate with GitHub | &nbsp; &nbsp; &nbsp; a.btn.btn-lg.btn-default(href='/signout') Sign out <file_sep>/jobs/reports/repositories.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // /* eslint no-console: ["error", { allow: ["warn", "dir", "log"] }] */ 'use strict'; const _ = require('lodash'); const automaticTeams = require('../../webhooks/tasks/automaticTeams'); const moment = require('moment'); const Q = require('q'); const qlimit = require('qlimit'); const providerName = 'repositories'; const definitions = require('./repositoryDefinitions.json'); const exemptRepositories = require('./exemptRepositories.json'); const definitionsByName = {}; for (let i = 0; i < definitions.length; i++) { const definition = definitions[i]; definitionsByName[definition.name] = definition; } const simpleDateFormat = 'l'; // TODO: Configuration over hardcoding const knownHardcodedClaWebhooks = new Set([ ]); function processRepositories(context) { return getRepos(context) .then(iterateRepos); } function getRepositoryAdministrators(repositoryContext) { const repository = repositoryContext.repository; const administrators = new Map(); const cacheOptions = { backgroundRefresh: false, // immediate maxAgeSeconds: 60 * 60 * 24 * 7, // 7 days }; return Q.allSettled([ getRepositoryTeams(repository, cacheOptions) .then(justAdminTeams) .then(adminTeams => { repositoryContext.countOfAdministratorTeams = adminTeams.length; return Q(adminTeams); }) .then(teamMembers.bind(null, cacheOptions, administrators)), getRepositoryDirectCollaborators(repository, cacheOptions) .then(justAdminCollaborators) .then(directAdminCollaborators => { repositoryContext.countOfAdministratorCollaborators = directAdminCollaborators.length; return Q(directAdminCollaborators); }) .then(storeCollaborators.bind(null, administrators)), ]).then(() => { return Q(administrators); }).catch(issue => { throw issue; }); } function teamMembers(cacheOptions, administrators, teams) { return Q.all(teams.map(teamPermission => { const deferred = Q.defer(); const team = teamPermission.team; team.getMembers(cacheOptions, (error, members) => { if (error) { return deferred.reject(error); } for (let i = 0; i < members.length; i++) { const member = members[i]; const id = member.id; let entry = administrators.get(id); if (!entry) { entry = createUserEntry(member); administrators.set(id, entry); } entry.reasons.memberships.push(team); } return deferred.resolve(); }); return deferred.promise; })); } function identifyActionableAdmins(repositoryContext, repository, administrators) { const data = automaticTeams.processOrgSpecialTeams(repository.organization); const specialTeamIds = data[2]; const actionableAdministrators = []; const adminIds = 
Array.from(administrators.keys()); for (let i = 0; i < adminIds.length; i++) { const id = adminIds[i]; const account = administrators.get(id); // Remove service accounts if (account.link && account.link.serviceAccount) { continue; } // Direct collaborators should always be involved if (account.reasons.directCollaborator) { actionableAdministrators.push(account); continue; } // Remove administrators who are only in special teams let realMemberships = 0; for (let j = 0; j < account.reasons.memberships.length; j++) { const team = account.reasons.memberships[j].id; if (!specialTeamIds.has(team)) { ++realMemberships; } } if (realMemberships) { actionableAdministrators.push(account); } } repositoryContext.actionableAdministrators = actionableAdministrators; return Q(administrators); } function iterateRepos(context) { let repos = context.entities.repos; if (context.settings.slice) { let offset = 3000; let initial = repos.length > offset ? offset - context.settings.slice : 0; repos = repos.slice(initial, initial + context.settings.slice); } context.processing.repos = { remaining: repos.length, }; const limit = qlimit(context.settings.parallelRepoProcessing || 2); const process = processRepository.bind(null, context); return Q.allSettled(repos.map(limit(process))).then(() => { // Settled values are not reviewed, since many are just missing repos (404) // for repos that have already been deleted or otherwise moved context.repositoryData = _.sortBy(context.repositoryData, 'nameLowercase'); }).thenResolve(context); } function getRepositoryDetails(repositoryContext) { const deferred = Q.defer(); const repository = repositoryContext.repository; const cacheOptions = { backgroundRefresh: false, maxAgeSeconds: 60 * 60 * 24, // 1 day }; repository.getDetails(cacheOptions, error => { if (error) { return deferred.reject(error); } return deferred.resolve(repositoryContext); }); return deferred.promise; } function getIndividualUserLink(context, id) { if (!context.linkData) { return Q.reject(new Error('No link information has been loaded')); } return Q(context.linkData.get(id)); } function gatherLinkData(repositoryContext, administrators) { const keys = Array.from(administrators.keys()); const limit = qlimit(2); return Q.allSettled(keys.map(limit(id => { return getIndividualUserLink(repositoryContext.parent, id).then(link => { const entry = administrators.get(id); entry.link = link; return Q(); }); }))).then(() => { return Q(administrators); }).catch(error => { throw error; }); } function processRepository(context, repository) { console.log(context.processing.repos.remaining-- + ': ' + repository.full_name); // repo context const repositoryContext = { parent: context, definitionsUsed: new Set(), issues: {}, name: repository.full_name, nameLowercase: repository.full_name.toLowerCase(), repository: repository, countOfAdministratorCollaborators: 0, countOfAdministratorTeams: 0, }; if (!context.repositoryData) { context.repositoryData = []; } context.repositoryData.push(repositoryContext); const resolveLinks = gatherLinkData.bind(null, repositoryContext); const getActionableAdmins = identifyActionableAdmins.bind(null, repositoryContext, repository); const getUnlinkedAdmins = identityAdministratorsWithoutLinks.bind(null, repositoryContext); const repositoryAdmins = getRepositoryAdministrators.bind(null, repositoryContext); return getRepositoryDetails(repositoryContext) .then(repositoryAdmins) .then(administrators => { repositoryContext.administrators = administrators; return Q(administrators); }) .then(resolveLinks) 
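    // Once links are resolved, the remaining steps narrow the full administrator set down to
    // "actionable" administrators (dropping service accounts and people whose only access comes
    // from the org's special/automatic teams) and then split those into linked vs. unlinked users.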
.then(getActionableAdmins) .then(getUnlinkedAdmins) .then(() => { const organization = repository.organization; const privateEngineering = organization.privateEngineering; const basicRepository = { repoName: repository.name, entityName: repository.full_name, orgName: organization.name, // Pre-populate; overwritten if and when an approval is found approvalType: { color: 'gray', text: 'Created on GitHub or unknown', }, countOfAdministratorCollaborators: repositoryContext.countOfAdministratorCollaborators || '-', countOfAdministratorTeams: repositoryContext.countOfAdministratorTeams || '-', }; return identifyContributorLicenseAgreeementHooks(context, repositoryContext, basicRepository).then(() => { return getNewRepoCreationInformation(context, repositoryContext, basicRepository).then(() => { const publicPrivateStatus = { text: repository.private ? 'Private' : 'Public', color: repository.private ? 'red' : 'green', }; basicRepository.status = publicPrivateStatus; // Recipients repositoryContext.recipients = []; const corporateAdministrators = []; if (repositoryContext.actionableAdministrators) { for (let y = 0; y < repositoryContext.actionableAdministrators.length; y++) { const admin = repositoryContext.actionableAdministrators[y]; if (admin && admin.link && admin.link.aadupn) { corporateAdministrators.push(admin.link.aadupn); if (!privateEngineering) { // Private engineering orgs do not send individuals nags on emails for now repositoryContext.recipients.push({ type: 'upn', value: admin.link.aadupn, reasons: transformReasonsToArray(admin, repository.full_name), }); } } } } // Send to org admins const orgName = repository.organization.name; const orgData = context.organizationData[orgName]; for (let i = 0; orgData && orgData.organizationContext && orgData.organizationContext.recipients && orgData.organizationContext.recipients.length && i < orgData.organizationContext.recipients.length; i++) { repositoryContext.recipients.push(orgData.organizationContext.recipients[i]); } // Basic administrators info basicRepository.administrators = 'None'; if (corporateAdministrators.length > 0) { let caLink = 'mailto:' + corporateAdministrators.join(';') + '?subject=' + repository.full_name; const peoplePlurality = corporateAdministrators.length > 1 ? 'people' : 'person'; basicRepository.administrators = { link: caLink, text: `${corporateAdministrators.length} ${peoplePlurality}`, }; } const actionEditCollaborators = { link: `https://github.com/${repository.full_name}/settings/collaboration`, text: 'Permissions', }; const actionDelete = { link: `https://github.com/${repository.full_name}/settings`, text: 'Delete', }; const actionView = { link: `https://github.com/${repository.full_name}`, text: 'Open', }; const actionShip = { link: `https://github.com/${repository.full_name}/settings`, text: 'Ship it', }; const actionViewInPortal = context.config.microsoftOpenSource ? { link: `${context.config.microsoftOpenSource.repos}${organization.name}/repos/${repository.name}`, text: 'Details', } : null; const actionConfigureCLA = context.config.microsoftOpenSource ? { link: `${context.config.microsoftOpenSource.repos}${organization.name}/repos/${repository.name}/extensions/cla`, text: 'Configure CLA', } : null; if (repositoryContext.administratorsByType.linked.length === 0 || repositoryContext.actionableAdministrators.length === 0) { addEntityToIssueType(context, repositoryContext, 'noRepositoryAdministrators', basicRepository, actionEditCollaborators, actionViewInPortal); } let createdAt = repository.created_at ? 
moment(repository.created_at) : null; if (createdAt) { basicRepository.created = createdAt.format(simpleDateFormat); } let updatedAt = repository.updated_at ? moment(repository.updated_at) : null; if (updatedAt) { basicRepository.updated = updatedAt.format(simpleDateFormat); } let pushedAt = repository.pushed_at ? moment(repository.pushed_at) : null; if (pushedAt) { basicRepository.pushed = pushedAt.format(simpleDateFormat); } let mostRecentActivityMoment = createdAt; let mostRecentActivity = 'Created'; if (updatedAt && updatedAt.isAfter(mostRecentActivityMoment)) { mostRecentActivity = 'Updated'; mostRecentActivityMoment = updatedAt; } if (pushedAt && pushedAt.isAfter(mostRecentActivityMoment)) { mostRecentActivity = 'Pushed'; mostRecentActivityMoment = pushedAt; } const twoYearsAgo = moment().subtract(2, 'years'); const oneYearAgo = moment().subtract(1, 'years'); const nineMonthsAgo = moment().subtract(9, 'months'); const thirtyDaysAgo = moment().subtract(30, 'days'); const thisWeek = moment().subtract(7, 'days'); const today = moment().subtract(1, 'days'); const ageInMonths = today.diff(createdAt, 'months'); if (ageInMonths > 0) { basicRepository.ageInMonths = ageInMonths === 1 ? '1 month' : ageInMonths + ' months'; } const monthsSinceUpdates = today.diff(mostRecentActivityMoment, 'months'); const timeAsString = monthsSinceUpdates + ' month' + (monthsSinceUpdates === 1 ? '' : 's'); basicRepository.recentActivity = monthsSinceUpdates < 1 ? 'Active' : `${timeAsString} (${mostRecentActivity})`; if (mostRecentActivityMoment.isBefore(nineMonthsAgo)) { basicRepository.abandoned = { text: `${monthsSinceUpdates} months`, color: 'red', }; } if (exemptRepositories && exemptRepositories[repository.id] && exemptRepositories[repository.id].approved && exemptRepositories[repository.id].days) { const exemptionExpiresAt = moment(exemptRepositories[repository.id].approved) .add(exemptRepositories[repository.id].days, 'days') .subtract(2, 'weeks'); if (moment().isAfter(exemptionExpiresAt)) { basicRepository.exemptionExpiresAt = exemptionExpiresAt.format(simpleDateFormat); addEntityToIssueType(context, repositoryContext, 'expiringPrivateEngineeringExemptions', basicRepository, actionShip, actionDelete); } } else if (!repository.private && mostRecentActivityMoment.isBefore(twoYearsAgo)) { addEntityToIssueType(context, repositoryContext, 'abandonedPublicRepositories', basicRepository, actionDelete); } else if (repository.private && mostRecentActivityMoment.isBefore(twoYearsAgo)) { addEntityToIssueType(context, repositoryContext, 'twoYearOldPrivateRepositories', basicRepository, actionDelete); } else if (repository.private && createdAt.isBefore(oneYearAgo) && !privateEngineering) { addEntityToIssueType(context, repositoryContext, 'oneYearOldPrivateRepositories', basicRepository, actionDelete); } else if (repository.private && createdAt.isBefore(thirtyDaysAgo) && !privateEngineering) { addEntityToIssueType(context, repositoryContext, 'privateRepositoriesLessThanOneYear', basicRepository, actionShip, actionDelete); } else if (createdAt.isAfter(thisWeek) && !privateEngineering) { // New public and private repos const repositoryForManagerAndLawyer = shallowCloneWithAdditionalRecipients(basicRepository, repositoryContext.additionalRecipients); if (createdAt.isAfter(today)) { addEntityToIssueType(context, repositoryContext, 'NewReposToday', repositoryForManagerAndLawyer, actionView, actionViewInPortal); } // Always include in the weekly summary addEntityToIssueType(context, repositoryContext, 'NewReposWeek', 
repositoryForManagerAndLawyer, actionView, actionViewInPortal); } // Alert on public repos missing CLA if (!repository.private && !repositoryContext.hasCla) { addEntityToIssueType(context, repositoryContext, 'reposWithoutCLA', basicRepository, actionConfigureCLA); } // Alert on too many administrators, excluding private engineering organizations at this time if (!privateEngineering && repositoryContext.actionableAdministrators.length > context.settings.tooManyRepoAdministrators) { addEntityToIssueType(context, repositoryContext, 'repositoryTooManyAdministrators', basicRepository, actionViewInPortal, actionEditCollaborators); } return Q.delay(context, context.settings.repoDelayAfter || 0); }); }); }).catch(problem => { console.warn(problem); throw problem; }); } function shallowCloneWithAdditionalRecipients(basicRepository, additionalRecipients) { const clone = Object.assign({}, basicRepository); if (additionalRecipients && additionalRecipients.length) { clone.additionalRecipients = additionalRecipients; } return clone; } function getRepositoryWebhooks(repository) { const deferred = Q.defer(); repository.getWebhooks((error, webhooks) => { return error ? deferred.reject(error) : deferred.resolve(webhooks); }); return deferred.promise; } function identifyContributorLicenseAgreeementHooks(context, repositoryContext) { const repository = repositoryContext.repository; if (repository.private) { // We are only interested in public repositories with CLAs at this time return Q(context); } try { const claLegalEntities = repository.organization.legalEntities; if (!claLegalEntities || claLegalEntities.length === 0) { return Q(context); } } catch (notConfigured) { // This org does not have CLA configuration return Q(context); } return getRepositoryWebhooks(repository).then(webhooks => { let hasCla = false; for (let i = 0; i < webhooks.length; i++) { const webhook = webhooks[i]; if (webhook && webhook.config && knownHardcodedClaWebhooks.has(webhook.config.url)) { hasCla = true; break; } } repositoryContext.hasCla = hasCla; return Q(context); }, () => { return Q(context); }); } function getRepositoryApprovals(dataClient, repository, callback) { // Only repositories created on or after 4/24/2017 have the repoId stored in // the approval request. dataClient.getRepositoryApproval('repoId', repository.id, (byIdError, approvals) => { if (byIdError) { return callback(byIdError); } if (approvals && approvals.length > 0) { return callback(null, approvals); } dataClient.getRepositoryApproval('repoName', repository.name, callback); }); } function getNewRepoCreationInformation(context, repositoryContext, basicRepository) { const repository = repositoryContext.repository; const thisWeek = moment().subtract(7, 'days'); let createdAt = repository.created_at ? moment(repository.created_at) : null; let isBrandNew = createdAt.isAfter(thisWeek); const dataClient = context.dataClient; if (!isBrandNew || !dataClient) { return Q(context); } const deferred = Q.defer(); const releaseTypeMapping = context.config && context.config.github && context.config.github.approvalTypes && context.config.github.approvalTypes.fields ? 
context.config.github.approvalTypes.fields.approvalIdsToReleaseType : null; getRepositoryApprovals(dataClient, repositoryContext.repository, (error, approvals) => { if (error || !approvals || approvals.length === 0) { return deferred.resolve(context); } for (let i = 0; i < approvals.length; i++) { const approval = approvals[i]; if (approval && ( (approval.repoId == repositoryContext.repository.id /* not strict equal, data client IDs are strings vs GitHub responses use numbers */) || (approval.org && approval.org.toLowerCase() === repositoryContext.repository.organization.name.toLowerCase()))) { basicRepository.approvalLicense = approval.license; basicRepository.approvalJustification = approval.justification; if (approval.approvalType && releaseTypeMapping) { const approvalTypes = Object.getOwnPropertyNames(releaseTypeMapping); for (let j = 0; j < approvalTypes.length; j++) { const id = approvalTypes[j]; const title = releaseTypeMapping[id]; if (approval.approvalType === id) { basicRepository.approvalTypeId = approval.approvalType; // Hard-coded specific to show justification text or approval links if (id === 'ReleaseReview' && approval.approvalUrl) { basicRepository.approvalType = { text: title, link: approval.approvalUrl, }; } else if (id !== 'Exempt') { basicRepository.approvalType = title; } else { basicRepository.approvalType = `${title}: ${approval.justification}`; } } } } if (!basicRepository.approvalType) { basicRepository.approvalType = approval.approvalType; // Fallback if it's not configured in the system } const createdBy = approval.ghu; if (!createdBy) { return deferred.resolve(context); } else { basicRepository.createdBy = createdBy; return deferred.resolve(getIdFromUsername(context, repositoryContext.repository.organization, createdBy).then(id => { return getIndividualUserLink(context, id).then(link => { basicRepository.createdBy = link.aadname || basicRepository.createdBy; basicRepository.createdByUpn = link.aadupn; basicRepository.createdByLink = basicRepository.createdByUpn ? { link: `mailto:${basicRepository.createdByUpn}`, text: basicRepository.createdBy, } : basicRepository.createdBy; return link.aadupn ? 
augmentWithAdditionalRecipients(context, repositoryContext, link) : Q(context); }); })); } } } return deferred.resolve(context); }); return deferred.promise; } function augmentWithAdditionalRecipients(context, repositoryContext, createdByLink) { if (!createdByLink || !createdByLink.aadupn) { return Q(context); } const upn = createdByLink.aadupn; const createdByName = createdByLink.aadname || upn; const operations = context.operations; const mailAddressProvider = operations.providers.mailAddressProvider; // Only if the provider supports both advanced Microsoft-specific functions for now if (!mailAddressProvider || !mailAddressProvider.getLegalContactInformationFromUpn || !mailAddressProvider.getManagerInformationFromUpn) { return Q(context); } const fullRepoName = repositoryContext.repository.full_name; let additional = []; const deferred = Q.defer(); mailAddressProvider.getManagerInformationFromUpn(upn, (getManagerError, managerInformation) => { if (getManagerError) { console.warn(getManagerError); } else if (managerInformation && managerInformation.userPrincipalName) { const managerName = managerInformation.preferredName || managerInformation.alias || managerInformation.userPrincipalName; additional.push({ type: 'upn', value: managerInformation.userPrincipalName, reasons: [`${managerName} is the manager of ${createdByName} who created a new repository ${fullRepoName}`], }); } mailAddressProvider.getLegalContactInformationFromUpn(upn, (getLegalError, legalInformation) => { if (getLegalError) { console.warn(getLegalError); } else if (legalInformation && legalInformation.legalContact && legalInformation.legalContact.userPrincipalName) { const lc = legalInformation.legalContact; const legalFriendlyName = lc.preferredName || lc.alias || lc.userPrincipalName; const la = legalInformation.assignedTo; const assignedToFriendlyName = la.preferredName || la.alias || la.userPrincipalName; const why = la.userPrincipalName === upn ? 
' who' : `'s org within which ${createdByName}`; let legalReason = `${legalFriendlyName} is the legal contact assigned to ${assignedToFriendlyName}${why} created a new repository ${fullRepoName}`; additional.push({ type: 'upn', value: legalInformation.legalContact.userPrincipalName, reasons: [legalReason], }); } if (additional.length) { repositoryContext.additionalRecipients = additional; } return deferred.resolve(context); }); }); return deferred.promise; } function getIdFromUsername(context, organization, username) { // Depends on this being a current member of an org const operations = context.operations; const deferred = Q.defer(); const cacheOptions = { backgroundRefresh: true, maxAgeSeconds: 60 * 60 * 24 * 7 /* 1 week */, }; operations.getMembers(organization.name, cacheOptions, (error, members) => { if (error) { return deferred.reject(error); } const match = username.toLowerCase(); for (let i = 0; i < members.length; i++) { if (members[i].login && members[i].login.toLowerCase() === match) { return deferred.resolve(members[i].id); } } return deferred.reject(); }); return deferred.promise; } function addEntityToIssueType(context, repositoryContext, type, entity, optionalAction1, optionalAction2) { const definition = definitionsByName[type]; if (!definition) { throw new Error(`No defined issue type ${type}`); } let hadActions = true && entity.actions; const entityClone = Object.assign({}, entity); if (hadActions) { delete entityClone.actions; } if (!entityClone.actions && optionalAction1) { entityClone.actions = { actions: [] }; } if (optionalAction1) { entityClone.actions.actions.push(optionalAction1); } if (optionalAction2) { entityClone.actions.actions.push(optionalAction2); } // Track that the definition was used provider-wide and per-entity repositoryContext.definitionsUsed.add(type); if (!context.visitedDefinitions[providerName]) { context.visitedDefinitions[providerName] = new Set(); } context.visitedDefinitions[providerName].add(type); const placeholder = repositoryContext.issues; let propertyName = null; if (!placeholder[type]) { const entry = {}; placeholder[type] = entry; if (definition.hasTable && definition.hasList) { throw new Error('Definitions cannot have both tables and lists at this time'); } if (definition.hasTable) { entry.rows = []; } if (definition.hasList) { entry.listItems = []; } } if (definition.hasTable && definition.hasList) { throw new Error('Definitions cannot have both tables and lists at this time'); } let listPropertiesName = null; if (definition.hasTable) { propertyName = 'rows'; listPropertiesName = 'table'; } if (definition.hasList) { propertyName = 'listItems'; listPropertiesName = 'list'; } if (!propertyName) { throw new Error('No definition items collection available'); } const dest = placeholder[type][propertyName]; dest.push(entityClone); const listProperties = definition[listPropertiesName]; if (listProperties && (listProperties.groupBy || listProperties.sortBy)) { const sortBy = [ dest, ]; if (listProperties.groupBy) { sortBy.push(listProperties.groupBy); } if (listProperties.sortBy) { sortBy.push(listProperties.sortBy); } const after = _.sortBy.apply(null, sortBy); placeholder[type][propertyName] = after; } } function identityAdministratorsWithoutLinks(repositoryContext) { const actionableAdministrators = repositoryContext.actionableAdministrators; const administratorsByType = { linked: actionableAdministrators.filter(admin => { return admin.link; }), unlinked: actionableAdministrators.filter(admin => { return !admin.link; }), }; 
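  // Administrators are partitioned by whether a corporate link exists for the GitHub account;
  // repositories whose actionable administrators are all unlinked are later flagged with the
  // 'noRepositoryAdministrators' issue in processRepository.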
repositoryContext.administratorsByType = administratorsByType; return Q(repositoryContext); } function justAdminTeams(teams) { return Q(teams.filter(team => { return team.permission === 'admin'; })); } function justAdminCollaborators(collaborators) { return Q(collaborators.filter(collaborator => { return collaborator.permissions.admin; })); } function getRepositoryTeams(repository, cacheOptions) { const deferred = Q.defer(); repository.getTeamPermissions(cacheOptions, (error, permissions) => { return error ? deferred.reject(error) : deferred.resolve(permissions); }); return deferred.promise; } function getRepositoryDirectCollaborators(repository) { const deferred = Q.defer(); const directCollaboratorOptions = { affiliation: 'direct', backgroundRefresh: true, maxAgeSeconds: 60 * 60 * 24, // full day allowed }; repository.getCollaborators(directCollaboratorOptions, (error, collaborators) => { return error ? deferred.reject(error) : deferred.resolve(collaborators); }); return deferred.promise; } function getRepos(context) { const deferred = Q.defer(); const operations = context.operations; operations.getRepos((error, repos) => { if (error) { return deferred.reject(error); } context.entities.repos = repos.sort((a, b) => { return a.full_name.localeCompare(b.full_name, 'en', {'sensitivity': 'base'}); }); return deferred.resolve(context); }); return deferred.promise; } function createUserEntry(basics) { return { login: basics.login, reasons: { memberships: [], directCollaborator: false, }, }; } function transformReasonsToArray(userEntry, repositoryName) { const reasons = []; // For efficiency reasons, direct collaborator wins over team memberships if (userEntry.reasons.directCollaborator) { reasons.push(`Administrator of the ${repositoryName} repository`); } else { for (let i = 0; i < userEntry.reasons.memberships.length; i++) { const team = userEntry.reasons.memberships[i]; reasons.push(`Member of the ${team.name} team with administrator rights to the ${repositoryName} repository`); } } if (!reasons.length) { reasons.push(`Unknown reason related to the ${repositoryName}`); } return reasons; } function storeCollaborators(administrators, collaborators) { return Q.all(collaborators.map(collaborator => { const id = collaborator.id; let entry = administrators.get(id); if (!entry) { entry = createUserEntry(collaborator); administrators.set(id, entry); } entry.reasons.collaborator = true; return Q(); })); } function buildReports(context) { return Q(context); } function consolidate(context) { // For any used definitions of a provider entity instance, add it to the generic report const consolidated = { definitions: [], entities: [], }; for (let i = 0; i < definitions.length; i++) { const definition = definitions[i]; if (!context.visitedDefinitions || !context.visitedDefinitions[providerName]) { return Q(context); } if (context.visitedDefinitions[providerName].has(definition.name)) { consolidated.definitions.push(definition); } } // Entities const sorted = _.sortBy(context.repositoryData, 'nameLowercase') ; // 'entityName'); // full_name groups by org name AND repo name naturally for (let i = 0; i < sorted.length; i++) { const fullEntity = sorted[i]; const reducedEntity = { name: fullEntity.name, }; const contextDirectProperties = [ 'issues', 'recipients', ]; cloneProperties(fullEntity, contextDirectProperties, reducedEntity); // Only store in the consolidated report if there are recipients for the entity const issueCounter = Object.getOwnPropertyNames(reducedEntity.issues); if (issueCounter && 
issueCounter.length && reducedEntity && reducedEntity.recipients && reducedEntity.recipients.length > 0) { consolidated.entities.push(reducedEntity); } else if (issueCounter && issueCounter.length) { console.warn(`There are no recipients to receive ${reducedEntity.name} reports with active issues`); } } context.consolidated[providerName] = consolidated; return Q(context); } function cloneProperties(source, properties, target) { for (let j = 0; j < properties.length; j++) { const property = properties[j]; if (source[property]) { target[property] = source[property]; } } } module.exports = { process: processRepositories, build: buildReports, consolidate: consolidate, }; <file_sep>/routes/org/team/members.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // 'use strict'; const express = require('express'); const router = express.Router(); const teamAdminRequired = require('./teamAdminRequired'); function refreshMembers(team2, backgroundRefresh, maxSeconds, firstPageOnly, callback) { const options = { maxAgeSeconds: maxSeconds || 60, backgroundRefresh: backgroundRefresh, }; if (firstPageOnly) { options.pageLimit = 1; } team2.getMembers(options, callback); } function refreshMembersAndSummary(team2, when, callback) { refreshMembers(team2, false /* immediately refresh */, when === 'now' ? -1 : null, true /* start with just the first page */, firstPageError => { refreshMembers(team2, false /* immediate */, when === 'now' ? -1 : null, false /* refresh all pages */, allPagesError => { return callback(firstPageError || allPagesError); }); }); } router.use((req, res, next) => { // Always make sure to have a relatively up-to-date membership cache available const team2 = req.team2; refreshMembers(team2, true /* background refresh ok */, null, false /* refresh all pages */, (error, members) => { req.refreshedMembers = members; return next(error); }); }); router.get('/refresh', (req, res, next) => { // Refresh all the pages and also the cached single-page view shown on the team page const team2 = req.team2; refreshMembersAndSummary(team2, 'whenever', error => { if (error) { return next(error); } return res.redirect(req.teamUrl); }); }); // Browse members router.use('/browse', (req, res, next) => { req.team2RemoveType = 'member'; return next(); }, require('../../peopleSearch')); // Add org members to the team router.use('/add', teamAdminRequired, (req, res, next) => { req.team2AddType = 'member'; return next(); }, require('../../peopleSearch')); router.post('/remove', teamAdminRequired, (req, res, next) => { const username = req.body.username; const team2 = req.team2; team2.removeMembership(username, removeError => { if (removeError) { return next(removeError); } req.oss.saveUserAlert(req, username + ' has been removed from the team ' + team2.name + '.', 'Team membership update', 'success'); refreshMembersAndSummary(team2, 'now', error => { if (error) { return next(error); } return res.redirect(req.teamUrl + 'members/browse/'); }); }); }); router.post('/add', teamAdminRequired, (req, res, next) => { const organization = req.organization; const team2 = req.team2; const refreshedMembers = req.refreshedMembers; const username = req.body.username; // Allow a one minute org cache for self-correcting validation const orgOptions = { maxAgeSeconds: 60, backgroundRefresh: true, }; // Validate that the user is a current org member organization.getMembership(username, orgOptions, (error, membership) => { if 
(error || !membership) { if (error && error.innerError && error.innerError.code === 404) { error = new Error(`${username} is not a member of the organization and so cannot be added to the team until they have joined the org.`); } if (!membership && !error) { error = new Error('No membership information available for the user'); } return next(error); } if (membership.state !== 'active') { return next(new Error(`${username} has the organization state of ${membership.state}. The user is not an active member and so cannot be added to the team at this time.`)); } // Make sure they are not already a member const lc = username.toLowerCase(); for (let i = 0; i < refreshedMembers.length; i++) { const member = refreshedMembers[i]; if (member.login.toLowerCase() === lc) { return next(new Error(`The user ${username} is already a member of the team.`)); } } team2.addMembership(username, error => { if (error) { return next(error); } req.oss.saveUserAlert(req, `Added ${username} to the ${team2.name} team.`, 'Team membership update', 'success'); refreshMembersAndSummary(team2, 'now', refreshError => { if (refreshError) { return next(refreshError); } return res.redirect(req.teamUrl + 'members/browse/'); }); }); }); }); module.exports = router; <file_sep>/middleware/github/teamPermissions.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // 'use strict'; module.exports = function addTeamPermissionsToRequest(req, res, next) { if (req.teamPermissions) { return next(); } const oss = req.oss; const login = oss.usernames.github; const id = oss.id.github ? parseInt(oss.id.github, 10) : null; const organization = req.organization; const teamPermissions = { allowAdministration: false, maintainer: false, sudo: false, }; req.teamPermissions = teamPermissions; organization.isSudoer(login, (sudoCheckError, isSudoer) => { if (sudoCheckError) { return next(sudoCheckError); } oss.isPortalAdministrator((portalSudoError, isPortalSudoer) => { if (portalSudoError) { return next(portalSudoError); } // Indicate that the user is has sudo rights if (isSudoer === true || isPortalSudoer === true) { teamPermissions.sudo = true; } // Get the team maintainers const team2 = req.team2; team2.getMaintainers((getMaintainersError, maintainers) => { if (getMaintainersError) { return next(getMaintainersError); } // +MIDDLEWARE: providing this later to speed up getting this data req.teamMaintainers = maintainers; for (let i = 0; i < maintainers.length; i++) { if (maintainers[i].id === id) { teamPermissions.maintainer = true; break; } } // Make a permission decision if (teamPermissions.maintainer || teamPermissions.sudo) { teamPermissions.allowAdministration = true; } return next(); }); }); }); }; <file_sep>/routes/index.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // var express = require('express'); var router = express.Router(); router.use('/api', require('./api')); router.use('/thanks', require('./thanks')); router.use('/myinfo', require('./diagnostics')); router.use('/explore', require('./explore')); router.use(require('./index-authenticated')); module.exports = router; <file_sep>/middleware/sslify.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. 
// const sslify = require('express-sslify'); module.exports = sslify.HTTPS( { trustAzureHeader: true } ); <file_sep>/config/utils/arrayFromString.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // 'use strict'; // Split and set an optional array, or empty array, trimming each. If the // input is actually an array, just pass it back. module.exports = function (a, split) { if (!split) { split = ','; } if (a && Array.isArray(a)) { return a; } var b = a && a.split ? a.split(split) : []; if (b && b.length) { for (var i = 0; i < b.length; i++) { b[i] = b[i].trim(); } } return b; }; <file_sep>/README.md # opensource-portal This Node.js application is a part of the suite of services provided by the Open Source Programs Office at Microsoft to enable large-scale GitHub management experiences. Key features center around opinionated takes on at-scale management, with an emphasis on _relentless automation_ and _delegation_: - __Linking__: the concept of associating a GitHub identity with an authenticated identity in another provider, for example an Azure Active Directory user - __Self-service GitHub organization join__: one-click GitHub organization joining for authorized users - __Cross-organization functionality__: consolidated views across a set of managed GitHub organizations including people, repos, teams Before providing GitHub management functionality to all of Microsoft, this application started within Azure. > An introduction to this project is available in this 2015 post by <NAME>: [http://www.jeff.wilcox.name/2015/11/azure-on-github/](http://www.jeff.wilcox.name/2015/11/azure-on-github/) The app is a GitHub OAuth application; with the May 2017 release of GitHub Apps (formerly called Integrations), this app over time may be refactored to support the integration concept, removing the need to dedicate a user seat to a machine account. ## Node app - Node.js LTS (v6.10+ as of 5/31/17) - ES6 - Mixed callback and Q promises at this time ## Service Dependencies - At least one of your own GitHub organizations - Bring your own Redis server, or use Azure Redis Cache - Azure Active Directory, or hack your own Passport provider in - Azure Storage for table, `data.js` will need some refactoring to support other providers. _Other providers are being considered, including Azure Premium Table, for better performance. Help would be appreciated here!_ ## LICENSE [MIT License](LICENSE) ## Dev prep, build, deploy ### Prereqs #### Install Node packages Make sure to include dev dependencies ``` $ npm install ``` #### Suggested global NPM packages ``` $ npm install -g eslint bower mocha grunt-cli ember-cli ``` ### Build ``` $ npm run-script build ``` Which is equivalent to running: ``` $ bower install $ cd client $ npm install $ bower install $ cd .. $ grunt ``` ### Test This project is starting to get improved testability. But it will be a long slog. ``` $ npm test ``` Which is equivalent to running: ``` $ mocha $ eslint . ``` ## Contributions welcome Happy to have contributions, though please consider reviewing the CONTRIBUTING.MD file, the code of conduct, and then also open a work item to help discuss the features or functionality ahead of kicking off any such work. This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [<EMAIL>](mailto:<EMAIL>) with any additional questions or comments.

# Implementation Details

## Configuration

The configuration story for this application has been evolving over time. The following configuration elements are available at this time, each with a distinct purpose. A GitHub organization(s) configuration file in JSON format is required as of version 4.2.0 of the app.

- Environment Variables (see `configuration.js` for details)
- JSON Files (either committed directly to a repo or overwritten during deployment)
  - `config/resources.json`: categories, links and special resources to light up learning resources
  - `config/organizations.json`: organization configuration information, an alternate and additive way to include organization config in the app at deployment time. For this method to work, make sure to set the configuration environment to use from such a file using the `CONFIGURATION_ENVIRONMENT` env variable.
- [Azure Key Vault](https://azure.microsoft.com/en-us/services/key-vault/) secrets

With the current configuration story, a `CONFIGURATION_ENVIRONMENT` variable is required, as well as a secret for AAD to get KeyVault bootstrapped. That requirement will go away soon.

### KeyVault Secret Support

Any configuration string property can be resolved to a KeyVault secret. To use a stored KeyVault secret, configure KeyVault to allow this application's service principal to `get` the secret value, then simply use the custom `keyvault://` URI format. For example, given a key vault named `samplevault`, setting a configuration parameter to `keyvault://samplevault.vault.azure.net/secrets/secret-name/optionalVersion` would resolve that secret.

To select a custom user `tag` for a secret, use the `auth` parameter of the URI: a value of `keyvault://username@samplevault.vault.azure.net/secrets/secret-name` would get the secret and its metadata, setting the configuration value to the `username` tag, if present.

#### Key rotation

As configuration, including secrets, is resolved at startup, any key rotation would need to include a restart of the app service.

## Application Insights

When using Microsoft Application Insights, this library reports a number of metrics, events and dependencies.

Library events include:

- UserUnlink: When a user object is unlinked and dropped

User interface events include:

- PortalUserUnlink: When a person initiates and completes an unlink
- PortalUserLink: When a person links their account
- PortalUserReconnectNeeded: When a user needs to reconnect their GitHub account
- PortalUserReconnected: When a user successfully reconnects their GitHub account when using AAD-first auth

## E-mail

A custom mail provider is being used internally, but a more generic mail provider contract exists in the library folder for the app now. This replaces or optionally augments the ability of the app to do workflow over mail. Since Microsoft is an e-mail company and all.

# API

Please see the [API.md](API.md) file for information about the early API implementation.

# Undocumented / special features

This is meant to start an index of interesting features for operations use.
## people ### /people search view - Add a `type=former` query string parameter to show a current understanding of potential former employees who cannot be found in the directory - In the `type=former` view, portal system sudoers will receive a link next to the user to 'manage user', showing more information and the option to remove from the org ## repos ### /repos search view - Add a `showids=1` query string parameter to have repository IDs show up next to repository names <file_sep>/routes/placeholders.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // const express = require('express'); const router = express.Router(); router.use('/data', (req, res) => { const exploreUrl = req.app.settings.runtimeConfig.microsoftOpenSource.explore; res.redirect(`${exploreUrl}resources/insights`); }); router.use('/use', (req, res) => { const exploreUrl = req.app.settings.runtimeConfig.microsoftOpenSource.explore; res.redirect(`${exploreUrl}resources/use`); }); router.use('/release', (req, res) => { const exploreUrl = req.app.settings.runtimeConfig.microsoftOpenSource.explore; res.redirect(`${exploreUrl}resources/release`); }); module.exports = router; <file_sep>/routes/org/index.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // const express = require('express'); const router = express.Router(); const async = require('async'); const teamsRoute = require('./teams'); const reposRoute = require('./repos'); const membershipRoute = require('./membership'); const joinRoute = require('./join'); const leaveRoute = require('./leave'); const orgPermissions = require('../../middleware/github/orgPermissions'); const securityCheckRoute = require('./2fa'); const profileReviewRoute = require('./profileReview'); const approvalsSystem = require('../approvals'); const newRepoSpa = require('./newRepoSpa'); const peopleRoute = require('./people'); router.use(function (req, res, next) { var onboarding = req.query.onboarding; req.oss.addBreadcrumb(req, req.org.name, onboarding ? 
false : undefined); req.reposContext = { section: 'org', org: req.org, }; next(); }); // Routes that do not require that the user be an org member router.use('/join', joinRoute); router.use('/repos', reposRoute); router.use('/people', peopleRoute); router.use('/teams', teamsRoute); // Org membership requirement middleware router.use(orgPermissions, (req, res, next) => { const organization = req.organization; const orgPermissions = req.orgPermissions; if (!orgPermissions) { return next(new Error('Organization permissions are unavailable')); } // Decorate the route for the sudoer if (orgPermissions.sudo) { req.sudoMode = true; } const membershipStatus = orgPermissions.membershipStatus; if (membershipStatus === 'active') { return next(); } else { return res.redirect('/' + organization.name + '/join'); } }); // Org membership required endpoints: router.get('/', function (req, res, next) { const operations = req.app.settings.providers.operations; var org = req.org; var oss = req.oss; var dc = req.app.settings.dataclient; async.parallel({ organizationOverview: (callback) => { const uc = operations.getUserContext(oss.id.github); return uc.getAggregatedOrganizationOverview(org.name, callback); }, isMembershipPublic: function (callback) { org.queryUserPublicMembership(callback); }, orgUser: function (callback) { org.getDetails(function (error, details) { var userDetails = details ? org.oss.user(details.id, details) : null; callback(error, userDetails); }); }, /* CONSIDER: UPDATE ORG SUDOERS SYSTEM UI... isAdministrator: function (callback) { oss.isAdministrator(callback); }*/ }, function (error, results) { if (error) { return next(error); } if (results.isAdministrator && results.isAdministrator === true) { results.isSudoer = true; } var render = function (results) { oss.render(req, res, 'org/index', org.name, { accountInfo: results, org: org, }); }; // Check for pending approvals var teamsMaintained = results.organizationOverview.teams.maintainer; if (teamsMaintained && teamsMaintained.length && teamsMaintained.length > 0) { var teamsMaintainedHash = {}; for (var i = 0; i < teamsMaintained.length; i++) { teamsMaintainedHash[teamsMaintained[i].id] = teamsMaintained[i]; } results.teamsMaintainedHash = teamsMaintainedHash; dc.getPendingApprovals(teamsMaintained, function (error, pendingApprovals) { if (!error && pendingApprovals) { results.pendingApprovals = pendingApprovals; } render(results); }); } else { render(results); } }); }); router.use('/membership', membershipRoute); router.use('/leave', leaveRoute); router.use('/security-check', securityCheckRoute); router.use('/profile-review', profileReviewRoute); router.use('/approvals', approvalsSystem); router.use('/new-repo', (req, res) => { res.redirect(req.org.baseUrl + 'wizard'); }); router.use('/wizard', newRepoSpa); module.exports = router; <file_sep>/routes/settings/authorizations.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // 'use strict'; // This feature is internal-only at this time. Assumes AAD-first auth scheme. 
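// A minimal sketch (added here for clarity; not part of the original module) of the
// shape each entry in req.authorizations takes, and of the result a validator passes
// back. The property names mirror what this file constructs below; the values are
// illustrative assumptions only:
//
//   const exampleAuthorization = {
//     validator: createGithubTokenValidator(link, link.githubToken),
//     property: 'githubToken',
//     title: 'GitHub Application: Public App Token',
//     text: 'Human-readable description shown on the settings page',
//     mitigations: [
//       { title: 'Clear GitHub tokens', url: '/settings/authorizations/github/clear', mitigation: 'Clear GitHub tokens' },
//     ],
//   };
//
//   // A validator calls back with an object shaped like:
//   //   { valid: true|false, message: 'optional text', critical: true|false,
//   //     rateLimitRemaining: 'NNN remaining API tokens' /* when headers are present */ }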
const async = require('async'); const express = require('express'); const github = require('octonode'); const router = express.Router(); function createGithubTokenValidator(link, token) { return (callback) => { const me = github.client(token).me(); me.info((infoError, data, headers) => { let valid = true; let critical = false; let message = null; if (infoError) { valid = false; if (infoError.statusCode === 401 && infoError.message === 'Bad credentials') { message = 'GitHub token revoked or expired'; critical = true; } else { message = infoError.message; } } else { // NOTE: We use strings while GitHub does not if (data.id != link.ghid) { critical = true; valid = false; message = `This token is for a different user, "${data.login}", instead of "${link.ghu}".`; } else if (data.login != link.ghu) { message = `Your username may have changed. It once was "${link.ghu}" but is now "${data.login}". Your ID remains the same.`; } } const result = { valid: valid, message: message, critical: critical, rateLimitRemaining: headers && headers['x-ratelimit-remaining'] ? headers['x-ratelimit-remaining'] + ' remaining API tokens' : undefined, }; callback(null, result); }); }; } router.use((req, res, next) => { // This is a lightweight, temporary implementation of authorization management to help clear // stored session tokens for apps like GitHub, VSTS, etc. const link = req.link; const authorizations = []; if (link.githubToken) { authorizations.push({ validator: createGithubTokenValidator(link, link.githubToken), property: 'githubToken', title: 'GitHub Application: Public App Token', text: 'A GitHub token, authorizing this site, is stored. This token only has rights to read your public profile and validate that you are the authorized user of the GitHub account.', mitigations: [ { title: 'Clear GitHub tokens', url: '/settings/authorizations/github/clear', mitigation: 'Clear GitHub tokens', }, { title: 'Review your GitHub authorized applications', url: 'https://github.com/settings/applications', mitigation: 'Review your authorized GitHub applications', }, ] }); } if (link.githubTokenIncreasedScope) { authorizations.push({ validator: createGithubTokenValidator(link, link.githubTokenIncreasedScope), property: 'githubTokenIncreasedScope', title: 'GitHub Application: Organization Read/Write Token', text: 'A GitHub token, authorizing this site, is stored. The token has a scope to read and write your organization membership. This token is used to automate organization invitation and joining functionality without requiring manual steps.', mitigations: [ { title: 'Clear GitHub tokens', url: '/settings/authorizations/github/clear', mitigation: 'Clear GitHub tokens', }, ] }); } req.authorizations = authorizations; next(); }); router.get('/', (req, res) => { req.oss.render(req, res, 'settings/authorizations', 'Account authorizations', { authorizations: req.authorizations, }); }); router.get('/github/clear', (req, res, next) => { const link = req.link; const linkAuthorizationsToDrop = ['githubToken', 'githubTokenIncreasedScope', 'githubTokenUpdated', 'githubTokenIncreasedScopeUpdated']; linkAuthorizationsToDrop.forEach((property) => { delete link[property]; }); req.oss.modernUser().updateLink(link, (error) => { if (error) { return next(error); } req.oss.saveUserAlert(req, 'The GitHub tokens stored for this account have been removed. 
You may be required to authorize access to your GitHub account again to continue using this portal.', 'GitHub tokens cleared', 'success'); req.oss.invalidateLinkCache(() => { return res.redirect('/signout/github/'); }); }); }); router.get('/validate', (req, res, next) => { async.each(req.authorizations, (authorization, callback) => { const validator = authorization.validator; if (validator !== undefined && typeof validator === 'function') { validator((actualError, validationResult) => { if (actualError) { return callback(actualError); } authorization.valid = validationResult; if (validationResult.critical === true) { // TODO: Actually delete this token/authorization } callback(); }); } else { callback(); } }, (error) => { if (error) { return next(error); } req.oss.render(req, res, 'settings/authorizations', 'Account authorizations', { authorizations: req.authorizations, }); }); }); module.exports = router; <file_sep>/routes/org/teams.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // 'use strict'; const express = require('express'); const lowercaser = require('../../middleware/lowercaser'); const router = express.Router(); const utils = require('../../utils'); const teamRoute = require('./team/'); router.use(function (req, res, next) { req.org.oss.addBreadcrumb(req, 'Teams'); req.reposContext = { section: 'teams', org: req.org, }; next(); }); router.get('/', function (req, res, next) { const beforeLinkReferrer = utils.popSessionVariable(req, res, 'beforeLinkReferrer'); if (beforeLinkReferrer !== undefined) { return res.redirect(beforeLinkReferrer); } return next(); }); router.get('/', lowercaser(['sort', 'set']), require('../teamsPager')); router.use('/:teamSlug', (req, res, next) => { const legacyOrgInstance = req.org; const orgBaseUrl = legacyOrgInstance.baseUrl; const organization = req.organization; const slug = req.params.teamSlug; organization.getTeamFromName(slug, (getTeamError, team) => { // Redirect if a name was provided when a slug is more appropriate if (getTeamError && getTeamError.slug) { return res.redirect(`${orgBaseUrl}teams/${getTeamError.slug}`); } if (getTeamError) { return next(getTeamError); } // The `req.team` variable is currently used by the "legacy" // operations system, so for the time being until there is more // appropriate time for refactoring, this will have to do. req.team2 = team; // Set the legacy team instance as well const clone = Object.assign({}, team); const legacyTeam = legacyOrgInstance.team(team.id, clone); req.team = legacyTeam; // Difference: traditionally legacyTeam.getDetails(...) would also // be called now to fill out the properties; this happened without // a cache and was quite slow for no great value provided. Need // to confirm that this is OK now that it is omitted. // Breadcrumb and path updates req.teamUrl = `${orgBaseUrl}teams/${team.slug}/`; req.oss.addBreadcrumb(req, team.name); return next(); }); }); router.use('/:teamname', teamRoute); module.exports = router; <file_sep>/lib/graphProvider/index.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. 
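//
// Usage sketch (an assumption based on this factory, not an official example): the
// provider name in config.graph.provider must match one of the entries in the
// `providers` list below; the rest of the graph config is handed to that module.
//
//   const createGraphProviderInstance = require('./lib/graphProvider'); // path assumed from app root
//   createGraphProviderInstance({ graph: { provider: 'microsoftGraphProvider' /* plus provider-specific settings */ } }, (error, provider) => {
//     if (error) { /* no graph config, unknown provider, or construction failure */ }
//     // otherwise `provider` is the constructed graph provider instance
//   });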
// 'use strict'; const providers = [ 'microsoftGraphProvider', ]; module.exports = function createGraphProviderInstance(config, callback) { const graphConfig = config.graph; if (!graphConfig) { return callback(new Error('No graph config.')); } const provider = graphConfig.provider; if (!provider) { return callback(new Error('No graph provider set in the graph config.')); } let found = false; providers.forEach((supportedProvider) => { if (supportedProvider === provider) { found = true; let providerInstance = null; try { providerInstance = require(`./${supportedProvider}`)(graphConfig); } catch (createError) { return callback(createError); } return callback(null, providerInstance); } }); if (found === false) { return callback(new Error(`The graph provider "${provider}" is not implemented or configured at this time.`)); } }; <file_sep>/views/email/email.pug //- //- Copyright (c) Microsoft. All rights reserved. //- include body include footer<file_sep>/views/email/repoApprovals/pleaseApprove.pug //- //- Copyright (c) Microsoft. All rights reserved. //- Licensed under the MIT license. See LICENSE file in the project root for full license information. //- extends ../body block content h2 | #{approvalRequest.org}/#{approvalRequest.repoName} | &nbsp; small= approvalRequest.repoVisibility p. A user has requested a new repo in the #{approvalRequest.org} org. if approvalRequest.repoVisibility === 'private' p This request is for a private repo. p Per Microsoft policy, private GitHub repos are designated for projects going open source soon. p a(href=actionUrl, style='display:inline-block;background-color:#eee;font-size:18px;padding:12px') Review request h3 Request details table.technical(style='width:80%') thead tr th(colspan=2) Requested by tbody tr td p a(href='https://github.com/' + approvalRequest.ghu)= approvalRequest.ghu td= approvalRequest.email thead tr th(colspan=2) Repo information tbody tr td GitHub organization td= approvalRequest.org tr td Repository name td= approvalRequest.repoName if approvalRequest.repoDescription tr td Repo description td= approvalRequest.repoDescription tr td Initial visibility td= approvalRequest.repoVisibility === 'public' ? 'Public' : 'Private' if approvalRequest.justification thead tr th(colspan=2) Justification tr td(colspan=2)= approvalRequest.justification thead tr th(colspan=2) Permissions if approvalRequest.teamsCount tr td Authorized teams td= approvalRequest.teamsCount h4 Protect yourself from phishing p If you prefer not to click on email links, you can find this approval by: ul li Navigating to the open source repos site at #{reposSiteUrl} li View your alerts, or go to the <em>/approvals</em> URL li Review the request details <file_sep>/lib/github/cost.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. 
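//
// Usage sketch (illustrative only, not part of the original file): this module exposes
// `create` to build an empty cost accumulator and `add` to merge a second cost object
// into the first, as done when aggregating paged GitHub responses.
//
//   const cost = require('./cost');
//   const total = cost.create();
//   cost.add(total, someResponse.cost); // `someResponse` is hypothetical; `add` mutates and returns `total`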
// 'use strict'; function createCostContainer() { return { local: { cacheHits: 0, cacheMisses: 0, }, redis: { cacheHit: 0, cacheMisses: 0, getCalls: 0, setCalls: 0, expireCalls: 0, }, github: { restApiCalls: 0, usedApiTokens: 0, cacheHits: 0, remainingApiTokens: 0, }, }; } function addCosts(a, b) { if (a && b) { a.local.cacheHits += b.local.cacheHits; a.local.cacheMisses += b.local.cacheMisses; a.redis.cacheHit += b.redis.cacheHit; a.redis.cacheMisses += b.redis.cacheMisses; a.redis.getCalls += b.redis.getCalls; a.redis.setCalls += b.redis.setCalls; a.redis.expireCalls += b.redis.expireCalls; a.github.restApiCalls += b.github.restApiCalls; a.github.usedApiTokens += b.github.usedApiTokens; a.github.cacheHits += b.github.cacheHits; // Min; though if the refresh happens in the middle this will be off if (b.github.remainingApiTokens > 0) { let floor = a.github.remainingApiTokens <= 0 ? b.github.remainingApiTokens : a.github.remainingApiTokens; a.github.remainingApiTokens = Math.min(floor, b.github.remainingApiTokens); } } return a; } module.exports = { create: createCostContainer, add: addCosts, }; <file_sep>/views/org/team/repos/repo/collaborators.pug //- //- Copyright (c) Microsoft. All rights reserved. //- Licensed under the MIT license. See LICENSE file in the project root for full license information. //- extends ../../../../layout block content div.container h1 Collaborators p. A collaborator on GitHub is someone who has access to a specific repo, with a specific set of permissions, but who is not necessarily a member of the organization. if corporateCollaborators && corporateCollaborators.length && corporateCollaborators.length > 0 h2 #{corporateCollaborators.length > 1 ? corporateCollaborators.length : ''} Corporate Collaborator#{corporateCollaborators.length > 1 ? 's' : ''} p These corporate users have repo-specific collaboration rights. This often is a good place to give a specific corporate user "admin" access for a repo to manage settings on GitHub such as web hooks. if true == true table.table thead tr th Avatar th GitHub Username th Authorization Type th Permission Level th Actions tbody for teamMember in corporateCollaborators tr td p if teamMember.avatar_url img(alt=teamMember.ghu, src=teamMember.avatar_url + '&s=100', style='margin-right:10px;width:50px;height:50px', width=50, height=50, data-user=teamMember.id) td a.btn.btn-sm.btn-muted(href='https://github.com/' + teamMember.login, title=teamMember.id)= teamMember.login td if teamMember.link p | Active Directory | &nbsp; a.btn.btn-sm.btn-muted(href=teamMember.corporateProfileUrl(), target='_new') = teamMember.corporateAlias() || teamMember.link.aadupn if teamMember.contactEmail() | &nbsp; &nbsp; a.btn.btn-sm.btn-default(href='mailto:' + teamMember.contactEmail()) Send Mail else p strong Not a portal user td - var rcp = teamMember._repoCollaboratorPermissions - var permissionColor = rcp.admin === true ? 'alert-danger' : (rcp.push === true ? 'btn-primary' : 'alert-success') - var permissionText = rcp.admin === true ? 'Admin' : (rcp.push === true ? 
'Write' : 'Read') p a.btn.btn-sm(class=permissionColor, href='javascript:alert("Change the permission level by simply adding the same user again, with the new permission level.")')= permissionText td form(method='post', action=repoCollaboratorsUrl + '/' + teamMember.login + '/remove', onsubmit='return confirm("Are you sure that you want to remove ' + teamMember.login + ' as a collaborator?");') p input.btn.btn-sm.btn-default(type='submit', value='Remove Collaborator', data-transition='fade', data-theme='c') if collaborators if collaborators.length && collaborators.length > 0 h2 #{collaborators.length > 1 ? collaborators.length : ''} Outside Collaborator#{collaborators.length > 1 ? 's' : ''} p. The Outside Collaborator role is ideal for external vendors or partners on projects who do not have corporate credentials. p. It is the responsibility of repo team maintainers to carefully select, curate and manage their collaborators, since these users exist outside of the organizational management system. Employees should always join the org instead of being appointed as Outside Collaborators. div table.table thead tr th Avatar th GitHub Username th Authorization Type th Permission Level th Actions tbody for teamMember in collaborators tr td p if teamMember.avatar_url img(alt=teamMember.ghu, src=teamMember.avatar_url + '&s=100', style='margin-right:10px;width:50px;height:50px', width=50, height=50, data-user=teamMember.id) td a.btn.btn-sm.btn-muted(href='https://github.com/' + teamMember.login, title=teamMember.id)= teamMember.login td if teamMember.link p | Active Directory br a.btn.btn-sm.btn-muted(href=teamMember.corporateProfileUrl(), target='_new') = teamMember.corporateAlias() || teamMember.link.aadupn else p Outside Collaborator td - var rcp = teamMember._repoCollaboratorPermissions - var permissionColor = rcp.admin === true ? 'alert-danger' : (rcp.push === true ? 'btn-primary' : 'alert-success') - var permissionText = rcp.admin === true ? 'Admin' : (rcp.push === true ? 'Write' : 'Read') p a.btn.btn-sm(class=permissionColor, href='javascript:alert("Change the permission level by simply adding the same user again, with the new permission level.")')= permissionText td form(method='post', action=repoCollaboratorsUrl + '/' + teamMember.login + '/remove', onsubmit='return confirm("Are you sure that you want to remove ' + teamMember.login + ' as a collaborator?");') p input.btn.btn-sm.btn-default(type='submit', value='Remove Collaborator', data-transition='fade', data-theme='c') hr div.row div.col-md-6.col-lg-6.col-sm-6 h2 Add an Outside Collaborator p. An outside collaborator is not an employee. They are not tracked by this portal. As a team maintainer, <em>you</em> are responsible for your outside collaborators. p. When an outside collaborator's need for access is done, please remove the user from the repo. 
form.form-horizontal(method='post', action=repoCollaboratorsUrl + '/add') h5 GITHUB USERNAME p input.form-control#inputUsername(type='text', name='username', placeholder='GitHub username of the collaborator') h5 REPOSITORY p= repo.full_name h5 PERMISSION LEVEL FOR THE REPO p select.form-control#inputPermissionLevel(name='permission') option(value='admin') Admin: Manage repo settings, hooks, properties, even delete the repo option(value='push') Push (Write): Allow for direct pushes to this repo, accepting PRs option(value='pull', selected='selected') Pull (Read): Allow the user to pull from the repo, fork, submit PRs input.btn.btn-default.btn-sm(type='submit',value='Add Outside Collaborator',data-transition='fade', data-theme='c') div.col-md-6.col-lg-6.col-sm-6 h2 Add a Corporate Collaborator p. You can also add an existing organization member (an employee) as a collaborator on a specific repository. p. The most common scenario for the capability is to add a specific employee with "Admin" privilege. This will let that employee use the "settings" tab for the repo on the GitHub.com site to configure web hooks, repo properties, and other essentials. p. We still recommend the team approach for most operations. form.form-horizontal(method='post', action=repoCollaboratorsUrl + '/add') h5 CORPORATE IDENTITY p select.form-control#inputAddMember(name='username') each employee in employees option(value=employee.ghu, selected=employee.ghid == user.github.id)= employee.aadupn + ' : ' + employee.ghu h5 REPOSITORY p= repo.full_name h5 PERMISSION LEVEL FOR THE REPO p input(type='hidden', name='corporate', value='corporate') select.form-control#inputPermissionLevel(name='permission') option(value='admin') Admin: Manage repo settings, hooks, properties, even delete the repo option(value='push') Push (Write): Allow for direct pushes to this repo, accepting PRs option(value='pull', selected='selected') Pull (Read): Allow the user to pull from the repo, fork, submit PRs p input.btn.btn-default.btn-sm(type='submit',value='Add Corporate Collaborator',data-transition='fade', data-theme='c') <file_sep>/views/email/repoApprovals/autoCreated.pug //- //- Copyright (c) Microsoft. All rights reserved. //- Licensed under the MIT license. See LICENSE file in the project root for full license information. //- extends ../body block content h1 #{approvalRequest.org}/#{approvalRequest.repoName} p | Repo URL:&nbsp; a(href='https://github.com/' + approvalRequest.org + '/' + approvalRequest.repoName, target='_new') https://github.com/#{approvalRequest.org}/#{approvalRequest.repoName} if api && service p.lead #{approvalRequest.repoName} was created by #{service} else if api p.lead #{approvalRequest.repoName} was created by a service else p.lead. Your repo <strong>"#{approvalRequest.repoName}"</strong> has been created in the <strong>"#{approvalRequest.org}"</strong> organization. if approvalRequest.repoVisibility == 'private' p. Since the new repo is private, you may receive a 404 if you yourself are not a member of one of the teams that you requested have access. Let <EMAIL> know if you need support. 
if approvalRequest.license h3 License p= approvalRequest.license if approvalRequest.approvalType h3 Open Source Approval p | Type of approval: #{approvalRequest.approvalType} if approvalRequest.approvalUrl br | Approval: #{approvalRequest.approvalUrl} if approvalRequest.justification br | Justification: #{approvalRequest.justification} if service || serviceOwner || serviceDescription h3 Service information p This repo was created by a service: table.technical thead tr if service th Service if serviceDescription th Service information tbody tr if service td= service if serviceDescription td= serviceDescription if results && results.length h3 Operations performed ul each result in results li(style=result.error ? 'color:red' : undefined)= result.message <file_sep>/routes/api/jsonError.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // 'use strict'; module.exports = function jsonError(error, statusCode) { if (error && error.length && error.indexOf && !error.message) { error = new Error(error); } if (!error) { error = new Error('An error occurred.'); } error.json = true; if (statusCode) { error.statusCode = statusCode; } return error; }; <file_sep>/business/common.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // /*eslint no-console: ["error", { allow: ["warn"] }] */ const _ = require('lodash'); function assignKnownFields(self, entity, type, primaryProperties, secondaryProperties) { const copy = Object.assign({}, entity); const directSet = _.pick(copy, primaryProperties); _.keys(directSet).forEach(key => { delete copy[key]; }); Object.assign(self, directSet); if (secondaryProperties) { const otherSet = _.pick(copy, secondaryProperties); _.keys(otherSet).forEach(key => { delete copy[key]; }); if (!self.otherFields) { self.otherFields = {}; } Object.assign(self.otherFields, otherSet); } /* let remainingKeys = _.keys(copy); if (remainingKeys.length > 0) { console.warn(`Remaining ${type} entity properties: ${remainingKeys.length}`); } */ } function createInstancesCallback(self, createMethod, callback) { return function (error, entities) { if (error) { return callback(error); } let wrap = createMethod.bind(self); callback(null, _.map(entities, wrap)); }; } module.exports.assignKnownFields = assignKnownFields; module.exports.createInstancesCallback = createInstancesCallback; <file_sep>/views/layout.pug //- //- Copyright (c) Microsoft. All rights reserved. //- Licensed under the MIT license. See LICENSE file in the project root for full license information. //- doctype html html(lang="en") include head body include alerts - var scheme = primaryAuthenticationScheme || (config ? config.authentication.scheme : null) include nav if reposContext && site === 'github' include reposToolbar //-include breadcrumbs // content block content // end of content include footer include insights include ga <file_sep>/views/org/requestRepo.pug //- //- Copyright (c) Microsoft. All rights reserved. //- Licensed under the MIT license. See LICENSE file in the project root for full license information. 
//- extends ../layout block js_doc_ready | function setVisibility(element, attribute, target) { | if ($(element).attr(attribute) == 'true') { | $(element).bind('click', function() { $(target).show({ duration: 200 }); }); | } else { | $(element).bind('click', function() { $(target).hide({ duration: 200 }); }); | } | } | var allLicenses = $('input[name="license"]'); | if(allLicenses.length > 0){ $(allLicenses[0]).attr('checked','checked');}; | var allApprovalTypes = $('input[name="approvalType"]'); | if (allApprovalTypes.length > 0) { | allApprovalTypes.each(function() { | setVisibility(this, 'url-required', '#approvalUrl'); | setVisibility(this, 'exemption-details-required', '#exemption-details'); | }); | $(allApprovalTypes[0]).trigger('click'); | } | var validatePermissions = function (){ | var message = 'You must select at least one write or admin team. Otherwise you will be unable to use your repo once created!'; if selectTeams each selectTeam in selectTeams | var itemValue = $('#existingTeamPermission#{selectTeam.number}').val(); | if (itemValue == 'admin' || itemValue == 'push') { message = ''; }; | var selectedApprovalType = $('input[name="approvalType"]:checked'); | if($(selectedApprovalType).attr('url-required') == 'true') | { var pattern = new RegExp($(selectedApprovalType).attr('url-format')); | var isMatch = pattern.test($('#inputApprovalUrl').val()); | if(isMatch == false){ | message += (message ? "\n" : "") + "Invalid Business Approval Url."; | } | } | return message; | }; | $('#permissionsForm').on('submit', function(e) { e.preventDefault(); var message = validatePermissions(); if (!message) { this.submit(); } else { alert(message); } }); | var teams = [null, if teams each team in teams | { value: '#{team.id}', label: '#{team.name}'}, | ]; block content div.container h1 Request a new #{org.name} repo if orgConfig.createReposDirect === true p.lead Your organization has not yet onboarded to repo creation inside this portal p At this time repositories need to be created directly on GitHub.com. Only owners or those with specific permissions to create repositories will be able to do this. p Please reach out to an organization owner if you do not have permission and need a repo. p a.btn.btn-lg.btn-primary(href='https://github.com/organizations/' + org.name + '/repositories/new', target='_new') | Go to GitHub repo create page else form.form-horizontal#permissionsForm(method='post') h4 Name p input.form-control#inputGitHubName(type='text', placeholder='Repo Name (no spaces)', name='name') h4 Description p input.form-control#inputGitHubName(type='text', placeholder='Public-facing description', name='description') h4 Language p Choose language-specific .gitignore file div select.form-control(name='language') option(value='', selected='selected') None each language in languages option(value=language)= language if templates h4 License &amp; template p Each open source repo must have an open source license agreed to by your business and legal team. each eachLicense in templates div label input(type='radio',name='license',value=eachLicense.name) span = ' ' + eachLicense.name if eachLicense.description = ': ' + eachLicense.description + ' ' if eachLicense.legalEntities = ' (' + eachLicense.legalEntities.join(' or ') + ' legal entity)' if approvalTypes h4 Approval Type p GitHub is our location for doing open source work. All repos must have a corresponding approval (or exemption) for working in the open. One approval can be used to create multiple repos as long as their scopes match. 
each eachApprovalType in approvalTypes div label input(type='radio', name='approvalType', value=eachApprovalType.value, url-required=eachApprovalType.urlRequired ? 'true' : undefined, url-format=eachApprovalType.format, exemption-details-required=eachApprovalType.exemptionDetailsRequired ? 'true' : undefined) span &nbsp;#{eachApprovalType.value} div#approvalUrl.col-sm-12.col-md-12.col-lg-12 p input.form-control#inputApprovalUrl(type='text', placeholder='Your https://osstool.microsoft.com approval URL', name='approvalUrl') div#exemption-details.col-sm-12.col-md-12.col-lg-12 p Please provide more information about your repo including business reasoning and the context of your anticipated involvement. p small This request and your justification note will be stored for historical reasons, so consider that your request may be viewed by engineering managers, directors and CVPs as they review the open source activities and compliance goals that are in place. p textarea.form-control(rows=3, name='justification', placeholder='Justification, open source approval information, and/or context for team maintainers and leadership.') h4 Initial Visibility if allowPrivateRepos === true && allowPublicRepos !== true p All new #{org.name} repos are initially private until ready to ship. //- Members of a repo team with admin permissions can make a repo public by changing its Settings in GitHub. if allowPrivateRepos !== true && allowPublicRepos === true p All new repos are initially public. Members of a repo team with admin permissions can make a repo private by changing its Settings in GitHub. if allowPrivateRepos === true && allowPublicRepos === true p You can choose to make your repo initially public or private. Members of a repo team with admin permissions can change the repo's visibility by changing its Settings in GitHub. div.form-group span.col-sm-2(style={'text-align': 'right'}) Repo Visibility div.col-sm-4 | <select name="visibility" form="permissionsForm"><option value="private">Private</option><option value="public">Public</option><select> h4 Teams and permissions //-p Permissions to a repo are granted on a per-team basis. Below, identify the teams and their maximum permission level for this repo. p If you need new teams, <a href="https://github.com/orgs/#{orgName}/new-team" target="_new">create them in GitHub</a> before submitting this request. div.container input(type='hidden', name='teamCount', value=selectTeams.length) each selectTeam in selectTeams - var isSpecialReadTeam = false div.row( class={ 'alert-gray': selectTeam.adminOnly || (selectTeam.id && selectTeam.info), 'ms-red-border-top': selectTeam.adminOnly, 'ms-blue-border-top': !selectTeam.adminOnly && selectTeam.id && selectTeam.info, 'transparent-border': !selectTeam.adminOnly && !selectTeam.id && !selectTeam.info } ) div.col-sm-12.col-md-12.col-lg-12 if selectTeam.adminOnly h5 Repo Admin Team else if selectTeam.id && selectTeam.info - isSpecialReadTeam = true h5 | Recommended Read Team&nbsp; small= selectTeam.name p= selectTeam.info else h5 Optional Team ##{selectTeam.number} div.row(class=selectTeam.adminOnly || (selectTeam.id && selectTeam.info) ? 'alert-gray' : undefined) div.col-sm-7.col-md-7.col-lg-7 p select.form-control(name='existingTeam' + selectTeam.number) option(value=0, selected=selectTeam.id ? 'selected' : undefined) None each team in teams if isSpecialReadTeam if selectTeam.id === team.id option(value=team.id, selected=team.id == selectTeam.id ? 
'selected' : undefined)= team.name -// else do not show the other teams else option(value=team.id, selected=team.id == selectTeam.id ? 'selected' : undefined)= team.name div.col-sm-5.col-md-5.col-lg-5 p select.form-control(name='existingTeamPermission' + selectTeam.number, id='existingTeamPermission' + selectTeam.number) if !selectTeam.adminOnly option(value='pull', selected='selected') Read (Pull, Fork) if !selectTeam.readOnly option(value='push') Write (Push, Accept PRs) if !selectTeam.readOnly option(value='admin') Admin (Web Hooks, Collaborators, Settings) if approvers && approvers.length > 1 h4 Request Approver#{approvers.length > 1 ? 's' : ''} for #{org.name} p Repo decisions for this organization are made by the following employee#{approvers.length > 1 ? 's' : ''}: p for teamMember in approvers span.person-tile if teamMember.avatar() img(alt=teamMember.login, src=teamMember.avatar('80'), style='margin-right:10px;width:20px;height:20px', width=20, height=20) a.btn.btn-sm.btn-muted(href='https://github.com/' + teamMember.login, title=teamMember.id, target='_new')= teamMember.login if teamMember.contactEmail() a.btn.btn-sm.btn-muted-more(href='mailto:' + teamMember.contactEmail())= teamMember.contactName() if orgHasCla === true .row.col-md-12.col-lg-12 h4 Contributor License Agreement p. This organization is integrated with the corporate CLA system. Please provide a team discussion list or e-mail address to notify about submitted CLAs. This e-mail address needs to be able to accept e-mail from outside the company. .row.col-md-4.col-lg-4 p.lead Legal Entity select.form-control(name='claEntity') each team, key in claTeams option(value=key)= key option No CLA .row.col-md-8.col-lg-8 p.lead Notification email(s) p input.form-control(type='text', name='claMail', placeholder='Comma-separated list of e-mail addresses to set') p br input.btn.btn-primary(type='submit', value='Submit Request') | &nbsp; a.btn.btn-default(href='/') Cancel <file_sep>/lib/redis.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // const debug = require('debug')('oss-redis'); const debugCrossOrganization = require('debug')('oss-redis-cross-org'); const Q = require('q'); const zlib = require('zlib'); const compressionOptions = { type: 'gzip', params: { level: zlib.Z_BEST_SPEED, }, }; function RedisHelper(redisClient, prefix) { this.redis = redisClient; this.prefix = prefix ? prefix + '.' 
: ''; } function objectFromJson(json, callback) { var error = null; var object = null; try { if (json) { object = JSON.parse(json); } } catch (ex) { error = ex; object = null; } callback(error, object); } function objectToJson(object, callback) { var error = null; var json = null; try { json = JSON.stringify(object); } catch (ex) { error = ex; } callback(error, json); } RedisHelper.prototype.getSet = function (key, callback) { var k = this.prefix + key; this.redis.smembers(k, callback); }; RedisHelper.prototype.addSetMember = function (key, member, callback) { var k = this.prefix + key; this.redis.sadd(k, member, callback); }; RedisHelper.prototype.removeSetMember = function (key, member, callback) { var k = this.prefix + key; this.redis.srem(k, member, callback); }; RedisHelper.prototype.get = function (key, callback) { var k = this.prefix + key; debug('GET ' + k); if (k.includes('.x#')) { debugCrossOrganization(' GET ' + k); } this.redis.get(k, callback); }; RedisHelper.prototype.getCompressed = function (key, callback) { var k = this.prefix + key; debug('GET ' + k); if (k.includes('.x#')) { debugCrossOrganization(' GET ' + k); } const bufferKey = new Buffer(k); this.redis.get(bufferKey, (error, buffer) => { if (error) { return process.nextTick(callback, error); } if (buffer === undefined || buffer === null) { return process.nextTick(callback, null, buffer); } zlib.gunzip(buffer, (unzipError, unzipped) => { // Fallback if there is a data error (i.e. it's not compressed) if (unzipError && unzipError.errno === zlib.Z_DATA_ERROR) { const originalValue = buffer.toString(); return process.nextTick(callback, null, originalValue); } else if (unzipError) { return process.nextTick(callback, unzipError); } const unzippedValue = unzipped.toString(); return process.nextTick(callback, null, unzippedValue); }); }); }; RedisHelper.prototype.set = function (key, value, callback) { var k = this.prefix + key; debug('SET ' + k); this.redis.set(k, value, callback); }; RedisHelper.prototype.setCompressed = function (key, value, options, callback) { if (!callback && typeof(options) === 'function') { callback = options; options = null; } options = options || {}; const minutesToExpire = options.minutesToExpire || null; var k = this.prefix + key; if (minutesToExpire) { debug('SET ' + k + ' EX ' + minutesToExpire + 'm'); } else { debug('SET ' + k); } const val = new Buffer(value); zlib.gzip(val, compressionOptions, (gzipError, compressed) => { if (gzipError) { return callback(gzipError); } const bufferKey = new Buffer(k); if (minutesToExpire) { this.redis.set(bufferKey, compressed, 'EX', minutesToExpire * 60, callback); } else { this.redis.set(bufferKey, compressed, callback); } }); }; RedisHelper.prototype.delete = function (key, callback) { var k = this.prefix + key; debug('DEL ' + k); this.redis.del(k, callback); }; RedisHelper.prototype.setWithExpire = function (key, value, minutesToExpire, callback) { if (!minutesToExpire) { return callback(new Error('No minutes to expiration provided.')); } var k = this.prefix + key; debug('SET ' + k + ' EX ' + minutesToExpire + 'm'); this.redis.set(k, value, 'EX', minutesToExpire * 60, callback); }; RedisHelper.prototype.setCompressedWithExpire = function (key, value, minutesToExpire, callback) { if (!minutesToExpire) { return callback(new Error('No minutes to expiration provided.')); } const options = { minutesToExpire: minutesToExpire, }; return this.setCompressed(key, value, options, callback); }; RedisHelper.prototype.expire = function (key, minutesToExpire, callback) 
{ if (!minutesToExpire) { return callback(new Error('No minutes to expiration provided.')); } var k = this.prefix + key; // debug('EXP ' + k + ' ' + minutesToExpire + 'm'); this.redis.expire(k, minutesToExpire * 60, callback); }; // Helper versions for object/json conversions RedisHelper.prototype.getObject = function (key, callback) { this.get(key, function (error, json) { if (error) { return callback(error); } objectFromJson(json, callback); }); }; RedisHelper.prototype.getObjectCompressed = function (key, callback) { this.getCompressed(key, function (error, json) { if (error) { return callback(error); } objectFromJson(json, callback); }); }; RedisHelper.prototype.setObject = function (key, value, callback) { var self = this; objectToJson(value, function (error, json) { if (!error) { self.set(key, json, callback); } else { callback(error); } }); }; RedisHelper.prototype.setObjectWithExpire = function (key, value, minutesToExpire, callback) { var self = this; objectToJson(value, function (error, json) { if (!error) { self.setWithExpire(key, json, minutesToExpire, callback); } else { callback(error); } }); }; RedisHelper.prototype.setObjectCompressedWithExpire = function (key, value, minutesToExpire, callback) { const self = this; objectToJson(value, function (error, json) { if (!error) { self.setCompressedWithExpire(key, json, minutesToExpire, callback); } else { callback(error); } }); }; RedisHelper.prototype.getAsync = function (key) { return Q.ninvoke(this, 'get', key); }; RedisHelper.prototype.getCompressedAsync = function (key) { return Q.ninvoke(this, 'getCompressed', key); }; RedisHelper.prototype.getObjectAsync = function (key) { return Q.ninvoke(this, 'getObject', key); }; RedisHelper.prototype.getObjectCompressedAsync = function (key) { return Q.ninvoke(this, 'getObjectCompressed', key); }; RedisHelper.prototype.setAsync = function (key, value) { return Q.ninvoke(this, 'set', key, value); }; RedisHelper.prototype.setObjectAsync = function (key, value) { return Q.ninvoke(this, 'setObject', key, value); }; RedisHelper.prototype.setObjectWithExpireAsync = function (key, value, minutesToExpire) { return Q.ninvoke(this, 'setObjectWithExpire', key, value, minutesToExpire); }; RedisHelper.prototype.setObjectCompressedWithExpireAsync = function (key, value, minutesToExpire) { return Q.ninvoke(this, 'setObjectCompressedWithExpire', key, value, minutesToExpire); }; RedisHelper.prototype.setCompressedWithExpireAsync = function (key, value, minutesToExpire) { return Q.ninvoke(this, 'setCompressedWithExpire', key, value, minutesToExpire); }; RedisHelper.prototype.setWithExpireAsync = function (key, value, minutesToExpire) { return Q.ninvoke(this, 'setWithExpire', key, value, minutesToExpire); }; RedisHelper.prototype.expireAsync = function (key, minutesToExpire) { return Q.ninvoke(this, 'expire', key, minutesToExpire); }; RedisHelper.prototype.deleteAsync = function (key) { return Q.ninvoke(this, 'delete', key); }; module.exports = RedisHelper; <file_sep>/config/logging.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // 'use strict'; const pkg = require('../package.json'); const logging = { errors: 'env://SITE_SKIP_ERRORS?default=0&trueIf=0', version: pkg.version, }; module.exports = logging; <file_sep>/middleware/links/index.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. 
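//
// Behavior sketch (descriptive note added for clarity, not original to the file): this
// middleware ensures `req.link` is populated from the signed-in user's "modern user"
// object; if there is no user or no link, the request is redirected to '/' instead of
// continuing down the chain. Typical mounting (assumed):
//
//   const addLinkToRequest = require('./middleware/links');
//   router.use(addLinkToRequest);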
// 'use strict'; module.exports = function addLinkToRequest(req, res, next) { if (req.link) { return next(); } const oss = req.oss; const user = oss.modernUser(); if (!user) { return res.redirect('/'); } const link = user.link; if (!link) { return res.redirect('/'); } req.link = link; return next(); }; <file_sep>/config/github.templates.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // 'use strict'; let templates = null; try { templates = require('../data/templates/definitions.json'); } catch (notFound) { /* no action required */ } module.exports = templates; <file_sep>/lib/github/collections.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // 'use strict'; const _ = require('lodash'); const async = require('async'); const debug = require('debug')('oss-github'); const Q = require('q'); const composite = require('./composite'); const core = require('./core'); const cost = require('./cost'); const githubEntityClassification = require('../../data/github-entity-classification.json'); const branchDetailsToCopy = githubEntityClassification.branches.keep; const repoDetailsToCopy = githubEntityClassification.repo.keep; const teamDetailsToCopy = githubEntityClassification.team.keep; const memberDetailsToCopy = githubEntityClassification.member.keep; const teamPermissionsToCopy = githubEntityClassification.teamPermissions.keep; const teamRepoPermissionsToCopy = githubEntityClassification.repoTeamPermissions.keep; function createIntelligentMethods(libraryContext, githubCall) { const getNextPage = libraryContext.getNextPage; const hasNextPage = libraryContext.hasNextPage; function getGithubCollection(token, methodName, options, callback) { let done = false; let results = []; let recentResult = null; let requests = []; let pages = 0; const pageLimit = options.pageLimit || Number.MAX_VALUE; function processResult(next, error, result) { if (error) { done = true; } else { recentResult = result; if (result) { ++pages; if (Array.isArray(result)) { results = results.concat(result); } requests.push({ cost: result.cost, meta: result.meta, }); } done = pages >= pageLimit || !hasNextPage(result); } if (!done && !error && result.meta && result.meta['retry-after']) { const delaySeconds = result.meta['retry-after']; debug(`Retry-After header was present. Delaying before next page ${delaySeconds}s.`); return setTimeout(() => { next(); }, delaySeconds * 1000); } next(error); } async.whilst( () => { return !done; }, (next) => { let method = recentResult ? getNextPage : githubCall; let args = [token]; let cb = processResult.bind(null, next); recentResult ? args.push(recentResult) : args.push(methodName, options); args.push(cb); method.apply(null, args); }, (error) => { callback(error, error ? undefined : results, error ? 
undefined : requests); }); } function getFilteredGithubCollection(token, methodName, options, propertiesToKeep, callback) { const keepAll = !propertiesToKeep; return getGithubCollection(token, methodName, options, (error, results, requests) => { if (error) { return callback(error); } const repos = []; for (let i = 0; i < results.length; i++) { const doNotModify = results[i]; if (doNotModify) { const r = {}; _.forOwn(doNotModify, (value, key) => { if (keepAll || propertiesToKeep.indexOf(key) >= 0) { r[key] = value; } }); repos.push(r); } } callback(null, repos, requests); }); } function getFilteredGithubCollectionWithMetadataAnalysis(token, methodName, options, propertiesToKeep) { const deferred = Q.defer(); getFilteredGithubCollection(token, methodName, options, propertiesToKeep, (error, results, requests) => { if (error) { return deferred.reject(error); } const pages = []; let dirty = false; let dirtyModified = []; let compositeCost = cost.create(); for (let i = 0; i < requests.length; i++) { if (requests[i] && requests[i].meta && requests[i].meta.etag) { pages.push(requests[i].meta.etag); } else { throw new Error('Invalid set of responses for pages'); } if (requests[i] && requests[i].meta && requests[i].meta.statusActual && requests[i].meta.statusActual !== 304) { dirty = true; let lastModified = requests[i].meta['last-modified']; if (lastModified) { dirtyModified.push(lastModified); } } if (requests[i] && requests[i].cost) { cost.add(compositeCost, requests[i].cost); } } if (dirtyModified.length > 0) { debug('Last-Modified response was present. This work is not yet implemented.'); // Some types, typically direct entities, will return this value; collections do not. // Would want to use the Last-Modified over the refresh time, sorting to find the latest. 
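        // One possible approach (a sketch only; intentionally left unimplemented here):
        // const newestModified = dirtyModified
        //   .map(value => new Date(value))
        //   .sort((a, b) => b - a)[0];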
} results.meta = { pages: pages, dirty: dirty, }; results.cost = compositeCost; deferred.resolve(results); }); return deferred.promise; } function generalizedCollectionMethod(token, apiName, method, options, cacheOptions, callback) { if (callback === undefined && typeof (cacheOptions) === 'function') { callback = cacheOptions; cacheOptions = {}; } const apiContext = composite.create(apiName, method, options); apiContext.maxAgeSeconds = cacheOptions.maxAgeSeconds || 600; apiContext.token = token; apiContext.libraryContext = libraryContext; if (cacheOptions.backgroundRefresh) { apiContext.backgroundRefresh = true; } return core.execute(apiContext, callback); } function getCollectionAndFilter(token, options, githubClientMethod, propertiesToKeep) { return function (token, options) { return getFilteredGithubCollectionWithMetadataAnalysis(token, githubClientMethod, options, propertiesToKeep); }; } function generalizedCollectionWithFilter(name, githubClientMethod, propertiesToKeep, token, options, cacheOptions, callback) { return generalizedCollectionMethod(token, name, getCollectionAndFilter(token, options, githubClientMethod, propertiesToKeep), options, cacheOptions, callback); } return { getOrgRepos: function getOrgRepos(token, options, cacheOptions, callback) { return generalizedCollectionWithFilter('orgRepos', 'repos.getForOrg', repoDetailsToCopy, token, options, cacheOptions, callback); }, getOrgTeams: function getOrgTeams(token, options, cacheOptions, callback) { return generalizedCollectionWithFilter('orgTeams', 'orgs.getTeams', teamDetailsToCopy, token, options, cacheOptions, callback); }, getOrgMembers: function getOrgMembers(token, options, cacheOptions, callback) { return generalizedCollectionWithFilter('orgMembers', 'orgs.getMembers', memberDetailsToCopy, token, options, cacheOptions, callback); }, getRepoTeams: function getRepoTeams(token, options, cacheOptions, callback) { return generalizedCollectionWithFilter('repoTeamPermissions', 'repos.getTeams', teamPermissionsToCopy, token, options, cacheOptions, callback); }, getRepoCollaborators: function getRepoCollaborators(token, options, cacheOptions, callback) { return generalizedCollectionWithFilter('repoCollaborators', 'repos.getCollaborators', memberDetailsToCopy, token, options, cacheOptions, callback); }, getRepoBranches: function getRepoBranches(token, options, cacheOptions, callback) { return generalizedCollectionWithFilter('repoBranches', 'repos.getBranches', branchDetailsToCopy, token, options, cacheOptions, callback); }, getTeamMembers: function getTeamMembers(token, options, cacheOptions, callback) { return generalizedCollectionWithFilter('teamMembers', 'orgs.getTeamMembers', memberDetailsToCopy, token, options, cacheOptions, callback); }, getTeamRepos: function getTeamRepos(token, options, cacheOptions, callback) { return generalizedCollectionWithFilter('teamRepos', 'orgs.getTeamRepos', teamRepoPermissionsToCopy, token, options, cacheOptions, callback); }, }; } module.exports = createIntelligentMethods; <file_sep>/lib/emailRender.js // // Copyright (c) Microsoft. All rights reserved. 
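//
// Usage sketch (an illustrative assumption, not part of the original file): renders the
// pug template at `views/email/<viewName>.pug` under the given base directory and calls
// back with the rendered HTML string, or with a render error.
//
//   const emailRender = require('./lib/emailRender'); // path assumed from app root
//   emailRender.render(__dirname, 'repoApprovals/pleaseApprove', { approvalRequest: { /* view locals */ } }, (error, html) => {
//     if (!error) { /* hand `html` to the mail provider */ }
//   });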
// 'use strict'; const path = require('path'); const pug = require('pug'); module.exports.render = function (basedir, viewName, options, callback) { options = options || {}; if (!viewName) { viewName = 'email'; } if (!options.view) { options.view = viewName; } let html = null; try { const view = path.join(basedir, `views/email/${viewName}.pug`); options.pretty = true; options.basedir = basedir; html = pug.renderFile(view, options); } catch (renderError) { return callback(renderError); } return callback(null, html); }; <file_sep>/lib/github/crossOrganization.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // 'use strict'; const async = require('async'); const Q = require('q'); const composite = require('./composite'); const core = require('./core'); function createMethods(libraryContext, collectionsClient) { function generalizedCollectionMethod(token, apiName, method, options, cacheOptions, callback) { if (callback === undefined && typeof (cacheOptions) === 'function') { callback = cacheOptions; cacheOptions = {}; } const apiContext = composite.create(apiName, method, options); apiContext.maxAgeSeconds = cacheOptions.maxAgeSeconds || 600; apiContext.token = token; apiContext.libraryContext = libraryContext; if (cacheOptions.backgroundRefresh) { apiContext.backgroundRefresh = true; } return core.execute(apiContext, callback); } function getCrossOrganizationMethod(orgsAndTokens, apiName, methodName, options, cacheOptions, callback) { const method = collectionsClient[methodName]; if (!method) { throw new Error(`No method called ${method} defined in the collections client.`); } const crossOrgMethod = function () { const values = {}; values.meta = {}; values.orgs = {}; const deferred = Q.defer(); async.eachOfLimit(orgsAndTokens, 1, (token, orgName, next) => { const localOptions = Object.assign({}, options); localOptions.org = orgName; if (!localOptions.per_page) { localOptions.per_page = 100; } const localCacheOptions = Object.assign({}, cacheOptions); if (localCacheOptions.individualMaxAgeSeconds) { localCacheOptions.maxAgeSeconds = localCacheOptions.individualMaxAgeSeconds; } method(token, localOptions, localCacheOptions, (orgError, orgValues) => { if (orgError) { return next(orgError); } values.orgs[orgName] = orgValues; return next(); }); }, (error) => { if (error) { return deferred.reject(error); } deferred.resolve(values); }); return deferred.promise; }; return generalizedCollectionMethod(orgsAndTokens, apiName, crossOrgMethod, options, cacheOptions, callback); } function crossOrganizationCollection(orgsAndTokens, options, cacheOptions, innerKeyType, outerFunction, collectionMethodName, collectionKey, optionalSetOrganizationLogin) { return () => { const deferred = Q.defer(); const entities = []; entities.meta = {}; outerFunction(orgsAndTokens, {}, cacheOptions, (outerError, entitiesByOrg) => { if (outerError) { return deferred.reject(outerError); } const localCacheOptions = Object.assign({}, cacheOptions); if (localCacheOptions.individualMaxAgeSeconds) { localCacheOptions.maxAgeSeconds = localCacheOptions.individualMaxAgeSeconds; } entities.meta = {}; async.eachLimit(Object.getOwnPropertyNames(entitiesByOrg.orgs), 1, (orgName, nextOrg) => { const orgEntities = entitiesByOrg.orgs[orgName]; async.eachLimit(orgEntities, 1, (orgEntity, next) => { const cloneTarget = optionalSetOrganizationLogin ? 
{ organization: { login: orgName, } } : {}; const entityClone = Object.assign(cloneTarget, orgEntity); const localOptionsTarget = { per_page: 100, }; switch (innerKeyType) { case 'team': localOptionsTarget.id = orgEntity.id; break; case 'repo': localOptionsTarget.owner = orgName; localOptionsTarget.repo = orgEntity.name; break; default: throw new Error(`Unsupported inner key type ${innerKeyType}`); } const localOptions = Object.assign(localOptionsTarget, options); delete localOptions.maxAgeSeconds; delete localOptions.backgroundRefresh; const token = orgsAndTokens[orgName]; if (!token) { return next(new Error(`No token available for the org "${orgName}"`)); } collectionsClient[collectionMethodName](token, localOptions, localCacheOptions, (collectionsError, innerEntities) => { // This is a silent error for now, because there // are valid scenarios, i.e. team deletion, to consider. // In the future, get smarter here. if (collectionsError) { return next(); } entityClone[collectionKey] = innerEntities; entities.push(entityClone); return next(); }); }, nextOrg); }, (error) => { return error ? deferred.reject(error) : deferred.resolve(entities); }); }); return deferred.promise; }; } function getAllTeams(orgsAndTokens, options, cacheOptions, callback) { options.apiTypePrefix = 'github.x#'; return getCrossOrganizationMethod(orgsAndTokens, 'teams', 'getOrgTeams', options, cacheOptions, callback); } function getAllRepos(orgsAndTokens, options, cacheOptions, callback) { options.apiTypePrefix = 'github.x#'; return getCrossOrganizationMethod(orgsAndTokens, 'repos', 'getOrgRepos', options, cacheOptions, callback); } return { orgMembers: function getAllMembers(orgsAndTokens, options, cacheOptions, callback) { options.apiTypePrefix = 'github.x#'; return getCrossOrganizationMethod(orgsAndTokens, 'orgMembers', 'getOrgMembers', options, cacheOptions, callback); }, teams: getAllTeams, teamMembers: function getAllTeamMembers(orgsAndTokens, options, cacheOptions, callback) { options.apiTypePrefix = 'github.x#'; return generalizedCollectionMethod(orgsAndTokens, 'teamMembers', crossOrganizationCollection(orgsAndTokens, options, cacheOptions, 'team', getAllTeams, 'getTeamMembers', 'members', true), options, cacheOptions, callback); }, repos: getAllRepos, repoCollaborators: function getAllRepoCollaborators(orgsAndTokens, options, cacheOptions, callback) { options.apiTypePrefix = 'github.x#'; return generalizedCollectionMethod(orgsAndTokens, 'repoCollaborators', crossOrganizationCollection(orgsAndTokens, options, cacheOptions, 'repo', getAllRepos, 'getRepoCollaborators', 'collaborators', true), options, cacheOptions, callback); }, repoTeams: function getAllRepoTeams(orgsAndTokens, options, cacheOptions, callback) { options.apiTypePrefix = 'github.x#'; return generalizedCollectionMethod( orgsAndTokens, 'repoTeams', crossOrganizationCollection(orgsAndTokens, options, cacheOptions, 'repo', getAllRepos, 'getRepoTeams', 'teams', true), options, cacheOptions, callback); }, }; } module.exports = createMethods; <file_sep>/middleware/passport/plainSerializer.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // 'use strict'; /*eslint no-console: ["error", { allow: ["warn"] }] */ function serialize(config, user, done) { done(null, user); } function deserialize(config, user, done) { done(null, user); } function initialize() { console.warn('Plain text session tokens are in use. 
Not recommended for production.'); } module.exports = { serialize: serialize, deserialize: deserialize, initialize: initialize, }; <file_sep>/config/github.approvalTypes.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // 'use strict'; const arrayFromString = require('./utils/arrayFromString'); const approvalFieldsFileVariableName = 'GITHUB_APPROVAL_FIELDS_FILE'; module.exports = function (graphApi) { const environmentProvider = graphApi.environment; const fieldsFile = environmentProvider.get(approvalFieldsFileVariableName); const approvalFields = fieldsFile ? require(`../data/${fieldsFile}`) : undefined; return { repo: arrayFromString(environmentProvider.get('REPO_APPROVAL_TYPES') || 'github'), teamJoin: arrayFromString(environmentProvider.get('TEAM_JOIN_APPROVAL_TYPES') || 'github'), fields: approvalFields, }; }; <file_sep>/resources/scss/_build.scss @import "../../bower_components/bootswatch/yeti/_variables"; @import "../../bower_components/bootstrap-sass/assets/stylesheets/bootstrap"; @import "../../bower_components/bootswatch/yeti/_bootswatch"; <file_sep>/views/repos/autocomplete.js var engine = new Bloodhound({ name: 'allOrgsRepos', local: [], remote: { url: '/repos/search?q=%QUERY', wildcard: '%QUERY', }, datumTokenizer: function(d) { return Bloodhound.tokenizers.whitespace(d.val); }, queryTokenizer: Bloodhound.tokenizers.whitespace, }); engine.initialize(); $('.typeahead').typeahead(null, { name: 'allOrgsRepos', displayKey: 'val', source: engine, }); <file_sep>/business/repoSearch.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // 'use strict'; const Q = require('q'); const moment = require('moment'); const querystring = require('querystring'); const defaultPageSize = 20; // GitHub.com seems to use a value around 33 function repoSearch(repos, options) { options = options || {}; this.repos = repos; // is repoStore in opensource.microsoft.com, this is different by design this.pageSize = options.pageSize || defaultPageSize; this.observedLanguages = new Set(); this.observedLanguagesEncoded = new Map(); this.phrase = options.phrase; this.type = options.type; this.language = options.language; this.graphManager = options.graphManager; if (options.specificTeamRepos && options.specificTeamPermissions) { this.specificTeamRepos = options.specificTeamRepos; this.specificTeamPermissions = options.specificTeamPermissions; } if (options.teamsType && options.repoPermissions) { this.teamsType = options.teamsType; this.teamsSubType = options.teamsSubType; this.repoPermissions = options.repoPermissions; this.userRepos = options.userRepos; } } repoSearch.prototype.search = function search(tags, page, sort) { const self = this; self.page = parseInt(page); self.tags = tags; self.sort = sort ? 
sort.charAt(0).toUpperCase() + sort.slice(1) : 'Pushed'; return Q.all( self.filterBySpecificTeam(self.specificTeamRepos) .filterByLanguageAndRecordAllLanguages(self.language) .filterByType(self.type) .filterByPhrase(self.phrase) .filterByTeams(self.teamsType) .determinePages()['sortBy' + self.sort]() .getPage(self.page) .augmentInformation() ); }; repoSearch.prototype.augmentInformation = function() { // Make sure we have the pretty dates and all for what's been selected this.repos.forEach(repo => { setupLocalDateInstances(repo); repo.momentDisplay = {}; if (repo.moment.updated) { repo.momentDisplay.updated = repo.moment.updated.fromNow(); } if (repo.moment.created) { repo.momentDisplay.created = repo.moment.created.fromNow(); } if (repo.moment.pushed) { repo.momentDisplay.pushed = repo.moment.pushed.fromNow(); } }); return this; }; repoSearch.prototype.determinePages = function() { this.totalPages = Math.ceil(this.repos.length / this.pageSize); this.totalRepos = this.repos.length; return this; }; repoSearch.prototype.getPage = function(page) { this.repos = this.repos.slice((page - 1) * this.pageSize, ((page - 1) * this.pageSize) + this.pageSize); this.pageFirstRepo = 1 + ((page - 1) * this.pageSize); this.pageLastRepo = this.pageFirstRepo + this.repos.length - 1; return this; }; repoSearch.prototype.sortByStars = function() { this.repos.sort((a, b) => { return b.stargazers_count - a.stargazers_count; }); return this; }; function repoMatchesPhrase(repo, phrase) { // Poor man's search, starting with just a raw includes search // Assumes that phrase is already lowercase to work let string = ((repo.name || '') + (repo.description || '')).toLowerCase(); return string.includes(phrase); } repoSearch.prototype.filterByType = function (type) { let filter = null; switch (type) { case 'public': filter = r => { return r.private === false; }; break; case 'private': filter = r => { return r.private === true; }; break; case 'source': filter = r => { return r.fork === false; }; break; case 'fork': filter = r => { return r.fork === true; }; break; } if (filter) { this.repos = this.repos.filter(filter); } return this; }; repoSearch.prototype.filterByPhrase = function (phrase) { if (phrase) { phrase = phrase.toLowerCase(); this.repos = this.repos.filter(r => { return repoMatchesPhrase(r, phrase); }); } return this; }; repoSearch.prototype.filterBySpecificTeam = function (specificTeamRepos) { if (specificTeamRepos) { // Also augment individual repos with permissions information const reposAndPermissions = new Map(); specificTeamRepos.forEach(specificTeamAndPermission => { reposAndPermissions.set(specificTeamAndPermission.id, specificTeamAndPermission.permissions); }); this.repos = this.repos.filter(repo => { const permissions = reposAndPermissions.get(repo.id); if (permissions) { repo.permissions = permissions; } return !!permissions; }); } return this; }; repoSearch.prototype.filterByTeams = function (teamsType) { if (teamsType === 'teamless' || teamsType === 'my') { const repoPermissions = this.repoPermissions; if (!repoPermissions) { throw new Error('Missing team and repo permissions instances to help filter by teams'); } const repos = new Set(); switch (teamsType) { case 'my': { const subType = this.teamsSubType; this.userRepos.forEach(repo => { const myPermission = repo.personalized.permission; let ok = false; if (subType === 'admin' && myPermission === 'admin') { ok = true; } else if (subType === 'write' && (myPermission === 'admin' || myPermission === 'write')) { ok = true; } else if (subType === 
'read') { ok = true; } if (ok) { repos.add(repo.id); } }); break; } case 'teamless': { repoPermissions.forEach(repo => { if (!repo.teams || repo.teams.length === 0) { repos.add(repo.id); } }); break; } } this.repos = this.repos.filter(repo => { return repos.has(repo.id); }); } return this; }; repoSearch.prototype.filterByLanguageAndRecordAllLanguages = function (language) { const self = this; this.repos = this.repos.filter(r => { // Fill the set with all languages before filtering if (r.language) { self.observedLanguages.add(r.language); self.observedLanguagesEncoded.set(r.language, querystring.escape(r.language)); self.observedLanguagesEncoded.set(r.language.toLowerCase(), querystring.escape(r.language)); } if (!language) { return true; } if (r.language) { return r.language.toLowerCase() === language.toLowerCase(); } }); return this; }; repoSearch.prototype.sortByForks = function() { this.repos.sort((a, b) => { return b.forks_count - a.forks_count; }); return this; }; repoSearch.prototype.sortBySize = function() { this.repos.sort((a, b) => { if (a.size > b.size) { return -1; } else if (a.size < b.size) { return 1; } return 0; }); return this; }; repoSearch.prototype.sortByAlphabet = function() { this.repos.sort((a, b) => { let nameA = a.name.toLowerCase(); let nameB = b.name.toLowerCase(); if (nameA < nameB) { return -1; } if (nameA > nameB) { return 1; } return 0; }); return this; }; function setupLocalDateInstances(repo) { if (repo.moment) { return; } const updated = repo.updated_at ? moment(repo.updated_at) : undefined; const pushed = repo.pushed_at ? moment(repo.pushed_at) : undefined; const created = repo.created_at ? moment(repo.created_at) : undefined; repo.moment = { updated: updated, pushed: pushed, created: created, }; } function sortDates(a, b) { // Inverted sort (newest first) return b.isAfter(a) ? 1 : -1; } repoSearch.prototype.sortByUpdated = function() { this.repos = this.repos.filter(r => { return r.updated_at; }); this.repos.sort((a, b) => { setupLocalDateInstances(a); setupLocalDateInstances(b); return sortDates(a.moment.updated, b.moment.updated); }); return this; }; repoSearch.prototype.sortByCreated = function() { this.repos = this.repos.filter(r => { return r.created_at; }); this.repos.sort((a, b) => { setupLocalDateInstances(a); setupLocalDateInstances(b); return sortDates(a.moment.created, b.moment.created); }); return this; }; repoSearch.prototype.sortByPushed = function() { this.repos = this.repos.filter(r => { return r.pushed_at; }); this.repos.sort((a, b) => { setupLocalDateInstances(a); setupLocalDateInstances(b); return sortDates(a.moment.pushed, b.moment.pushed); }); return this; }; repoSearch.prototype.filterPublic = function(publicOnly) { if (publicOnly) { this.repos = this.repos.filter(r => { return !r.private; }); } return this; }; module.exports = repoSearch; <file_sep>/views/welcome.pug //- //- Copyright (c) Microsoft. All rights reserved. //- Licensed under the MIT license. See LICENSE file in the project root for full license information. //- extends layout block content - var primary = config.authentication.scheme === 'github'? 'GitHub' : config.brand.companyName - var secondary = config.authentication.scheme === 'github'? 
config.brand.companyName : 'GitHub' div.container div.row div.col-md-12.col-lg-12.col-sm-12 if config.authentication.scheme === 'github' && user && user.github h1 Hi, #{user.github.displayName || user.github.username} else if config.authentication.scheme === 'aad' && user && user.azure h1 Hi, #{user.azure.displayName || user.azure.username} if config.authentication.scheme === 'github' p.lead To onboard, please authenticate with Active Directory. p a.btn.btn-lg.btn-primary(href='/signin/azure') Sign in to #{config.brand.companyName} else if config.authentication.scheme === 'aad' p.lead Linking your #{config.brand.companyName} and GitHub accounts gives you access to the #{config.brand.companyName} orgs, teams, and repos on GitHub. We already know your #{primary} id, so just sign in to the #{secondary} account you want to link and we'll set that up. div.row div.col-md-6.col-lg-6.col-sm-6 div a.btn.btn-primary.btn-huge.full-width(href='/signin/github') h3 I already have a GitHub account p Sign in div.col-md-6.col-lg-6.col-sm-6 div a.btn.btn-default.btn-huge.full-width(href='/signin/github/join') h3 I'm new to GitHub p Create a GitHub account div.row div.col-md-12.col-lg-12.col-sm-12 div(style='padding:12px') p.lead Linking should take 2-10 minutes p The process has just a few steps depending on whether or not you already have a GitHub account and multi-factor authentication setup. ul if config.authentication.scheme === 'github' li Validate the corporate account to link with #{user.github.username}. if config.authentication.scheme === 'aad' li Validate the GitHub account to link with #{user.azure.username}. li Verify and/or configure multi-factor authentication for your GitHub account. li [Optional] Join #{config.brand.companyName} GitHub organizations. li [Optional] Request access to GitHub teams. if config.authentication.scheme === 'aad' hr p.lead Multiple GitHub accounts? p. Since Git repositories can be locally configured to mark commits with a name and e-mail address, it's easy to use a single GitHub account for both #{config.brand.companyName} and other GitHub projects you work on. We recommend you use one GitHub account as this personalizes your contributions and builds a stronger sense of community. <file_sep>/middleware/passport/serializer.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // 'use strict'; const serializer = {}; function ensureSerializer(config) { const serializerKey = config.session.encryption; if (!serializer[serializerKey]) { serializer[serializerKey] = require(serializerKey === true ? './encryptionSerializer' : './plainSerializer'); } return serializer[serializerKey]; } function createSerialize(options) { return ensureSerializer(options.config).serialize.bind(null, options); } function createDeserialize(options) { return ensureSerializer(options.config).deserialize.bind(null, options); } function initialize(options, app) { const serializerInstance = ensureSerializer(options.config); const initializer = serializerInstance.initialize; if (initializer) { // Allow an opportunity to provide a warning or connect a route initializer(options, app, serializerInstance); } } module.exports = { initialize: initialize, serialize: createSerialize, deserialize: createDeserialize, }; <file_sep>/views/email/report.pug //- //- Copyright (c) Microsoft. All rights reserved. //- Licensed under the MIT license. See LICENSE file in the project root for full license information. 
//- extends body block content != html <file_sep>/views/email/npm/published.pug //- //- Copyright (c) Microsoft. All rights reserved. //- Licensed under the MIT license. See LICENSE file in the project root for full license information. //- extends ../body block content if context.securityUpdates h3 Publishing/packaging scripts skipped p The NPM publishing service is not a build service and is not able to transpile or otherwise run publishing-related NPM scripts. p The user who authorized the publish #[strong acknowledged that it is OK to proceed] with publishing just the package files in lieu of running the defined publishing scripts. p If a project is implemented in TypeScript, CoffeeScript, or other technology and needs to transpile, a minor incremental version update should now be performed. p As the requesting user has been authorized to publish the NPM module, the user's standard NPM token and build process can be continued. p Alternate options included: ul li Offering to publish from an alternate branch that could have the publish output committed li Publishing a skeleton (nearly empty) package from a branch, then turning over control to the author if log h3 Operations ul each le in log if le li= le <file_sep>/views/teams/index.pug //- //- Copyright (c) Microsoft. All rights reserved. //- Licensed under the MIT license. See LICENSE file in the project root for full license information. //- extends ../layout block append js_doc_ready include ../js/search.js block content - var languageColor = viewServices.languageColor - var octicon = viewServices.octicon .container if organization h3= organization.name + ' Teams' else h1 GitHub Teams p.lead Across all officially managed Microsoft organizations .row .col-md-8 form.form-horizontal#entitySearch(style='margin-top:24px') .form-group .col-md-5 div.input-group input.form-control#inputQuery( placeholder='Search repositories...', type='text', value=query && query.phrase ? query.phrase : null, style='max-width:400px') span.input-group-btn button( class='btn btn-muted' type='submit' style='border-width: 1px') Search .col-md-7 ul.nav.nav-pills li(class=(query.set === 'all' ? 'active' : ''), title='All teams') a(href='?set=all') | All teams li(class=(query.set === 'your' ? 'active' : ''), title='Teams you are a member or maintainer of') a(href='?set=your') | Your teams li(class=(query.set === 'available' ? 'active' : ''), title='Teams that are available and you are not a member of') a(href='?set=available') | Available teams if filters.length p(style='margin-top:24px') if search.totalTeams > 1 strong= search.totalTeams.toLocaleString() | results else if search.totalTeams === 1 strong 1 | result else | No results = ' for ' each filter in filters = filter.displayPrefix ? filter.displayPrefix + ' ' : '' strong= filter.displayValue || filter.value = ' ' = filter.displaySuffix ? filter.displaySuffix + ' ' : '' a.pull-right.btn.btn-sm.btn-muted-more(href='?') != octicon('x', 14) = ' Clear filter' hr if search.totalTeams === 0 .well.well-lg div.text-center p != octicon('organization', 24) if organization if query.set === 'your' p.lead None of your teams match. p a.btn.btn-primary.btn-sm(href='?set=available') View available teams else p.lead This organization doesn't have any teams that match. else p.lead No teams match across all organizations. else nav(style='margin-bottom:48px') ul.pager li.previous(class=(search.page > 1 ? '' : 'disabled')) a(href='?page_number=' + (search.page-1) + (query.set ? '&set=' + query.set : '') + (query.phrase ? 
'&q=' + query.phrase : '')) span(aria-hidden="true") &larr; Previous li | #{search.pageFirstTeam.toLocaleString()} - #{search.pageLastTeam.toLocaleString()} of #{search.totalTeams.toLocaleString()} li.next(class=(search.page < search.totalPages ? '' : 'disabled')) a(href='?page_number=' + (search.page+1) + (query.set ? '&set=' + query.set : '') + (query.phrase ? '&q=' + query.phrase : '')) span(aria-hidden="true") Next &rarr; .container each team in search.teams .row.vertical-pad-bottom // team: name, id, slug, description, organization.login //- TODO: consider fixing this to be consistent in cross-org vs single org - var orgName = team.organization.login ? team.organization.login : team.organization.name - var isMultipleOrganizationView = !organization && team.organization - var teamLink = '/' + orgName + '/teams/' + team.slug .col-md-6 .team(id=team.slug) h3: a(href=teamLink)= team.name if team.description p=team.description .col-md-4(style={ 'margin-top': isMultipleOrganizationView ? null : '30px' }) if isMultipleOrganizationView h4(style='margin-top:12px')= orgName //- Actions - var userStatus = yourTeamsMap.get(team.id) if userStatus === 'maintainer' p: a.btn.btn-sm.btn-default(href=teamLink, title='As a Team Maintainer, you can manage this team') Manage team else if userStatus === 'member' p: a.btn.btn-sm.btn-muted-more(href=teamLink, title='You are a member of this team') You're a team member else p: a.btn.btn-sm.btn-muted(href=teamLink, title='Learn more about this team') Team information nav ul.pager li.previous(class=(search.page > 1 ? '' : 'disabled')) a(href='?page_number=' + (search.page-1) + (query.set ? '&set=' + query.set : '') + (query.phrase ? '&q=' + query.phrase : '')) span(aria-hidden="true") &larr; Previous li h4(style="display:inline") | Page #{search.page} of #{search.totalPages} li.next(class=(search.page < search.totalPages ? '' : 'disabled')) a(href='?page_number=' + (search.page+1) + (query.set ? '&set=' + query.set : '') + (query.phrase ? '&q=' + query.phrase : '')) span(aria-hidden="true") Next &rarr; .col-md-3.col-md-offset-1 div(style='margin-left:12px') if orgs h4 Need to create a team? p Any organization member can create a new team directly on the GitHub.com site. hr h4 Organizations ul.list-unstyled each org in orgs li(style='margin-bottom:8px') a.btn.btn-muted-more.btn-sm( href='/' + org.name + '/teams' )= org.name <file_sep>/middleware/passport-config.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // 'use strict'; const passport = require('passport'); const serializer = require('./passport/serializer'); const GitHubStrategy = require('../thirdparty/passport-github').Strategy; const OIDCStrategy = require('passport-azure-ad').OIDCStrategy; // FYI: GitHub does not provide refresh tokens function githubResponseToSubset(accessToken, refreshToken, profile, done) { let subset = { github: { accessToken: accessToken, displayName: profile.displayName, avatarUrl: profile._json && profile._json.avatar_url ? profile._json.avatar_url : undefined, id: profile.id, username: profile.username, }, }; return done(null, subset); } function githubResponseToIncreasedScopeSubset(accessToken, refreshToken, profile, done) { let subset = { githubIncreasedScope: { accessToken: accessToken, id: profile.id, username: profile.username, }, }; return done(null, subset); } function activeDirectorySubset(iss, sub, profile, done) { // CONSIDER: TODO: Hybrid tenant checks. 
// Internal-only code: // ---------------------------------------------------------------- // We've identified users with e-mail addresses in AAD similar to // myoutlookaddress#live.com. These are where people have had work // shared with them through a service like Office 365; these users // are not technically employees with active credentials, and so // they should *not* have access. We reject here before the // session tokens can be saved. // if (username && username.indexOf && username.indexOf('#') >= 0) { // return next(new Error('Your hybrid tenant account, ' + username + ', is not permitted for this resource. Were you invited as an outside collaborator by accident? Please contact us if you have any questions.')); // } let subset = { azure: { displayName: profile.displayName, oid: profile.oid, username: profile.upn, }, }; done(null, subset); } module.exports = function (app, config) { if (!config.authentication.scheme) { config.authentication.scheme = 'github'; } if (config.authentication.scheme !== 'github' && config.authentication.scheme !== 'aad') { throw new Error(`Unsupported primary authentication scheme type "${config.authentication.scheme}"`); } // ---------------------------------------------------------------------------- // GitHub Passport session setup. // ---------------------------------------------------------------------------- let githubOptions = { clientID: config.github.oauth2.clientId, clientSecret: config.github.oauth2.clientSecret, callbackURL: config.github.oauth2.callbackUrl, appInsightsClient: app.get('appInsightsClient'), scope: [], userAgent: 'passport-azure-oss-portal-for-github' // CONSIDER: User agent should be configured. }; let githubPassportStrategy = new GitHubStrategy(githubOptions, githubResponseToSubset); let aadStrategy = new OIDCStrategy({ redirectUrl: config.activeDirectory.redirectUrl, allowHttpForRedirectUrl: config.webServer.allowHttp, realm: config.activeDirectory.tenantId, clientID: config.activeDirectory.clientId, clientSecret: config.activeDirectory.clientSecret, oidcIssuer: config.activeDirectory.issuer, identityMetadata: 'https://login.microsoftonline.com/' + config.activeDirectory.tenantId + '/.well-known/openid-configuration', responseType: 'id_token code', responseMode: 'form_post', validateIssuer: true, }, activeDirectorySubset); // Validate the borrow some parameters from the GitHub passport library if (githubPassportStrategy._oauth2 && githubPassportStrategy._oauth2._authorizeUrl) { app.set('runtime/passport/github/authorizeUrl', githubPassportStrategy._oauth2._authorizeUrl); } else { throw new Error('The GitHub Passport strategy library may have been updated, it no longer contains the expected Authorize URL property within the OAuth2 object.'); } if (githubPassportStrategy._scope && githubPassportStrategy._scopeSeparator) { app.set('runtime/passport/github/scope', githubPassportStrategy._scope.join(githubPassportStrategy._scopeSeparator)); } else { throw new Error('The GitHub Passport strategy library may have been updated, it no longer contains the expected Authorize URL property within the OAuth2 object.'); } passport.use('github', githubPassportStrategy); passport.use('azure-active-directory', aadStrategy); // ---------------------------------------------------------------------------- // Expanded OAuth-scope GitHub access for org membership writes. 
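// The strategy registered below as 'expanded-github-scope' requests the
// write:org OAuth scope (its callback URL is suffixed with '/increased-scope')
// so that organization membership writes can be performed on the user's
// behalf; the primary GitHub strategy above intentionally requests no scopes.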
// ---------------------------------------------------------------------------- let expandedGitHubScopeStrategy = new GitHubStrategy({ clientID: config.github.oauth2.clientId, clientSecret: config.github.oauth2.clientSecret, callbackURL: config.github.oauth2.callbackUrl + '/increased-scope', scope: ['write:org'], userAgent: 'passport-azure-oss-portal-for-github' // CONSIDER: User agent should be configured. }, githubResponseToIncreasedScopeSubset); passport.use('expanded-github-scope', expandedGitHubScopeStrategy); app.use(passport.initialize()); app.use(passport.session()); const serializerOptions = { config: config, keyResolver: app.get('keyEncryptionKeyResolver'), }; passport.serializeUser(serializer.serialize(serializerOptions)); passport.deserializeUser(serializer.deserialize(serializerOptions)); serializer.initialize(serializerOptions, app); app.use((req, res, next) => { if (req.insights && req.insights.properties && config.authentication.scheme === 'aad' && req.user && req.user.azure) { req.insights.properties.aadId = req.user.azure.oid; } next(); }); return passport; }; <file_sep>/lib/repo.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // const async = require('async'); const debug = require('debug')('azureossportal'); const githubEntityClassification = require('../data/github-entity-classification.json'); const utils = require('../utils'); const Issue = require('./issue'); const ossManagementDb = require('./ossManagementDb'); function OpenSourceRepo(orgInstance, repoName, optionalGitHubInstance) { if (!orgInstance) { throw new Error('orgInstance is not defined.'); } this.org = orgInstance; this.oss = this.org.oss; var i = repoName.indexOf('/'); if (i >= 0) { this.full_name = repoName; var orgName = repoName.substring(0, i); repoName = repoName.substring(i + 1); if (orgName.toLowerCase() !== orgInstance.name.toLowerCase()) { debug('WARNING: The org name does not match: (' + orgName + ', ' + orgInstance.name + ')'); } } else { this.full_name = orgInstance.name + '/' + repoName; } this.name = repoName; this.inner = { issues: {} }; this.otherFields = {}; this._detailsLoaded = false; if (optionalGitHubInstance) { setDetails(this, optionalGitHubInstance); } } // ---------------------------------------------------------------------------- // Properties of interest in the standard GitHub response for a user // ---------------------------------------------------------------------------- var detailsToCopy = githubEntityClassification.repo.keep; var detailsToSkip = githubEntityClassification.repo.strip; // ---------------------------------------------------------------------------- // Creates a GitHub API client for this repo. // ---------------------------------------------------------------------------- OpenSourceRepo.prototype.createGitHubRepoClient = function (alternateToken) { var client = this.org.createGenericGitHubClient(alternateToken); debug('creating repo client for ' + this.org.name + '/' + this.name); return client.repo(this.org.name + '/' + this.name); }; // ---------------------------------------------------------------------------- // Retrieve the details for the repo. 
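// Uses the organization owner token with the shared GitHub library wrapper
// ('repos.get') and copies the interesting fields onto this instance through
// setDetails. Illustrative usage: repo.getDetails((error, details) => { ... });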
// ---------------------------------------------------------------------------- OpenSourceRepo.prototype.getDetails = function (callback) { 'use strict'; const self = this; const token = this.org.inner.settings.ownerToken; const options = { owner: this.org.name, repo: this.name, }; return this.oss.githubLibrary.call(token, 'repos.get', options, (error, details) => { if (error) { return callback(utils.wrapError(error, 'Could not get details about the repo. It may not exist.')); } setDetails(self, details); callback(null, details); }); }; // ---------------------------------------------------------------------------- // Get contribution statistics for the repo. // ---------------------------------------------------------------------------- OpenSourceRepo.prototype.contributorsStatsOneTime = function (callback) { this.createGitHubRepoClient().contributorsStats(function (error, stats) { if (error) { var er = utils.wrapError(error, ''); if (error && error.status && error.status == 202) { er.status = 202; } return callback(er); } callback(null, stats); }); }; // ---------------------------------------------------------------------------- // Add a collaborator with a specified permission level. // ---------------------------------------------------------------------------- OpenSourceRepo.prototype.addCollaborator = function (githubUsername, permissionLevel, callback) { if (typeof permissionLevel == 'function') { callback = permissionLevel; permissionLevel = 'pull'; } this.createGitHubRepoClient().addCollaborator(githubUsername, { permission: permissionLevel, }, function (error /*, ignoredInfo */) { if (error) { var userIntended = error.statusCode == 404; var message = error.statusCode == 404 ? 'The GitHub username "' + githubUsername + '" does not exist.' : 'The collaborator could not be added to GitHub at this time. There may be a problem with the GitHub API.'; return callback(utils.wrapError(error, message, userIntended)); } callback(); }); }; // ---------------------------------------------------------------------------- // Remove a collaborator. // ---------------------------------------------------------------------------- OpenSourceRepo.prototype.removeCollaborator = function (githubUsername, callback) { var self = this; this.createGitHubRepoClient().removeCollaborator(githubUsername, function (error) { if (error) { return callback(utils.wrapError(error, 'The collaborator could not be removed at this time. Was "' + githubUsername + '" even a collaborator for ' + self.name + '?')); } callback(); }); }; // ---------------------------------------------------------------------------- // Retrieve the list of teams that maintain this repo. 
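// Results are cached in Redis under 'org#<org>/repo#<repo>:teams' with a short
// randomized expiry; pass allowRedis=false to bypass the cache and query the
// GitHub API directly. Illustrative usage:
//   repo.teams(false, (error, teams) => { /* teams are org team instances */ });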
// ---------------------------------------------------------------------------- OpenSourceRepo.prototype.teams = function getRepoTeamList(allowRedis, callback) { var self = this; if (typeof allowRedis == 'function') { callback = allowRedis; allowRedis = true; } var instancesFromJson = function (teamInstances) { async.map(teamInstances, function (teamInstance, cb) { cb(null, self.org.team(teamInstance.id, teamInstance)); }, callback); }; var redisKey = 'org#' + self.org.name + '/repo#' + self.name + ':teams'; self.oss.redis.getObject(redisKey, function (error, data) { if (!error && data && allowRedis === true) { return instancesFromJson(data); } var ghrepo = self.createGitHubRepoClient(); ghrepo.teams(function (error, teamInstances) { if (error) { return callback(error); } self.oss.redis.setObjectWithExpire(redisKey, teamInstances, utils.randomInteger(20, 90), function () { instancesFromJson(teamInstances); }); }); }); }; // ---------------------------------------------------------------------------- // Retrieve all web hooks directly installed on the GitHub repo. // ---------------------------------------------------------------------------- OpenSourceRepo.prototype.getWebhooks = function (callback) { const client = this.createGitHubRepoClient(); utils.retrieveAllPages(client.hooks.bind(client), (error, hooks) => { if (error) { return callback(utils.wrapError(error, 'Could not retrieve the web hooks for the repo.')); } callback(null, hooks); }); }; // ---------------------------------------------------------------------------- // Retrieve all web hooks directly installed on the GitHub repo (no pagination). // ---------------------------------------------------------------------------- OpenSourceRepo.prototype.getAllWebhooks = function (callback) { const client = this.createGitHubRepoClient(); client.hooks(callback); }; // ---------------------------------------------------------------------------- // Delete a web hook by id directly installed on the GitHub repo. // ---------------------------------------------------------------------------- OpenSourceRepo.prototype.deleteWebhook = function (id, callback) { const client = this.createGitHubRepoClient(); client.deleteHook(id, callback); }; // ---------------------------------------------------------------------------- // Create a web hook directly installed on the GitHub repo. // events object example: ['push', 'pull_request'] // ---------------------------------------------------------------------------- OpenSourceRepo.prototype.createWebhook = function (url, events, callback) { const client = this.createGitHubRepoClient(); client.hook({ name: 'web', active: true, events: events, config: { url: url, content_type: 'json', } }, callback); }; // ---------------------------------------------------------------------------- // Set the legacy CLA automation information for the repo. // This method should not be open sourced, as it is an internal API for // system migration purposes. 
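// Illustrative (not exhaustive) options shape, based on the validation in the
// function body: { legalEntity: 'Microsoft', emails: ['owner@contoso.com'] }.
// legalEntity must match a configured CLA team for the organization; emails
// are stored with the CLA repository data upserted into the legacy database.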
// ---------------------------------------------------------------------------- OpenSourceRepo.prototype.enableLegacyClaAutomation = function enableLegacyClaAutomation(options, callback) { const self = this; 'use strict'; options = options || {}; // if (!options.emails) { // return callback(new Error('At least one e-mail must be provided to the enable CLA endpoint.')); // } if (!options.legalEntity) { return callback(new Error('The CLA legal entity is required.')); } const available = this.org.isLegacyClaAutomationAvailable(); if (available === 'offline') { return callback(new Error('CLA automation features are temporarily offline.')); } else if (available === false) { return callback(new Error('This organization has not enabled CLA automation features.')); } const legalEntity = options.legalEntity; const claTeams = this.org.getLegacyClaTeams(false /* do not throw if not configured */); let claTeam = claTeams[legalEntity]; if (!claTeam) { return callback(new Error(`No CLA configuration available for the organization and the ${legalEntity} CLA.`)); } const orgDbID = self.org.setting('ossDatabaseId'); if (!orgDbID) { return callback(new Error('No known OSS database ID!!!')); } const claEntities = self.oss.setting('cla').entities; let claData = { repoName: self.name, organizationName: self.org.name, organizationId: orgDbID, description: self.description, isPrivate: self.private || true, repoGitHubId: self.id, webHookId: null, emails: options.emails, legalEntity: legalEntity, licenseId: claEntities[legalEntity].licenseId, createdAt: self.created_at, updatedAt: self.updated_at || Date.now(), sourceUrl: self.html_url, isFork: self.fork || false }; async.waterfall([ function getRepoDetails(callback) { if (claData.repoGitHubId) { // The data for existing repos should be pre-populated. return callback(null); } self.getDetails(() => { // Populate repo details for new repos. claData.description = self.description; claData.isPrivate = self.private || true; claData.repoGitHubId = self.id; claData.createdAt = self.created_at; claData.updatedAt = self.updated_at; claData.sourceUrl = self.html_url; claData.isFork = self.fork || false; callback(null); }); }, function getClaTeam(callback) { self.oss.getTeam(claTeam.id, callback); }, function addRepoToClaTeam(team, callback) { self.oss.insights.trackEvent('AddRepoToClaTeam', { repoName: self.name, claTeamId: claTeam.id }); team.addRepository(self.name, 'push', callback); }, function getRepoWebhooks(response, body, callback) { self.getAllWebhooks(callback); }, function findRepoWebhooksAndDeleteOtherClaWebhooks(webhooks, response, callback) { if (!webhooks || webhooks.length === 0) { return callback(); } return async.eachSeries(webhooks, (webhook, next) => { let webhookUrl = null; if (webhook && webhook.config) { webhookUrl = webhook.config.url; } if (webhookUrl === claEntities[claData.legalEntity].webhookUrl) { // CLA webhook already exists for this CLA entity. claData.webHookId = webhook.id; return next(); } else { const claKeys = Object.keys(claEntities); return async.eachSeries(claKeys, (key, innerNext) => { if (claEntities[key].webhookUrl === webhookUrl) { // Check if there is another existing CLA webhook. 
self.oss.insights.trackEvent('DeleteClaWebhook', { repoName: self.name, claEntity: key, webhookUrl: webhookUrl }); self.deleteWebhook(webhook.id, innerNext); } else { return innerNext(); } }, next); } }, callback); }, function addClaWebhook(callback) { if (claData.webHookId) { // CLA web hook already exists return callback(null); } self.oss.insights.trackEvent('AddClaWebhook', { repoName: self.name, claEntity: claData.legalEntity, webhookUrl: claEntities[claData.legalEntity].webhookUrl }); self.createWebhook(claEntities[claData.legalEntity].webhookUrl, ['pull_request'], (error, response) => { claData.webHookId = response.id; return callback(null); }); }, function upsertClaReposDataInDb(callback) { self.oss.insights.trackEvent('UpsertClaReposDataInDb', claData); const ossDbClient = self.oss.ossDbClient(); ossManagementDb.upsertClaRepositoryData(ossDbClient, claData, callback); } ], function asyncComplete(error) { if (error) { self.oss.insights.trackException(error, { name: 'EnableLegacyClaAutomationError' }); } return callback(error); }); }; // ---------------------------------------------------------------------------- // Retrieve legacy database settings. // ---------------------------------------------------------------------------- OpenSourceRepo.prototype.getLegacyClaSettings = function (callback) { const self = this; const ossDbClient = self.oss.ossDbClient(); ossManagementDb.getClaRepositorySettings(ossDbClient, this.id, callback); }; // ---------------------------------------------------------------------------- // Checks whether there may be a CLA rule assigned by looking for the web hook. // This is a cheap and quick way to do this instead of getting a way to query // the old legacy hub API to see if one exists (since no such API exists yet). // ---------------------------------------------------------------------------- OpenSourceRepo.prototype.hasLegacyClaAutomation = function hasLegacyClaAutomation(callback) { const self = this; self.getWebhooks((error, webhooks) => { if (error || !webhooks) { return callback(utils.wrapError(error, 'Could not retrieve the web hooks to check for CLA automation.')); } for (var i = 0; i < webhooks.length; i++) { var webhook = webhooks[i]; if ( webhook && webhook.config && webhook.config.url && webhook.config.url === 'https://cla.microsoft.com/webhooks/pullrequest' || webhook.config.url === 'https://cla.azure.com/webhooks/pullrequest' || webhook.config.url === 'https://cla2.msopentech.com/webhooks/pullrequest' || webhook.config.url === 'https://cla2.dotnetfoundation.org/webhooks/pullrequest') { var legalEntity = 'Unknown or former legal entity'; if (webhook.config.url === 'https://cla.microsoft.com/webhooks/pullrequest' || webhook.config.url === 'https://cla.azure.com/webhooks/pullrequest') { legalEntity = 'Microsoft'; } else if (webhook.config.url === 'https://cla2.msopentech.com/webhooks/pullrequest') { legalEntity = 'Microsoft Open Technologies, Inc.'; } else if (webhook.config.url === 'https://cla2.dotnetfoundation.org/webhooks/pullrequest') { legalEntity = '.NET Foundation'; } return callback(null, true, webhook.config.url, legalEntity, 'https://opensource.microsoft.com/resources'); } } return callback(null, false); }); }; // ---------------------------------------------------------------------------- // Update the repo properties with a patch. // ---------------------------------------------------------------------------- OpenSourceRepo.prototype.update = function updateRepo(patch, callback) { // CONSIDER: Wrap errors. 
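// The patch object is passed straight through to the GitHub repository update
// API; for example (illustrative): repo.update({ description: 'text' }, callback).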
this.createGitHubRepoClient().update(patch, callback); }; // ---------------------------------------------------------------------------- // Delete the repo from GitHub. // ---------------------------------------------------------------------------- OpenSourceRepo.prototype.delete = function deleteRepo(callback) { // CONSIDER: Wrap errors. this.createGitHubRepoClient().destroy(callback); }; // ---------------------------------------------------------------------------- // Retrieve a repo-scoped issue object. // ---------------------------------------------------------------------------- OpenSourceRepo.prototype.issue = function getIssueInstance(issueNumber, optionalInitialData) { var self = this; if (typeof issueNumber != 'string') { issueNumber = issueNumber.toString(); } if (self.inner.issues[issueNumber]) { return self.inner.issues[issueNumber]; } else { self.inner.issues[issueNumber] = new Issue(self, issueNumber, optionalInitialData); return self.inner.issues[issueNumber]; } }; // CONSIDER: OLD: Is this needed still? OpenSourceRepo.prototype.createIssue = function (issue, callback) { var fullName = this.full_name; var repositoryClient = this.oss.createGenericGitHubClient().repo(fullName); repositoryClient.createIssue(issue, function (error, createdIssue) { if (error) { error = utils.wrapError(error, 'We had trouble opening an issue to track this request in the ' + fullName + ' repo.'); } callback(error, createdIssue); }); }; // CONSIDER: OLD: Is this needed still? OpenSourceRepo.prototype.updateIssue = function (issueNumber, patch, callback) { var fullName = this.full_name; var issueClient = this.oss.createGenericGitHubClient().issue(this.full_name, issueNumber); issueClient.update(patch, function (error, updatedIssue) { if (error) { error = utils.wrapError(error, 'We had trouble updated the issue in the ' + fullName + ' repo.'); } callback(error, updatedIssue); }); }; function setDetails(self, details) { 'use strict'; let knownKeys = new Set(); var key = null; for (var i = 0; i < detailsToCopy.length; i++) { key = detailsToCopy[i]; self[key] = details[key]; knownKeys.add(key); } for (i = 0; i < detailsToSkip.length; i++) { key = detailsToSkip[i]; self.otherFields[key] = details[key]; knownKeys.add(key); } for (var k in details) { if (!knownKeys.has(k)) { debug('Repo details import, remaining key: ' + k); } } self._detailsLoaded = true; } // ---------------------------------------------------------------------------- // Add new file into a repo // ---------------------------------------------------------------------------- OpenSourceRepo.prototype.createContents = function (alternateToken, path, message, content, cbOrBranchOrOptions, cb) { this.createGitHubRepoClient(alternateToken).createContents(path, message, content, cbOrBranchOrOptions, cb); }; module.exports = OpenSourceRepo;<file_sep>/views/org/team/index.pug //- //- Copyright (c) Microsoft. All rights reserved. //- Licensed under the MIT license. See LICENSE file in the project root for full license information. //- extends ../../layout //- TODO: Add back team maintainer management mixin authenticGitHubLink() ul.list-unstyled li: a.btn.btn-sm( href='https://github.com/orgs/' + organization.name + '/teams/' + team2.slug, target="_blank", class=admin ? 'btn-primary' : 'btn-muted-more' ) = admin ? 'Manage on GitHub.com ' : 'Open on GitHub.com ' != octicon('mark-github', 18) if teamDetails && teamDetails.privacy === 'secret' && !membershipStatus li: p: small: p This team is marked as #[strong secret]. 
On GitHub you may see a 404 error unless you join the team. mixin membersList(typeOfList, membersList, isAdmin, moreMembersToShow) ul.list-inline(style='margin-bottom:24px') each member in membersList li(style='vertical-align:top;width:370px') ul.list-inline if member.avatar_url //- img-circle li(style='vertical-align:top;margin-top:12px'): img( alt=member.login, src=member.avatar_url + '&s=96', style='width:36px;height:36px') li - var primaryName = member.link ? (member.link.aadname || member.link.aadupn) : member.login ul.list-unstyled(style='margin-right:16px') li: h5 a(href='/people?q=' + member.login)= primaryName if orgOwnersSet && orgOwnersSet.has(member.id) = ' ' .label.label-info(title=organization.name + ' organization owner') Owner ul.list-inline if primaryName != member.login li: p= member.login if member.mailAddress li: a(href='mailto:' + member.mailAddress, title='Send corporate email to ' + member.mailAddress) != octicon('mail', 16) if isAdmin === true && typeOfList === 'member' //- Member admin if !maintainersSet.has(member.id) li: ul.list-inline li: form(method='post', action=teamUrl + 'maintainers/add') input(type='hidden', name='username', value=member.login) button.btn.btn-xs.btn-muted-more( onclick='return confirm(\'Are you sure that you want to promote ' + member.login + ' to be a team maintainer?\');' title='Add team maintainer rights for this user') Promote to maintainer li: form(method='post', action=teamUrl + 'members/remove') input(type='hidden', name='username', value=member.login) button.btn.btn-xs.btn-muted-more( onclick='return confirm(\'Are you sure that you want to remove ' + member.login + ' from the team?\');' title='Remove this user from the team') Remove if isAdmin === true && typeOfList === 'maintainer' //- Maintainer admin //- If the current member is also the current site user if orgOwnersSet && orgOwnersSet.has(member.id) //- Do not show "downgrade" of org owners else if member.id == user.github.id //- a.btn.btn-xs.btn-muted-more(href=teamUrl + 'maintainers/transfer/') Transfer role if membersList.length > 1 //- If there are already 1 other maintainer (total of 2), it's OK for the current user to drop themselves from the team. li: form(method='post', action=teamUrl + 'maintainers/' + member.id + '/downgrade') button.btn.btn-xs.btn-muted-more( href=teamUrl + 'maintainers/' + member.id + '/downgrade', onclick='return confirm(\'Are you sure that you want to downgrade your own team maintainer account to be a member? You will no longer be able to maintain this team on GitHub.\');') Downgrade yourself else if membersList.length > 1 li: form(method='post', action=teamUrl + 'maintainers/' + member.id + '/downgrade') button.btn.btn-xs.btn-muted-more( onclick='return confirm(\'Are you sure that you want to downgrade this maintainer to a member?\');' title='Remove team maintainer rights from this user') Downgrade if moreMembersToShow li: div(style='width:160px') p.lead.text-primary.text-center: small &hellip; and others block content //- Variables - var maximumMembersToShow = (4 * 3) - 1 - var maximumRepositoriesToShow = 5 //- View services - var languageColor = viewServices.languageColor - var octicon = viewServices.octicon //- Mode variables - var admin = teamPermissions.allowAdministration div.container .row: .col-md-12 h1 = team.name if admin = ' ' if isOrgOwner .label.label-info.shrink66( title='You are an organization owner' )= 'Organization owner' else .label.label-warning.shrink66( title=teamPermissions.sudo ? 
'As a sudo maintainer, you can administer team settings in this application but not directly on GitHub.com' : 'As a team maintainer you can administer team settings in this application and directly on GitHub.com' )= teamPermissions.sudo ? 'Sudo maintainer' : 'Team maintainer' if membershipStatus === 'member' = ' ' .label.label-muted.shrink66 Member if isSystemTeam = ' ' .label.label-danger.shrink66 System team h5= organization.name + ' organization' if team.description p.lead= team.description .row .col-md-8 //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - //- JOIN //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if !membershipStatus //- TODO: If they are not an org member, figure out what to do! .panel.panel-default .panel-heading h3.panel-title if isSystemTeam | You are not a member of this team else if isBroadAccessTeam | Become a team member now else | Request to become a team member .panel-body if isBroadAccessTeam p. You can immediately join this team, it is designed to provide broad access to organization members. else if isSystemTeam p As a system team, this team's membership is closed. else p. You're not currently a member of this team. You can submit a request to join the team. A team maintainer will review your ask. p if isSystemTeam a.btn.btn-sm.btn-default(href='#', disabled=true) Membership is closed else a.btn.btn-sm.btn-primary(href=teamUrl + 'join') Join this team //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - //- TEAM MAINTAINERS //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if maintainers h2 = 'Team Maintainers ' small: a(href=teamUrl + 'maintainers/refresh', title='Refresh the team maintainers list') i.glyphicon.glyphicon-refresh if maintainers.length <= 0 div.alert.alert-danger(role='alert') strong This team does not have any maintainers br | Without team maintainers members cannot request access to this team and it is unclear who the owner of the repository is. else if maintainers.length < 2 div.alert.alert-warning(role='alert') strong This team only has a single maintainer br | It is strongly recommended that a team have multiple maintainers to make it easier to stay on top of permission requests. +membersList('maintainer', maintainers, admin) if admin ul.list-inline li: a.btn.btn-sm.btn-muted(href=teamUrl + 'maintainers/add') Add a team maintainer //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - //- TEAM MEMBERS //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if membersFirstPage h2 = 'Members ' small: a(href=teamUrl + 'members/refresh', title='Refresh the team members list') i.glyphicon.glyphicon-refresh if membersFirstPage.length <= 0 p This team has no members. else - var membersSubset = membersFirstPage.slice(0, maximumMembersToShow) - var hasMore = membersFirstPage.length > membersSubset.length +membersList('member', membersSubset, admin, hasMore) ul.list-inline if membersFirstPage.length li: a.btn.btn-sm.btn-muted(href=teamUrl + 'members/browse/', title='Only a subset of members are listed on this page. 
A full view is also available.') if teamDetails && teamDetails.members_count = 'Browse all ' + teamDetails.members_count.toLocaleString() + ' members' else | Browse all team members if admin li: a.btn.btn-sm.btn-muted(href=teamUrl + 'members/add') Add a member //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - //- REPOSITORIES //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - hr h2 Repositories if repositories && teamDetails && teamDetails.repos_count - var actualCount = repositories.length - var countDifference = teamDetails.repos_count - actualCount if countDifference <= 0 p. This team provides access to #{teamDetails.repos_count.toLocaleString()} #{teamDetails.repos_count === 1 ? 'repository' : 'repositories'}. else p. This team provides access to #{actualCount.toLocaleString()} #{actualCount === 1 ? 'repository' : 'repositories'} and #{countDifference.toLocaleString()} #{actualCount === 1 ? 'fork' : 'forks'}. else p This team does not offer access to any repositories. if repositories && repositories.length - var repoCount = 0 - var localOrgName = organization.name - var specificTeam = team2 each repositoryPermission in repositories if (repoCount++ <= maximumRepositoriesToShow) - var repo = repositoryPermission.repository - var repoPermissions = repositoryPermission.permissions .repo(id=repo.name, style='padding-bottom:36px;display:block') h3 a(href='/' + localOrgName + '/repos/' + repo.name)= repo.name if repo.private === true | &nbsp; .label.label-warning.shrink66(title='This is a private repository and not open source.') Private if repoPermissions && specificTeam = ' ' if repoPermissions.admin .label.label-danger.shrink66(title='The ' + specificTeam.name + ' team grants permission to administer the repo') Admin else if repoPermissions.push .label.label-warning.shrink66(title='The ' + specificTeam.name + ' team grants permission to commit directly to the repo and accept pull requests') Write else if repoPermissions.pull .label.label-success.shrink66(title='The ' + specificTeam.name + ' team grants permission to clone/read/see the repo') Read if repo.description p=repo.description small ul.list-inline.list-horizontal-space if repo.language li span(style={color: languageColor(repo.language)}) != octicon('primitive-dot', 10) = ' ' + repo.language if repo.stargazers_count li != octicon('star', 15) = ' ' + repo.stargazers_count.toLocaleString() if repo.forks_count li != octicon('repo-forked', 12) = ' ' + repo.forks_count.toLocaleString() if repositories.length > maximumRepositoriesToShow h3 and more &hellip; p: a.btn.btn-sm.btn-muted(href=teamUrl + 'repos/') Browse all #{repositories.length} repositories assigned to the team .col-md-4(style='margin-top:24px') //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - //- AUTHENTIC GITHUB EXPERIENCE //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if !admin +authenticGitHubLink() //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - //- ADMINISTRATION //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if admin if pendingApprovals && pendingApprovals.length && pendingApprovals.length > 0 h2 Permission requests p: a.btn.btn-sm.alert-danger(href=teamUrl + 'approvals/') strong= pendingApprovals.length | &nbsp; | Pending Approval#{pendingApprovals.length > 1 ? 
's' : ''} h3 Team management +authenticGitHubLink() ul.list-unstyled.list-vspace li: a.btn.btn-muted.btn-sm(href=teamUrl + 'properties/', title='Manage the basic GitHub properties for this team') Team name & description li: a.btn.btn-muted.btn-sm(href=teamUrl + 'repos/', title='Take a repo from private to public, delete old repos, change repo descriptions. Manage repo collaborators.') Manage repositories li: a.btn.btn-muted.btn-sm(href=teamUrl + 'members/browse', title='Invite new members to your team. Remove members from the team. Remove employees from the entire org when they leave the company.') Manage members li: a.btn.btn-muted.btn-sm(href=teamUrl + 'members/browse?twoFactor=off', title='Identify non-compliant members of your team who have two-factor authentication disabled.') Two-factor check li: form(method='post', action=teamUrl + 'delete') button.btn.btn-sm.btn-muted( onclick='return confirm(\'Are you sure that you want to delete this team? This is a permanent operation.\');' title='Permanently delete the team') Delete team hr small ul.list-unstyled li This team has the GitHub ID of #{team.id} <file_sep>/views/head.pug //- //- Copyright (c) Microsoft. All rights reserved. //- Licensed under the MIT license. See LICENSE file in the project root for full license information. //- head - var resourceVersion = '4a' meta(charset='utf-8') //-title= (user && user.primaryUsername) ? title + ' - ' + user.primaryUsername : title title= title meta(http-equiv='X-UA-Compatible', content='IE=edge') meta(name='viewport', content='width=device-width, initial-scale=1.0') meta(name='author', content='Microsoft Open Source') link(href='/css/bootstrap.min.css?' + resourceVersion, rel='stylesheet') link(href='/repos-css/oss.css?' + resourceVersion, rel='stylesheet') link(href='/css/octicons.css?' + resourceVersion, rel='stylesheet') link(rel='shortcut icon', href='/favicon.ico') link(rel='apple-touch-icon', sizes='114x114,72x72,144x144,60x60,120x120,76x76,152x152,180x180', href='/favicon-144.png') meta(name='msapplication-config', content='none') //[if lt IE 9] <script src="https://ajax.aspnetcdn.com/ajax/respond/1.4.2/respond.min.js"/> [endif] block additional_head block js script(type='text/javascript', src='/js/jquery.min.js') script(type='text/javascript', src='/js/bootstrap.min.js') script(type='text/javascript', src='/js/jquery.timeago.js') script(type='text/javascript', src='/js/jquery.uitablefilter.js') | <script type='text/javascript'> | $(document).ready(function() { block js_doc_ready | jQuery('time').timeago(); | }); | </script> <file_sep>/business/graphManager.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // 'use strict'; const moment = require('moment'); class GraphManager { constructor(operations, options) { setRequiredProperties(this, ['github', 'config', 'redis', 'insights'], options); _private(this).operations = operations; return this; } getCachedLink(githubId, options, callback) { // Advice: this function is designed for efficiently at this time // and not ensuring a link, since it uses a cache system. For // making actual link calls, it would be best to use an alternate // call. 
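// Illustrative usage: graphManager.getCachedLink(githubId, (error, link) => { ... });
// results may be up to options.localMaxAgeSeconds (default 30s) stale locally
// and up to options.maxAgeSeconds (default 60s) stale from the shared cache.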
if (!callback && typeof(options) === 'function') { callback = options; options = null; } options = options || {}; const localCacheMaxAgeSeconds = options.localMaxAgeSeconds || 30; // 30s const remoteCacheMaxAgeSeconds = options.maxAgeSeconds || 60; // 1m let backgroundRefresh = options.backgroundRefresh !== undefined ? options.backgroundRefresh : true; getCachedLinksMap(this, localCacheMaxAgeSeconds, remoteCacheMaxAgeSeconds, backgroundRefresh, (error, map) => { return error ? callback(error) : callback(null, map.get(githubId)); }); } getMember(githubId, options, callback) { const self = this; self.getMembers(options, (error, allMembers) => { if (error) { return callback(error); } githubId = typeof(githubId) === 'string' ? parseInt(githubId, 10) : githubId; const member = raiseCrossOrganizationSingleResult(allMembers.get(githubId)); return callback(null, member); }); } getUserTeams(githubId, options, callback) { const self = this; self.getTeamsWithMembers(options, (error, everything) => { if (error) { return callback(error); } githubId = typeof(githubId) === 'string' ? parseInt(githubId, 10) : githubId; const teams = []; for (let i = 0; i < everything.length; i++) { const oneTeam = everything[i]; if (oneTeam && oneTeam.members) { for (let j = 0; j < oneTeam.members.length; j++) { if (githubId === oneTeam.members[j].id) { const teamClone = Object.assign({}, oneTeam); oneTeam.organization = { login: oneTeam.organization.login, }; delete teamClone.members; teams.push(teamClone); break; } } } } return callback(null, teams); }); } getTeamsWithMembers(options, callback) { if (typeof(options) === 'function' && !callback) { callback = options; options = null; } options = options || {}; if (!options.maxAgeSeconds) { options.maxAgeSeconds = 60 * 30 * 48 * 10 /* 2 WEEKS */ /* 2 DAYS */ /* 30m per-org full team members list OK */; } if (options.backgroundRefresh === undefined) { options.backgroundRefresh = true; } options.individualMaxAgeSeconds = 7 * 24 * 60 * 60; // One week _private(this).operations.getTeamsWithMembers(null, options, callback); } getUserReposByTeamMemberships(githubId, options, callback) { const self = this; self.getUserTeams(githubId, {}, (error, everything) => { if (error) { return callback(error); } const teams = new Set(); for (let i = 0; i < everything.length; i++) { teams.add(everything[i].id); } self.getReposWithTeams(options, (getReposError, allRepos) => { if (getReposError) { return callback(getReposError); } const repos = []; for (let i = 0; i < allRepos.length; i++) { const repo = allRepos[i]; if (repo && repo.teams) { const userTeams = []; let bestPermission = null; for (let j = 0; j < repo.teams.length; j++) { const t = repo.teams[j]; if (teams.has(t.id)) { if (repo.private === false && t.permission === 'pull') { // Public repos, ignore teams with pull access } else { userTeams.push(t); if (isPermissionBetterThan(bestPermission, t.permission)) { bestPermission = t.permission; } } } } if (userTeams.length > 0) { const personalizedRepo = { personalized: { teams: userTeams, permission: bestPermission, }, }; const repoClone = Object.assign(personalizedRepo, repo); delete repoClone.teams; repos.push(repoClone); } } } return callback(null, repos); }); }); } getReposWithTeams(options, callback) { if (typeof(options) === 'function' && !callback) { callback = options; options = null; } options = options || {}; if (!options.maxAgeSeconds) { options.maxAgeSeconds = 60 * 20 /* 20m per-org collabs list OK */; } options.individualMaxAgeSeconds = 7 * 24 * 60 * 60; // One week if 
(options.backgroundRefresh === undefined) { options.backgroundRefresh = true; } _private(this).operations.getRepoTeams(null, options, callback); } getReposWithCollaborators(options, callback) { if (typeof(options) === 'function' && !callback) { callback = options; options = null; } options = options || {}; if (!options.maxAgeSeconds) { options.maxAgeSeconds = 60 * 20 /* 20m per-org collabs list OK */; } options.individualMaxAgeSeconds = 7 * 24 * 60 * 60; // One week if (options.backgroundRefresh === undefined) { options.backgroundRefresh = true; } _private(this).operations.getRepoCollaborators(null, options, callback); } getMembers(options, callback) { if (typeof(options) === 'function' && !callback) { callback = options; options = null; } options = options || {}; if (!options.maxAgeSeconds) { options.maxAgeSeconds = 60 * 10 /* 10m per-org members list OK */; } if (options.backgroundRefresh === undefined) { options.backgroundRefresh = true; } _private(this).operations.getMembers(null, options, callback); } } function setRequiredProperties(self, properties, options) { for (let i = 0; i < properties.length; i++) { const key = properties[i]; if (!options[key]) { throw new Error(`Required option with key "${key}" was not provided.`); } self[key] = options[key]; } } function isPermissionBetterThan(currentBest, newConsideration) { switch (newConsideration) { case 'admin': return true; case 'push': if (currentBest !== 'admin') { return true; } break; case 'pull': if (currentBest === null) { return true; } break; default: throw new Error(`Invalid permission type ${newConsideration}`); } return false; } module.exports = GraphManager; function raiseCrossOrganizationSingleResult(result, keyProperty) { keyProperty = keyProperty || 'id'; if (!result || !result[keyProperty] || !result.orgs) { return; } const parentValue = result[keyProperty]; const clone = Object.assign({}, result); clone.orgs = []; let copiedFirst = false; for (const orgName of Object.getOwnPropertyNames(result.orgs)) { const orgResult = result.orgs[orgName]; if (!orgResult[keyProperty]) { throw new Error(`The result for the "${orgName}" org does not have a key property, "${keyProperty}".`); } if (orgResult[keyProperty] !== parentValue) { throw new Error(`The result for the "${orgName}" org key property, "${keyProperty}" does not match the parent key value.`); } if (orgResult.orgs) { throw new Error(`The result for the "${orgName}" org has a nested 'orgs' property, which is not allowed.`); } if (!copiedFirst) { Object.assign(clone, orgResult); copiedFirst = true; } clone.orgs.push(orgName); } return clone; } function getCachedLinksMap(self, maxAgeSecondsLocal, maxAgeSecondsRemote, backgroundRefresh, callback) { const privates = _private(self); const operations = privates.operations; if (!privates.linksCache) { privates.linksCache = {}; } let linksCache = privates.linksCache; const now = moment(); const beforeNow = moment().subtract(maxAgeSecondsLocal, 'seconds'); let isCacheValid = linksCache.map && linksCache.updated && beforeNow.isAfter(linksCache.updated); if (isCacheValid) { return callback(null, linksCache.map); } const remoteOptions = { backgroundRefresh: backgroundRefresh, maxAgeSeconds: maxAgeSecondsRemote, // Include all available information includeNames: true, includeId: true, includeServiceAccounts: true, }; operations.getLinks(remoteOptions, (getLinksError, links) => { if (getLinksError) { return callback(getLinksError); } const map = new Map(); for (let i = 0; i < links.length; i++) { let id = links[i].ghid; if (id) { id 
= parseInt(id, 10); map.set(id, links[i]); } } if (linksCache.map && linksCache.updated.isAfter(now)) { // Abandon this update, a newer update has already returned } else { linksCache.updated = now; linksCache.map = map; } return callback(null, linksCache.map); }); } const privateSymbol = Symbol(); function _private(self) { if (self[privateSymbol] === undefined) { self[privateSymbol] = {}; } return self[privateSymbol]; } <file_sep>/views/email/link.pug //- //- Copyright (c) Microsoft. All rights reserved. //- Licensed under the MIT license. See LICENSE file in the project root for full license information. //- extends body block content if link.serviceAccount h4 Service Account linked h3 Congratulations! p Your GitHub account is now linked with your corporate identity. table tbody if link.serviceAccount tr td Service Account td= link.aadupn if link.serviceAccountMail td Contact e-mail td= link.serviceAccountMail tr td GitHub account td= link.ghu tr td Corporate identity td= link.aadupn h3 Microsoft open source resources p. Take a moment to review the resources at #[a(href='https://opensource.microsoft.com/resources') https://opensource.microsoft.com/resources]: ul li Common GitHub questions, policies and resources relating to releasing open source li Service account guidance li Contributor license agreement info <file_sep>/routes/index-linked.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // 'use strict'; const express = require('express'); const router = express.Router(); const approvalsSystem = require('./approvals'); const orgsRoute = require('./orgs'); const orgAdmin = require('./orgAdmin'); const peopleRoute = require('./people'); const reposRoute = require('./repos'); const teamsRoute = require('./teams'); const unlinkRoute = require('./unlink'); const utils = require('../utils'); //----------------------------------------------------------------------------- //----------------------------------------------------------------------------- // SECURITY ROUTE MARKER: // Below this next call, all routes will require an active link to exist for // the authenticated GitHub user. 
//----------------------------------------------------------------------------- //----------------------------------------------------------------------------- router.use(function (req, res, next) { const config = req.app.settings.runtimeConfig; const link = req.oss.entities.link; if (link && link.ghu) { next(); } else if (config.authentication.scheme !== 'aad') { const error = new Error('Not found (not a corporate authenticated user).'); error.status = 404; error.originalUrl = req.originalUrl; error.skipLog = true; error.detailed = 'You are not currently signed in as a user with a "linked" corporate identity, FYI.'; next(error); } else { utils.storeOriginalUrlAsVariable(req, res, 'beforeLinkReferrer', '/', 'no linked github username'); } }); // end security route //----------------------------------------------------------------------------- //----------------------------------------------------------------------------- router.use('/unlink', unlinkRoute); router.use('/teams', teamsRoute); router.use('/approvals', approvalsSystem); router.use('/organization', orgAdmin); router.use('/people', peopleRoute); router.use('/repos', reposRoute); router.use('/', orgsRoute); module.exports = router; <file_sep>/resources/less/_build.less @import "../../bower_components/bootstrap/less/bootstrap.less"; @import "../../bower_components/bootswatch/yeti/variables.less"; @import "../../bower_components/bootswatch/yeti/bootswatch.less"; <file_sep>/routes/teams.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // 'use strict'; const express = require('express'); const router = express.Router(); const lowercaser = require('../middleware/lowercaser'); router.use(function (req, res, next) { req.oss.addBreadcrumb(req, 'Teams'); req.reposContext = { section: 'teams', pivotDirectlyToOtherOrg: '/teams/', // hack }; req.teamsPagerMode = 'orgs'; next(); }); router.get('/', lowercaser(['sort', 'set']), require('./teamsPager')); module.exports = router; <file_sep>/routes/api/apiUserContext.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // 'use strict'; const jsonError = require('./jsonError'); const OpenSourceUserContext = require('../../lib/context'); module.exports = function prepareUserContext(req, res, next) { const options = { config: req.app.settings.runtimeConfig, dataClient: req.app.settings.dataclient, redisClient: req.app.settings.dataclient.cleanupInTheFuture.redisClient, redisHelper: req.app.settings.redisHelper, githubLibrary: req.app.settings.githubLibrary, ossDbClient: req.app.settings.ossDbConnection, request: req, insights: req.insights, }; new OpenSourceUserContext(options, (error, instance) => { req.oss = instance; if (error && (error.tooManyLinks === true || error.anotherAccount === true)) { return next(jsonError(error, 400)); } return next(); }); }; <file_sep>/routes/orgAdmin.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // const express = require('express'); const router = express.Router(); const async = require('async'); const github = require('octonode'); const utils = require('../utils'); // TODO: Refactor OSS user to better be able to remove the user using the central codepath. // These functions are not pretty. 
router.use(function ensureOrganizationSudoer(req, res, next) { req.oss.isPortalAdministrator(function (error, isAdmin) { if (isAdmin === true) { return next(); } next(utils.wrapError(null, 'These aren\'t the droids you are looking for. You do not have permission to be here.', true)); }); }); router.get('/', function (req, res) { req.oss.render(req, res, 'organization/index', 'Organization Dashboard'); }); function tryGetGithubUserIdFromUsernameLink(dc, config, oldGithubUsername, callback) { dc.getUserLinkByProperty('ghu', oldGithubUsername, (getError, links) => { if (!getError && links && links.length === 0) { getError = new Error(`No link found by searching for the old username "${oldGithubUsername}".`); } if (!getError && links && links.length > 1) { getError = new Error(`While searching for a link by the old username "${oldGithubUsername}" there were ${links.length} results instead of 1.`); } if (getError) { return callback(getError); } callback(null, links[0]); }); } function whoisById(dc, config, githubId, userInfo, callback) { if (userInfo && userInfo.ghid && userInfo.ghu) { // Rename scenario; pass back the link return callback(null, userInfo); } dc.getLink(githubId, function (error, ok) { if (ok) { ok = dc.reduceEntity(ok); } else { ok = { githubInfoButNoLink: userInfo }; } return callback(error, ok); }); } function expandAllInformation(req, dc, config, entity, callback) { var oss = req.oss; var orgsList = oss.orgs(); var orgsUserIn = []; const ghid = entity.ghid || (entity.githubInfoButNoLink ? entity.githubInfoButNoLink.id : undefined); oss.getGithubUsernameFromId(ghid, (getUsernameError, username) => { if (getUsernameError) { return callback(getUsernameError); } if (entity && entity.ghu !== undefined && entity.ghu !== username) { entity.renamedUserMessage = `This user used to be known as "${entity.ghu}" on GitHub but changed their username to "${username}".`; entity.ghu = username; } async.each(orgsList, function (org, callback) { org.queryAnyUserMembership(username, function (err, membership) { if (membership && membership.state) { orgsUserIn.push(org); } callback(null, membership); }); }, function (expansionError) { entity.orgs = orgsUserIn; callback(expansionError, entity); }); }); } function getPersonServiceEntryByUpn(redisClient, upn, callback) { redisClient.hget('upns', upn, (redisGetError, data) => { if (redisGetError) { return callback(redisGetError); } var person = null; if (data) { try { person = JSON.parse(data); } catch (jsonError) { return callback(jsonError); } } if (person) { return callback(null, person); } return callback(null, null); }); } function getRealtimeAadIdInformation(req, anyInfo, callback) { if (!anyInfo || !anyInfo.aadoid) { return callback(); } const graphProvider = req.app.settings.graphProvider; if (!graphProvider) { return callback(); } const aadId = anyInfo.aadoid; graphProvider.getUserAndManagerById(aadId, callback); } router.get('/whois/aad/:upn', function (req, res, next) { var config = req.app.settings.runtimeConfig; var dc = req.app.settings.dataclient; var redisClient = req.app.settings.dataclient.cleanupInTheFuture.redisClient; var upn = req.params.upn; var oss = req.oss; dc.getUserByAadUpn(upn, function (error, usr) { if (error) { error.skipLog = true; return next(error); } if (usr.length && usr.length > 0) { expandAllInformation(req, dc, config, usr[0], function (error, z) { getPersonServiceEntryByUpn(redisClient, upn, (getInformationError, personEntry) => { getRealtimeAadIdInformation(req, z, (ignore, realtimeGraph) => { 
oss.render(req, res, 'organization/whois/result', 'Whois by AAD UPN: ' + upn, { personEntry: personEntry, upn: upn, info: z, realtimeGraph: realtimeGraph, }); }); }); }); } else { return next(utils.wrapError(null, 'User not found', true)); } }); }); router.get('/errors/active', function (req, res, next) { var dc = req.app.settings.dataclient; var oss = req.oss; dc.getActiveErrors(function (error, errors) { if (error) { return next(error); } oss.render(req, res, 'organization/errorsList', 'Untriaged errors', { errors: errors, }); }); }); router.post('/errors/:partition/:row', function (req, res, next) { var partitionKey = req.params.partition; var errorId = req.params.row; var action = req.body.action; var dc = req.app.settings.dataclient; if (action == 'Archive') { dc.updateError(partitionKey, errorId, { 'new': false }, function (error) { if (error) { return next(error); } req.oss.saveUserAlert(req, 'Error ' + partitionKey + '/' + errorId + ' triaged.', 'Marked as no longer a new error instance', 'success'); res.redirect('/organization/errors/active/'); }); } else if (action == 'Delete') { dc.removeError(partitionKey, errorId, function (error) { if (error) { return next(error); } req.oss.saveUserAlert(req, 'Error ' + partitionKey + '/' + errorId + ' deleted.', 'Deleted', 'success'); res.redirect('/organization/errors/active/'); }); } else { return next(new Error('Action not supported: ' + action)); } }); router.get('/whois/id/:githubid', function (req, res) { var config = req.app.settings.runtimeConfig; var dc = req.app.settings.dataclient; var id = req.params.githubid; var oss = req.oss; whoisById(dc, config, id, undefined, function (error, userInfoFinal) { expandAllInformation(req, dc, config, userInfoFinal, function(error, z) { getRealtimeAadIdInformation(req, z, (ignore, realtimeGraph) => { oss.render(req, res, 'organization/whois/result', 'Whois by GitHub ID: ' + req.params.githubid, { info: z, postUrl: '/organization/whois/id/' + id, realtimeGraph: realtimeGraph, }); }); }); }); }); function getGithubUserInformationAndTryKnownOldName(dc, config, githubOrgClient, username, callback) { var ghuser = githubOrgClient.user(username); ghuser.info(function (error, userInfo) { if (error && error.statusCode === 404) { return tryGetGithubUserIdFromUsernameLink(dc, config, username, (tryGetError, userLink) => { if (tryGetError) { return callback(tryGetError); } return callback(null, userLink); }); } if (error) { return callback(error); } callback(null, userInfo); }); } router.get('/whois/github/:username', function (req, res, next) { var config = req.app.settings.runtimeConfig; var dc = req.app.settings.dataclient; var redisClient = req.app.settings.dataclient.cleanupInTheFuture.redisClient; var username = req.params.username; var githubOrgClient = github.client(config.github.complianceToken || config.github.organizations[0].ownerToken); var oss = req.oss; getGithubUserInformationAndTryKnownOldName(dc, config, githubOrgClient, username, (error, userInfo) => { if (error) { error.skipLog = true; return next(error); } var id = userInfo.id || userInfo.ghid; whoisById(dc, config, id, userInfo, function (error, userInfoFinal) { expandAllInformation(req, dc, config, userInfoFinal, function(error, z) { var upn = userInfoFinal ? 
userInfoFinal.aadupn : 'unknown-upn'; getPersonServiceEntryByUpn(redisClient, upn, (getInformationError, personEntry) => { getRealtimeAadIdInformation(req, z, (ignore, realtimeGraph) => { oss.render(req, res, 'organization/whois/result', 'Whois: ' + (z.ghu || username), { info: z, personEntry: personEntry, realtimeGraph: realtimeGraph, }); }); }); }); }); }); }); router.post('/whois/github/:username', function (req, res, next) { var config = req.app.settings.runtimeConfig; var dc = req.app.settings.dataclient; var username = req.params.username; var githubOrgClient = github.client(config.github.complianceToken || config.github.organizations[0].ownerToken); var ghuser = githubOrgClient.user(username); const oss = req.oss; const markAsServiceAccount = req.body['mark-as-service-account']; const unmarkServiceAccount = req.body['unmark-service-account']; ghuser.info(function (error, userInfo) { if (error) { return next(error); } var id = userInfo.id; whoisById(dc, config, id, userInfo, function (error, userInfoFinal) { if (userInfoFinal && userInfoFinal.githubInfoButNoLink !== undefined) { userInfoFinal.ghu = userInfoFinal.githubInfoButNoLink.login; userInfoFinal.ghid = userInfoFinal.githubInfoButNoLink.id; } if (markAsServiceAccount || unmarkServiceAccount) { return modifyServiceAccount(dc, userInfoFinal, markAsServiceAccount, req, res, next); } oss.processPendingUnlink(userInfoFinal, (ignoredError, results) => { oss.render(req, res, 'organization/whois/drop', `Dropped ${username}`, { results: results, entity: userInfoFinal, }); }); }); }); }); function modifyServiceAccount(dc, linkSubset, markAsServiceAccount, req, res, next) { const oss = req.oss; dc.getLink(linkSubset.ghid, function (findError, link) { if (findError) { return next(findError); } link = dc.reduceEntity(link); if (markAsServiceAccount) { link.serviceAccount = true; } else { delete link.serviceAccount; } dc.updateLink(linkSubset.ghid, link, (updateError) => { if (updateError) { return next(updateError); } oss.invalidateLinkCache('aad', link.aadoid || 'no-aad-oid', () => { res.json(link); }); }); }); } module.exports = router; <file_sep>/business/repository.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. 
// 'use strict'; const wrapError = require('../utils').wrapError; const common = require('./common'); const Collaborator = require('./collaborator'); const RepositoryPermission = require('./repositoryPermission'); const TeamPermission = require('./teamPermission'); const githubEntityClassification = require('../data/github-entity-classification.json'); const repoPrimaryProperties = githubEntityClassification.repo.keep; const repoSecondaryProperties = githubEntityClassification.repo.strip; class Repository { constructor(organization, entity, getToken, operations) { this.organization = organization; if (entity) { common.assignKnownFields(this, entity, 'repository', repoPrimaryProperties, repoSecondaryProperties); } const privates = _private(this); privates.getToken = getToken; privates.operations = operations; } getDetails(options, callback) { if (!callback && typeof(options) === 'function') { callback = options; options = null; } options = options || {}; const self = this; const token = _private(this).getToken(); const operations = _private(this).operations; const parameters = { owner: this.organization.name, repo: this.name, }; const cacheOptions = { maxAgeSeconds: options.maxAgeSeconds || operations.defaults.orgRepoDetailsStaleSeconds, }; if (options.backgroundRefresh !== undefined) { cacheOptions.backgroundRefresh = options.backgroundRefresh; } return operations.github.call(token, 'repos.get', parameters, cacheOptions, (error, entity) => { if (error) { const notFound = error.code && error.code === 404; return callback(wrapError(error, notFound ? 'The repo could not be found.' : 'Could not get details about the repo.', notFound)); } common.assignKnownFields(self, entity, 'repository', repoPrimaryProperties, repoSecondaryProperties); callback(null, entity); }); } getBranches(cacheOptions, callback) { if (!callback && typeof (cacheOptions) === 'function') { callback = cacheOptions; cacheOptions = null; } cacheOptions = cacheOptions || {}; const privates = _private(this); const operations = privates.operations; const token = privates.getToken(); const github = operations.github; const parameters = { owner: this.organization.name, repo: this.name, per_page: 100, }; if (cacheOptions.protected !== undefined) { parameters.protected = cacheOptions.protected; } delete cacheOptions.protected; if (!cacheOptions.maxAgeSeconds) { cacheOptions.maxAgeSeconds = operations.defaults.repoBranchesStaleSeconds; } if (cacheOptions.backgroundRefresh === undefined) { cacheOptions.backgroundRefresh = true; } // NOTE: This method does not return a strongly-typed "branch" object or anything like that return github.collections.getRepoBranches( token, parameters, cacheOptions, callback); } getContent(path, options, callback) { if (!callback && typeof (options) === 'function') { callback = options; options = null; } options = options || {}; const ref = options.branch || options.tag || options.ref || 'master'; const parameters = { owner: this.organization.name, repo: this.name, path: path, ref: ref, }; const token = _private(this).getToken(); const operations = _private(this).operations; return operations.github.call(token, 'repos.getContent', parameters, (error, content) => { if (error) { return callback(error); } callback(null, content); }); } getCollaborator(username, cacheOptions, callback) { if (!callback && typeof (cacheOptions) === 'function') { callback = cacheOptions; cacheOptions = null; } cacheOptions = cacheOptions || {}; const privates = _private(this); const operations = privates.operations; const token = 
privates.getToken(); const github = operations.github; const parameters = { owner: this.organization.name, repo: this.name, username: username, }; if (!cacheOptions.maxAgeSeconds) { //cacheOptions.maxAgeSeconds = operations.defaults.orgRepoCollaboratorStaleSeconds; } if (cacheOptions.backgroundRefresh === undefined) { //cacheOptions.backgroundRefresh = true; } Object.assign(parameters, cacheOptions); return github.call(token, 'repos.reviewUserPermissionLevel', parameters, (error, userPermissionLevel) => { if (error) { return callback(error); } return callback(null, new RepositoryPermission(this.organization, userPermissionLevel, privates.getToken, operations)); }); } getCollaborators(cacheOptions, callback) { if (!callback && typeof (cacheOptions) === 'function') { callback = cacheOptions; cacheOptions = null; } cacheOptions = cacheOptions || {}; const privates = _private(this); const operations = privates.operations; const token = privates.getToken(); const github = operations.github; const parameters = { owner: this.organization.name, repo: this.name, per_page: 100, affiliation: cacheOptions.affiliation || 'all', }; delete cacheOptions.affiliation; if (!cacheOptions.maxAgeSeconds) { cacheOptions.maxAgeSeconds = operations.defaults.orgRepoCollaboratorsStaleSeconds; } if (cacheOptions.backgroundRefresh === undefined) { cacheOptions.backgroundRefresh = true; } return github.collections.getRepoCollaborators( token, parameters, cacheOptions, common.createInstancesCallback(this, collaboratorPermissionFromEntity, callback)); } setTeamPermission(teamId, newPermission, callback) { const [github, token] = getGitHubClient(this); const options = { id: teamId, org: this.organization.name, repo: this.name, permission: newPermission, }; github.post(token, 'orgs.addTeamRepo', options, callback); } getWebhooks(options, callback) { if (!callback && typeof(options) === 'function') { callback = options; options = null; } options = options || {}; const token = _private(this).getToken(); const operations = _private(this).operations; const parameters = { owner: this.organization.name, repo: this.name, }; const cacheOptions = { maxAgeSeconds: options.maxAgeSeconds || operations.defaults.orgRepoWebhooksStaleSeconds, }; if (options.backgroundRefresh !== undefined) { cacheOptions.backgroundRefresh = options.backgroundRefresh; } return operations.github.call(token, 'repos.getHooks', parameters, cacheOptions, callback); } getTeamPermissions(cacheOptions, callback) { if (!callback && typeof (cacheOptions) === 'function') { callback = cacheOptions; cacheOptions = null; } cacheOptions = cacheOptions || {}; const privates = _private(this); const operations = privates.operations; const token = privates.getToken(); const github = operations.github; const parameters = { owner: this.organization.name, repo: this.name, per_page: 100, }; if (!cacheOptions.maxAgeSeconds) { cacheOptions.maxAgeSeconds = operations.defaults.orgRepoTeamsStaleSeconds; } if (cacheOptions.backgroundRefresh === undefined) { cacheOptions.backgroundRefresh = true; } return github.collections.getRepoTeams( token, parameters, cacheOptions, common.createInstancesCallback(this, teamPermissionFromEntity, callback)); } } function teamPermissionFromEntity(entity) { // 'this' is bound for this function to be a private method const privates = _private(this); const operations = privates.operations; const getToken = privates.getToken; const permission = new TeamPermission(this.organization, entity, getToken, operations); return permission; } function 
collaboratorPermissionFromEntity(entity) {
  // 'this' is bound for this function to be a private method
  const privates = _private(this);
  const operations = privates.operations;
  const getToken = privates.getToken;
  const permission = new Collaborator(this.organization, entity, getToken, operations);
  return permission;
}

module.exports = Repository;

function getGitHubClient(self) {
  const privates = _private(self);
  const operations = privates.operations;
  const token = privates.getToken();
  const github = operations.github;
  return [github, token];
}

const privateSymbol = Symbol();

function _private(self) {
  if (!self) {
    throw new Error('Not bound to an instance.');
  }
  if (self[privateSymbol] === undefined) {
    self[privateSymbol] = {};
  }
  return self[privateSymbol];
}
<file_sep>/API.md
# API

There is an initial API implementation available to help partner teams with their scenarios. The API requires an approved API key to be used.

It is designed to be run in a web service or application scenario where the API key can be secured. If it is necessary to allow others to use this API outside of a web service or cloud situation, that scenario will need to be resourced and built out.

The API can work with most GitHub client libraries as long as you can change the base path and also set headers.

## API Versioning

The following API versions are currently supported:

- "2016-12-01": supports creating repositories and also creating links
- "2017-03-08": updates the shape of link responses to use `aad.id`, and `corporate` is renamed `aad`

An API version must be provided as either a _header_ value or in the _query string_.

- Valid requests MUST have a supported API version present
- A request MUST provide the version in either a query string or a header parameter
- A request MAY provide the API version in a header called `api-version`
- A request MAY provide the API version in a query string parameter named `api-version`

## Headers and parameters

Please provide:

- `content-type` of `application/json`
- `authorization` header using basic auth (see below)
- `api-version` header, if providing the API version through this method

A request that provides the `api-version` as a query string parameter might look like:

`GET https://endpoint/api/people/links?api-version=2017-03-08`

## Authorization

Send Basic Authentication credentials where the username is `apikey` and the password is your API token. You can technically provide the token as the username and/or the password.

### Tokens are scoped to specific API(s)

An API key may be authorized for a specific API endpoint or scope. Please verify, when you are granted API access, that you have access to the endpoint that you intend to use.

# User link management

Information about the list of linked users who have a corporate relationship with other accounts is available.

> These APIs require that your API key be authorized for the `links` scope

To improve responsiveness, this API uses cached data. If your service is using the data for a batch process or job, do consider keeping your own cache of the data instead of calling this API exhaustively while performing work.
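As a concrete illustration of these conventions, the sketch below issues the links request from Node using only the built-in `https` module. The host name (`endpoint`) and the `PORTAL_API_TOKEN` environment variable are placeholders for your deployment's endpoint and an authorized API key — they are assumptions for this example, not part of the API itself.

```
// Minimal sketch: the host name and PORTAL_API_TOKEN are assumed placeholders.
const https = require('https');

const apiToken = process.env.PORTAL_API_TOKEN;
const options = {
  hostname: 'endpoint', // replace with the actual portal host
  path: '/api/people/links',
  method: 'GET',
  headers: {
    'content-type': 'application/json',
    'api-version': '2017-03-08',
    // Basic auth: the username is "apikey" and the password is the API token
    'authorization': 'Basic ' + Buffer.from('apikey:' + apiToken).toString('base64'),
  },
};

https.request(options, (res) => {
  let body = '';
  res.on('data', (chunk) => { body += chunk; });
  res.on('end', () => {
    if (res.statusCode !== 200) {
      return console.error('Unexpected status ' + res.statusCode);
    }
    const links = JSON.parse(body);
    console.log('Retrieved ' + links.length + ' links');
  });
}).on('error', (error) => console.error(error)).end();
```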
## Get all linked users

> GET /api/people/links

### Response

HTTP

```
Status: 200 OK
Content-Type: application/json; charset=utf-8
```

Body

```
[
  {
    "github": {
      "id": 1,
      "login": "username",
      "organizations": [
        "OrganizationName1",
        "OrganizationName2"
      ]
    },
    "aad": {
      "alias": "alias",
      "preferredName": "The Name",
      "userPrincipalName": "<EMAIL>",
      "id": "142-guid-123",
      "emailAddress": "<EMAIL>"
    }
  },
  {
    "github": {
      "id": 2,
      "login": "username2",
      "organizations": [
        "OrganizationName2"
      ]
    },
    "aad": {
      "alias": "alias2",
      "preferredName": "Name Here",
      "userPrincipalName": "<EMAIL>",
      "id": "guid",
      "emailAddress": "<EMAIL>"
    }
  },
  ...
]
```

## Get a specific linked user

This API retrieves information about a specific user. The first API version to support this was `2017-03-08`.

### by GitHub username

> GET /api/people/links/github/:login

Where `login` is a GitHub username, case insensitive.

#### Response

If a link is not found for the GitHub user

```
Status: 404 Not Found
```

If a link is found

```
Status: 200 OK
```

Response body:

```
{
  "github": {
    "id": 2,
    "login": "username2",
    "organizations": [
      "OrganizationName2"
    ]
  },
  "aad": {
    "alias": "alias2",
    "preferredName": "<NAME>",
    "userPrincipalName": "<EMAIL>",
    "id": "guid",
    "emailAddress": "<EMAIL>"
  }
}
```

### by Azure Active Directory ID

> This API returns an array when there is at least one matching account. To support scenarios with other account types, or even multiple accounts such as service accounts, it is up to your application to determine how to handle more than one account. Order is not guaranteed.

> GET /api/people/links/aad/:id

Where `id` is an AAD ID.

#### Response

If a link is not registered for this user

```
Status: 404 Not Found
```

If a link is found

```
Status: 200 OK
```

Response body:

```
[
  {
    "github": {
      "id": 2,
      "login": "username2",
      "organizations": [
        "OrganizationName2"
      ]
    },
    "aad": {
      "alias": "alias2",
      "preferredName": "Name Here",
      "userPrincipalName": "<EMAIL>",
      "id": "guid",
      "emailAddress": "<EMAIL>"
    }
  }
]
```

Most commonly the array will be of length 1. If there are no results, instead of an HTTP 200 you will receive a 404 (not an empty array).
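The 200/404 contract above makes single-user lookups easy to wrap. The sketch below, again using only Node's built-in `https` module and the same placeholder host and token assumed earlier, looks up one GitHub login and treats 404 as "not linked" rather than as a failure:

```
// Minimal sketch: the host name and PORTAL_API_TOKEN remain assumed placeholders.
const https = require('https');

function getLinkByGitHubLogin(login, callback) {
  const options = {
    hostname: 'endpoint', // replace with the actual portal host
    path: '/api/people/links/github/' + encodeURIComponent(login) + '?api-version=2017-03-08',
    method: 'GET',
    headers: {
      'authorization': 'Basic ' + Buffer.from('apikey:' + process.env.PORTAL_API_TOKEN).toString('base64'),
    },
  };
  https.request(options, (res) => {
    let body = '';
    res.on('data', (chunk) => { body += chunk; });
    res.on('end', () => {
      if (res.statusCode === 404) {
        return callback(null, null); // the GitHub user has no corporate link
      }
      if (res.statusCode !== 200) {
        return callback(new Error('Unexpected status ' + res.statusCode));
      }
      return callback(null, JSON.parse(body));
    });
  }).on('error', callback).end();
}

// Example usage:
getLinkByGitHubLogin('username2', (error, link) => {
  if (error) { return console.error(error); }
  console.log(link ? link.aad.userPrincipalName : 'Not linked');
});
```

Callback style is used here only to match the conventions of the surrounding codebase; a promise-based wrapper would work just as well.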
# Repository management

## Create a repo

> This API requires that your API key be authorized for the `createRepo` scope

This example uses a pure POST request plus headers for authorization:

```
POST https://endpoint/api/orgName/repos?api-version=2016-12-01

HEADERS

authorization: basic :key
content-type: application/json

BODY

{
  "name": "my-test-repo",
  "private": true,
  "ms.license": "MIT",
  "ms.approval": "ReleaseReview",
  "ms.justification": "link to release approval",
  "ms.cla-entity": "Legal Entity Name",
  "ms.cla-mail": "<EMAIL>",
  "ms.notify": "<EMAIL>",
  "ms.onBehalfOf": "alias",
  "ms.teams": {
    "pull": [ 12346, 12348 ],
    "push": [ 12350 ],
    "admin": [ 12345 ]
  }
}

OUTPUT after the call is similar to the following (partially redacted)

OUTPUT BODY

{
  "github": {
    "id": 2,
    "name": "test-repo-ospo-2",
    "full_name": "OrgName/test-repo-ospo-2",
    "owner": {
      "login": "OrgName",
      "id": 1,
      "avatar_url": "https://avatars.githubusercontent.com/u/1?v=3",
      "gravatar_id": "",
      "url": "https://api.github.com/users/OrgName",
      "html_url": "https://github.com/OrgName",
      "repos_url": "https://api.github.com/users/OrgName/repos",
      "events_url": "https://api.github.com/users/OrgName/events{/privacy}",
      "received_events_url": "https://api.github.com/users/OrgName/received_events",
      "type": "Organization",
      "site_admin": false
    },
    "private": true,
    "html_url": "https://github.com/OrgName/test-repo-ospo-2",
    "description": null,
    "fork": false,
    "url": "https://api.github.com/repos/OrgName/test-repo-ospo-2",
    "forks_url": "https://api.github.com/repos/OrgName/test-repo-ospo-2/forks",
    "milestones_url": "https://api.github.com/repos/OrgName/test-repo-ospo-2/milestones{/number}",
    "notifications_url": "https://api.github.com/repos/OrgName/test-repo-ospo-2/notifications{?since,all,participating}",
    "labels_url": "https://api.github.com/repos/OrgName/test-repo-ospo-2/labels{/name}",
    "releases_url": "https://api.github.com/repos/OrgName/test-repo-ospo-2/releases{/id}",
    "deployments_url": "https://api.github.com/repos/OrgName/test-repo-ospo-2/deployments",
    "created_at": "2016-12-14T22:01:04Z",
    "updated_at": "2016-12-14T22:01:04Z",
    "pushed_at": "2016-12-14T22:01:05Z",
    "git_url": "git://github.com/OrgName/test-repo-ospo-2.git",
    "ssh_url": "git@github.com:OrgName/test-repo-ospo-2.git",
    "clone_url": "https://github.com/OrgName/test-repo-ospo-2.git",
    "svn_url": "https://github.com/OrgName/test-repo-ospo-2",
    "homepage": null,
    "size": 0,
    "stargazers_count": 0,
    "watchers_count": 0,
    "language": null,
    "has_issues": true,
    "has_downloads": true,
    "has_wiki": true,
    "has_pages": false,
    "forks_count": 0,
    "mirror_url": null,
    "open_issues_count": 0,
    "forks": 0,
    "open_issues": 0,
    "watchers": 0,
    "default_branch": "master",
    "permissions": {
      "admin": true,
      "push": true,
      "pull": true
    },
    "organization": {
      "login": "OrgName",
      "id": 1,
      "avatar_url": "https://avatars.githubusercontent.com/u/1?v=3",
      "gravatar_id": "",
      "url": "https://api.github.com/users/OrgName",
      "html_url": "https://github.com/OrgName",
      "events_url": "https://api.github.com/users/OrgName/events{/privacy}",
      "received_events_url": "https://api.github.com/users/OrgName/received_events",
      "type": "Organization",
      "site_admin": false
    },
    "network_count": 0,
    "subscribers_count": 3,
    "meta": {
      "x-ratelimit-limit": "62500",
      "x-ratelimit-remaining": "59992",
      "x-ratelimit-reset": "1481754433",
      "x-oauth-scopes": "repo, delete_repo, admin:org, admin:org_hook",
      "x-github-request-id": "ABC",
      "location": "https://api.github.com/repos/OrgName/test-repo-ospo-2",
      "etag": "\"3f68722071b86e49c8e25f1b76e61a32\"",
      "status": "201 Created",
      "statusActual": 201
    }
  },
  "name": "test-repo-ospo-2",
  "tasks": [
    {
      "message": "Successfully added the \"test-repo-ospo-2\" repo to GitHub team ID \"2\" with permission level PUSH."
    }
  ],
  "notified": [
    "<EMAIL>"
  ]
}
```

This example uses headers on top of a standard GitHub client:

```
POST https://endpoint/api/Microsoft/repos

HEADERS

content-type: application/json
api-version: 2016-12-01
authorization: Basic :key
ms-license: MIT
ms-approval: SmallLibrariesToolsSamples
ms-cla-entity: Legal Entity Name
ms-cla-mail: <EMAIL>
ms-notify: <EMAIL>
ms-onbehalfof: alias

BODY

{
  "name": "my-test-repo",
  "private": true
}
```

The bare minimum GitHub body, sent as JSON, is the `name` field. You can see the GitHub API documentation here: https://developer.github.com/v3/repos/#create

- name (name of the repo)
- private (true/false)

> Note: GitHub has an input field called `team_id`. This gives _read_ access to a single team ID. Our API is more sophisticated and useful since it can also assign teams to the repo with various permissions. As a result, we do not recommend providing `team_id`.

API Version:

- `api-version` should be in the header or query string; at this time only 2016-12-01 is supported

Casing:

- At this time, casing is important for values

Team permissions must be set at create time as well. The API supports up to 12 team permissions, plus an everyone-read team permission if desired. This design allows teams to be specified as headers.

If you are setting a header, set it to a JSON-stringified object representing the needed value. If you are setting it in the body, provide the rich object value directly. You need to provide team IDs, not team names, at this time.

- ms.teams (or ms-teams and a JSON-stringified object for the header)

Team permission (ms.teams) value:

```
{
  "pull": [1],
  "push": [],
  "admin": [2, 3]
}
```

Always try to provide a minimal number of administrator teams; the same goes for write (push) teams, to encourage the standard Git workflow.
<file_sep>/middleware/hsts.js
//
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
//

const hsts = require('hsts');

module.exports = hsts({
  maxAge: 10886400000, // Must be at least 18 weeks to be approved
  includeSubDomains: true, // Must be enabled to be approved
  preload: true,
});
<file_sep>/test/configuration.js
//
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
// 'use strict'; const assert = require('chai').assert; const keyVaultHelper = require('keyvault-configuration-resolver'); const fakeKeyVaultClient = require('./fakeKeyVaultClient'); function createFakeWithKeys() { const faker = fakeKeyVaultClient(); const secretId = faker.storeSecret('test', 'big secret', { tag1: 'p1', tag2: 'and tag 2', }); return [faker, secretId]; } describe('configuration', () => { // config as code: tests have moved to the refactored npm, painless-config-as-code describe('keyVaultHelper', () => { it('non-URL values passthrough', () => { const fake = createFakeWithKeys(); // es6 destructuring would be nice const keyVaultClient = keyVaultHelper(fake[0]); const config = { a: 'animal', b: 'bat', c: 'cherry', d: true, e: 5, }; keyVaultClient.getObjectSecrets(config, (error) => { assert.isNull(error, 'no exception'); assert.equal(config.a, 'animal', 'string works'); assert.equal(config.b, 'bat', 'string works'); assert.equal(config.c, 'cherry', 'string works'); assert.isTrue(config.d, 'bool works'); assert.equal(config.e, 5, 'number is unaffected'); }); }); it('keyvault:// protocol works', () => { const fake = createFakeWithKeys(); // es6 destructuring would be nice const keyVaultClient = keyVaultHelper(fake[0]); const secretId = fake[1]; const keyVaultSchemeSecretId = secretId.replace('https://', 'keyvault://'); const config = { bigPasscode: keyVaultSchemeSecretId, }; keyVaultClient.getObjectSecrets(config, () => { assert.equal(config.bigPasscode, 'big secret', 'secret read OK'); }); }); it('deeply nested KeyVault URLs work', () => { const fake = createFakeWithKeys(); // es6 destructuring would be nice const keyVaultClient = keyVaultHelper(fake[0]); const secretId = fake[1]; const keyVaultSchemeSecretId = secretId.replace('https://', 'keyvault://'); const config = { deep: { object: { nesting: { test: { value: { is: keyVaultSchemeSecretId, } } } } } }; keyVaultClient.getObjectSecrets(config, () => { assert.equal(config.deep.object.nesting.test.value.is, 'big secret', 'secret read OK'); }); }); it('keyvault:// tag properties work', () => { const fake = createFakeWithKeys(); // es6 destructuring would be nice const keyVaultClient = keyVaultHelper(fake[0]); const secretId = fake[1]; const keyVaultSchemeSecretId = secretId.replace('https://', 'keyvault://'); const keyVaultSchemeSecretIdWithTag = secretId.replace('https://', 'keyvault://tag1@'); const config = { taggedProperty: keyVaultSchemeSecretIdWithTag, kvProperty: keyVaultSchemeSecretId, }; keyVaultClient.getObjectSecrets(config, () => { assert.equal(config.kvProperty, 'big secret', 'secret read OK'); assert.equal(config.taggedProperty, 'p1', 'tag read OK'); }); }); it('keyvault:// tag properties return undefined if missing', () => { const fake = createFakeWithKeys(); // es6 destructuring would be nice const keyVaultClient = keyVaultHelper(fake[0]); const secretId = fake[1]; const keyVaultSchemeSecretIdWithTag = secretId.replace('https://', 'keyvault://undefinedtagthing@'); const config = { taggedProperty: keyVaultSchemeSecretIdWithTag, }; keyVaultClient.getObjectSecrets(config, () => { assert.isUndefined(config.taggedProperty, '=== undefined'); }); }); it('URL values passthrough', () => { const fake = createFakeWithKeys(); // es6 destructuring would be nice const keyVaultClient = keyVaultHelper(fake[0]); const secretId = fake[1]; const config = { a: secretId, }; keyVaultClient.getObjectSecrets(config, (error) => { assert.isNull(error, 'no exception'); assert.equal(config.a, secretId, 'KeyVault URL is passed through'); 
}); }); it('keyvault:// on an invalid secret stops processing', () => { const fake = createFakeWithKeys(); // es6 destructuring would be nice const keyVaultClient = keyVaultHelper(fake[0]); const config = { a: 'keyvault://invalid/secrets/hello/1', }; keyVaultClient.getObjectSecrets(config, (error) => { assert.isNotNull(error, 'exception thrown due to KeyVault client error'); }); }); }); }); <file_sep>/views/email/footer.pug //- //- Copyright (c) Microsoft. All rights reserved. //- .appFooter table(style='width:100%;', cellspacing=0, cellpadding=0) tbody tr td(width='100%', colspan='2', style='height:28px') p tr td(width='100%', colspan='2', style='height:12px) tr td td p.footer(style='text-align: right') if version | Repos #{version} br if correlationId | Correlation ID: #{correlationId} if view | Template: #{view} <file_sep>/middleware/rawBodyParser.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // 'use strict'; module.exports = function rawBodyParser(req, res, next) { // Since we have a site-wide implementation of body parser, this allows // routines access to the raw body if it is needed req._raw = ''; req.on('data', chunk => { req._raw += chunk; }); next(); }; <file_sep>/views/org/approvals.pug //- //- Copyright (c) Microsoft. All rights reserved. //- Licensed under the MIT license. See LICENSE file in the project root for full license information. //- extends ../layout block content div.container if teamResponsibilities && teamResponsibilities.length && teamResponsibilities.length > 0 h1 Approvals for Your Review div.container each entry in teamResponsibilities h2(style='padding-bottom: 16px') =entry.type === 'repo' ? 'New Repository' : 'Join a team' | &nbsp; small Permission Request div.row div.col-md-5.col-lg-5 form(method='post', action='/' + entry._teamInstance.org.name + '/teams/' + entry._teamInstance.name + '/approvals/' + entry.RowKey) p //-if entry.issue //- NOTE: Commenting out GitHub links with approval repos, since they are being deprecated internally and this is a quick fix a.btn.btn-sm.btn-muted(href='https://github.com/' + entry._teamInstance.org.name + '/' + entry._teamInstance.org.getWorkflowRepository().name + '/issues/' + entry.issue, target='_new')= entry.issue | &nbsp; a.btn.btn-sm.btn-muted(href='/' + entry._teamInstance.org.name + '/teams/' + entry._teamInstance.name + '/approvals/' + entry.RowKey) i.glyphicon.glyphicon-zoom-in | &nbsp; | View Detailed Request Page if entry.active === true p input.btn.btn-sm.btn-default(type='submit', name='approve', value='Approve') p input.btn.btn-sm.btn-primary(type='submit', name='approveWithComment', value='Approve with Comment...') p input.btn.btn-sm.btn-default(type='submit', name='deny', value='Deny...') div.col-md-7.col-lg-7 p a.btn.btn-sm.btn-muted(href='https://github.com/' + entry.ghu, target='_new')= entry.ghu a.btn.btn-sm.btn-muted-more(href='mailto:' + entry.email)= entry.email ? entry.email : 'Unknown' if entry.type == 'repo' && entry.repoName h3 Repository Information blockquote p strong Name br = entry.repoName p strong Organization br = entry.org if entry.repoVisibility !== undefined p strong Visibility br = (entry.repoVisibility == 'public' ? 
'Public' : 'Private') if entry.justification h3 Business Justification blockquote =entry.justification hr h1 Requests you have made if usersRequests && usersRequests.length && usersRequests.length > 0 div.container each myRequest in usersRequests div.row(style='margin-top:24px') div.col-md-5.col-lg-5 //-if myRequest._teamInstance && myRequest.issue //- NOTE: Commenting out GitHub links with approval repos, since they are being deprecated internally and this is a quick fix p a.btn.btn-sm.btn-muted(href='https://github.com/' + myRequest._teamInstance.org.name + '/' + myRequest._teamInstance.org.getWorkflowRepository().name + '/issues/' + myRequest.issue, target='_new') Tracking Issue ##{myRequest.issue} form(method='post', action='/approvals/' + myRequest.RowKey + '/cancel') p input.btn.btn-sm.btn-default(type='submit', value='Cancel my request') div.col-md-7.col-lg-7 if myRequest.type == 'repo' && myRequest.repoName h3 Repository Information blockquote p strong Name br = myRequest.repoName p strong Organization br = myRequest.org if myRequest.repoVisibility !== undefined p strong Visibility br = (myRequest.repoVisibility == 'public' ? 'Public' : 'Private') else if myRequest.type == 'joinTeam' h3 Request to join a team h5 Organization p= myRequest.org h5 Team Name p= myRequest.teamname h5 GitHub Team ID p= myRequest.teamid if myRequest.justification h3 My Business Justification blockquote =myRequest.justification hr else p.lead There are no active requests open at this time. <file_sep>/views/org/team/approvals.pug //- //- Copyright (c) Microsoft. All rights reserved. //- Licensed under the MIT license. See LICENSE file in the project root for full license information. //- extends ../../layout block content div.container h1 Approvals if pendingApprovals && pendingApprovals.length && pendingApprovals.length > 0 div div.container each entry in pendingApprovals h3(style='padding-bottom: 16px') =entry.type === 'repo' ? 'New Repository' : 'Join team' div.row div.col-md-4.col-lg-4 form(method='post', action=teamUrl + 'approvals/' + entry.RowKey) p //-if entry.issue //- NOTE: Commenting out GitHub links with approval repos, since they are being deprecated internally and this is a quick fix a.btn.btn-sm.btn-muted(href='https://github.com/' + team.org.getWorkflowRepository().full_name + '/issues/' + entry.issue, target='_new')= entry.issue //-| &nbsp; a.btn.btn-sm.btn-muted(href=teamUrl + 'approvals/' + entry.RowKey) i.glyphicon.glyphicon-zoom-in | &nbsp; | Detailed Request if entry.requestedTime | &nbsp; &nbsp; time(datetime=entry.requestedTime.toISOString())= entry.requestedTime.toDateString() if entry.active === true h5 ACTIONS p input.btn.btn-sm.btn-default(type='submit', name='approve', value='Approve') if entry.type != 'repo' p input.btn.btn-sm.btn-primary(type='submit', name='approveWithComment', value='Approve with Comment...') p input.btn.btn-sm.btn-default(type='submit', name='deny', value='Deny...') div.col-md-4.col-lg-4 if entry.type === 'repo' h5 REPO p= entry.repoName h5 PERMISSIONS p small Open the "Detailed Request" view to see permissions and other info for the repo request. 
else if entry.type === 'joinTeam' h5 TEAM NAME p a(href='https://github.com/orgs/' + entry.org + '/teams/' + entry.teamname, target='_new')= entry.teamname if entry.justification h5 BUSINESS JUSTIFICATION blockquote =entry.justification div.col-md-4.col-lg-4 if entry.completeRequestingUser && entry.completeRequestingUser.corporateAlias() h5 CORPORATE IDENTITY p - var corporateUser = entry.completeRequestingUser if corporateUser.link if corporateUser.link.aadname a.btn.btn-sm.btn-muted(href=corporateUser.corporateProfileUrl(), target='_new')= corporateUser.link.aadname if corporateUser.corporateAlias() a.btn.btn-sm.btn-muted-more(href=corporateUser.corporateProfileUrl(), target='_new') = corporateUser.corporateAlias() + ' ' i.glyphicon.glyphicon-share-alt if entry.ghu h5 GITHUB PROFILE p a.btn.btn-sm.btn-muted(href='https://github.com/' + entry.ghu, target='_new')= entry.ghu if entry.email h5 EMAIL p a.btn.btn-sm.btn-muted-more(href='mailto:' + entry.email)= entry.email ? entry.email : 'Unknown' | &nbsp; a.btn.btn-sm.btn-default(href='mailto:' + entry.email) Send Mail else p No pending approvals. Nice work!<file_sep>/middleware/passport-routes.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // 'use strict'; const querystring = require('querystring'); const utils = require('../utils'); module.exports = function configurePassport(app, passport, initialConfig) { app.get('/signin', function (req, res) { utils.storeReferrer(req, res, '/auth/azure', 'signin page hit, need to go authenticate'); }); // ---------------------------------------------------------------------------- // passport integration with GitHub // ---------------------------------------------------------------------------- app.get('/signin/github', function (req, res) { utils.storeReferrer(req, res, '/auth/github', '/signin/github authentication page requested'); }); var ghMiddleware = passport.authorize('github'); const githubFailureRoute = { failureRedirect: '/auth/github/', }; var ghMiddlewareWithFailure = passport.authorize('github', githubFailureRoute); function authenticationCallback(secondaryAuthScheme, secondaryAuthProperty, req, res, next) { const after = (req, res) => utils.redirectToReferrer(req, res, '/', `authentication callback of type ${secondaryAuthScheme} and property ${secondaryAuthProperty}`); if (initialConfig.authentication.scheme !== secondaryAuthScheme) { return hoistAccountToSession(req, req.account, secondaryAuthProperty, (error) => { return error ? next(error) : after(req, res); }); } return after(req, res); } function processSignout(primaryAuthScheme, secondaryAuthProperties, req, res, next) { if (initialConfig.authentication.scheme === primaryAuthScheme) { return res.redirect('/signout'); } const after = (req, res) => { var url = req.headers.referer || '/'; if (req.query.redirect === 'github') { url = 'https://github.com/logout'; } res.redirect(url); }; const secondaryProperties = secondaryAuthProperties.split(','); let dirty = false; secondaryProperties.forEach((propertyName) => { if (req.user && req.user[propertyName] !== undefined) { delete req.user[propertyName]; dirty = true; } }); if (dirty) { return resaveUser(req, (error) => { return error ? 
next(error) : after(req, res); }); } return after(req, res); } // User-beware, I should not be writing my own truncating shallow object copy code function shallowTruncatingCopy(obj) { let o = {}; for (const entity in obj) { const value = obj[entity]; if (typeof value === 'object') { o[entity] = {}; for (const property in value) { if (typeof value[property] !== 'object') { o[entity][property] = value[property]; } } } else { o[entity] = value; } } return o; } function hoistAccountToSession(req, account, property, callback) { const serializer = req.app._sessionSerializer; const entity = account[property]; if (entity === undefined) { return callback(new Error(`No entity available with the property ${property} to be hoisted.`)); } if (serializer === undefined) { req.user[property] = entity; return callback(); } const clone = shallowTruncatingCopy(req.user); clone[property] = entity; resaveUser(req, clone, callback); } function resaveUser(req, clone, callback) { if (typeof clone === 'function') { callback = clone; clone = undefined; } if (clone === undefined) { clone = shallowTruncatingCopy(req.user); } req.login(clone, callback); } app.get('/auth/github', ghMiddleware); app.get('/auth/github/callback', ghMiddlewareWithFailure, authenticationCallback.bind(null, 'github', 'github')); if (initialConfig.authentication.scheme === 'aad') { app.get('/signin/github/join', (req, res) => { res.render('creategithubaccount', { title: 'Create a GitHub account', user: req.user, config: initialConfig.obfuscatedConfig, }); }); app.get('/auth/github/join', (req, res) => { var config = req.app.settings.runtimeConfig; var authorizeRelativeUrl = req.app.settings['runtime/passport/github/authorizeUrl'].replace('https://github.com', ''); var joinUrl = 'https://github.com/join?' + querystring.stringify({ return_to: `${authorizeRelativeUrl}?` + querystring.stringify({ client_id: config.github.oauth2.clientId, redirect_uri: config.github.oauth2.callbackUrl, response_type: 'code', scope: req.app.settings['runtime/passport/github/scope'], }), source: 'oauth', }); res.redirect(joinUrl); }); } app.get('/signout', function (req, res) { var config = req.app.settings.runtimeConfig; req.logout(); if (req.session) { delete req.session.enableMultipleAccounts; delete req.session.selectedGithubId; } if (config.authentication.scheme === 'github') { res.redirect('https://github.com/logout'); } else { var unlinked = req.query.unlink !== undefined; res.render('message', { message: unlinked ? `Your ${config.brand.companyName} and GitHub accounts have been unlinked. You no longer have access to any ${config.brand.companyName} organizations, and you have been signed out of this portal.` : 'Goodbye', title: 'Goodbye', buttonText: unlinked ? 
'Re-link' : 'Sign In', config: initialConfig.obfuscatedConfig, }); } }); app.get('/signout/github', processSignout.bind(null, 'github', 'github,githubIncreasedScope')); // ---------------------------------------------------------------------------- // Expanded GitHub auth scope routes // ---------------------------------------------------------------------------- app.get('/signin/github/increased-scope', function (req, res) { utils.storeReferrer(req, res, '/auth/github/increased-scope', 'request for the /signin/github/increased-scope page to go auth with more GitHub scope'); }); app.get('/auth/github/increased-scope', passport.authorize('expanded-github-scope')); // TODO: Validate that the increased scope user ID === the actual user ID app.get('/auth/github/callback/increased-scope', passport.authorize('expanded-github-scope', { failureRedirect: '/auth/github/increased-scope', }), authenticationCallback.bind(null, 'all', 'githubIncreasedScope')); // ---------------------------------------------------------------------------- // passport integration with Azure Active Directory // ---------------------------------------------------------------------------- var aadMiddleware = initialConfig.authentication.scheme === 'github' ? passport.authorize('azure-active-directory') : passport.authenticate('azure-active-directory'); app.get('/auth/azure', aadMiddleware); app.post('/auth/azure/callback', aadMiddleware, authenticationCallback.bind(null, 'aad', 'azure')); app.get('/signin/azure', function (req, res) { utils.storeReferrer(req, res, '/auth/azure', 'request for the /signin/azure page, need to authenticate'); }); app.get('/signout/azure', processSignout.bind(null, 'aad', 'azure')); }; <file_sep>/middleware/appInsights.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // 'use strict'; const insights = require('../lib/insights'); module.exports = function initializeAppInsights(app, config) { let client = undefined; if (!config) { // Configuration failure happened ahead of this module return; } const key = config.telemetry && config.telemetry.applicationInsightsKey ? config.telemetry.applicationInsightsKey : null; if (key) { const appInsights = require('applicationinsights'); const instance = appInsights.setup(key); client = instance.getClient(key); instance.start(); } app.use((req, res, next) => { // Acknowledge synthetic tests immediately without spending time in more middleware if (req.headers && req.headers['synthetictest-id'] !== undefined && req.headers['x-ms-user-agent'] !== undefined && req.headers['x-ms-user-agent'].includes('System Center')) { return res.status(204).send(); } // Provide application insight event tracking with correlation ID const extraProperties = { correlationId: req.correlationId, }; req.insights = insights(extraProperties, client); next(); }); return insights({}, client); }; <file_sep>/utils.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. 
// const async = require('async'); // ---------------------------------------------------------------------------- // Returns an integer, random, between low and high (exclusive) - [low, high) // ---------------------------------------------------------------------------- exports.randomInteger = function (low, high) { return Math.floor(Math.random() * (high - low) + low); }; // ---------------------------------------------------------------------------- // Session utility: Store the referral URL, if present, and redirect to a new // location. // ---------------------------------------------------------------------------- exports.storeReferrer = function storeReferrer(req, res, redirect, optionalReason) { const eventDetails = { method: 'storeReferrer', reason: optionalReason || 'unknown reason', }; if (req.session && req.headers && req.headers.referer && req.session.referer !== undefined && !req.headers.referer.includes('/signout')) { req.session.referer = req.headers.referer; eventDetails.referer = req.headers.referer; } if (redirect) { eventDetails.redirect = redirect; if (req.insights) { req.insights.trackEvent('RedirectWithReferrer', eventDetails); } res.redirect(redirect); } }; // ---------------------------------------------------------------------------- // Session utility: store the original URL // ---------------------------------------------------------------------------- exports.storeOriginalUrlAsReferrer = function storeOriginalUrl(req, res, redirect, optionalReason) { storeOriginalUrlAsVariable(req, res, 'referer', redirect, optionalReason); }; exports.redirectToReferrer = function redirectToReferrer(req, res, url, optionalReason) { url = url || '/'; const alternateUrl = popSessionVariable(req, res, 'referer'); const eventDetails = { method: 'redirectToReferrer', reason: optionalReason || 'unknown reason', }; if (req.insights) { req.insights.trackEvent('RedirectToReferrer', eventDetails); } res.redirect(alternateUrl || url); }; function storeOriginalUrlAsVariable(req, res, variable, redirect, optionalReason) { const eventDetails = { method: 'storeOriginalUrlAsVariable', variable: variable, redirect: redirect, reason: optionalReason || 'unknown reason', }; if (req.session && req.originalUrl) { req.session[variable] = req.originalUrl; } if (redirect) { if (req.insights) { req.insights.trackEvent('RedirectFromOriginalUrl', eventDetails); } res.redirect(redirect); } } exports.storeOriginalUrlAsVariable = storeOriginalUrlAsVariable; function popSessionVariable(req, res, variableName) { if (req.session && req.session[variableName] !== undefined) { const url = req.session[variableName]; delete req.session[variableName]; return url; } } exports.popSessionVariable = popSessionVariable; // ---------------------------------------------------------------------------- // Provide our own error wrapper and message for an underlying thrown error. // Useful for the user-presentable version. // ---------------------------------------------------------------------------- exports.wrapError = function (error, message, userIntendedMessage) { var err = new Error(message); err.innerError = error; if (error && error.stack) { err.stack = error.stack; } if (userIntendedMessage === true) { err.skipLog = true; } return err; }; // ---------------------------------------------------------------------------- // Retrieves all pages of a GitHub (octonode) API endpoint by following the // next link, if present, in results. Each page is of max GitHub-allowed size, // 100 items. 
Keep in mind that each page is 1 API call from the API allownace. // ---------------------------------------------------------------------------- exports.retrieveAllPages = function retrieveAllPages(method, optionalFilter, callback) { if (typeof optionalFilter == 'function') { callback = optionalFilter; optionalFilter = null; } var done = false; var page = 1; var results = []; async.whilst( function () { return !done; }, function (cb) { var params = { page: page++, per_page: 100, }; if (optionalFilter) { Object.assign(params, optionalFilter); } method.call(null, params, function (error, result, headers) { if (error) { done = true; } else { if (result && result.length) { results = results.concat(result); } done = !(headers && headers.link && headers.link.indexOf('rel="next"') >= 0); } cb(error); }); }, function (error) { callback(error, error ? undefined : results); }); }; // ---------------------------------------------------------------------------- // A destructive removal function for an object. Removes a single key. // ---------------------------------------------------------------------------- exports.stealValue = function steal(obj, key) { if (obj[key] !== undefined) { var val = obj[key]; delete obj[key]; return val; } }; // ---------------------------------------------------------------------------- // Given a list of string values, check a string, using a case-insensitive // comparison. // ---------------------------------------------------------------------------- exports.inListInsensitive = function inListInsensitive(list, value) { value = value.toLowerCase(); for (var i = 0; i < list.length; i++) { if (list[i].toLowerCase() === value) { return true; } } return false; }; // ---------------------------------------------------------------------------- // Given a list of lowercase values, check whether a value is present. // ---------------------------------------------------------------------------- exports.isInListAnycaseInLowercaseList = function isInListAnycaseInLowercaseList(list, value) { value = value.toLowerCase(); for (var i = 0; i < list.length; i++) { if (list[i] === value) { return true; } } return false; }; // ---------------------------------------------------------------------------- // Given an array of things that have an `id` property, return a hash indexed // by that ID. // ---------------------------------------------------------------------------- exports.arrayToHashById = function arrayToHashById(inputArray) { var hash = {}; if (inputArray && inputArray.length) { for (var i = 0; i < inputArray.length; i++) { if (inputArray[i] && inputArray[i].id) { hash[inputArray[i].id] = inputArray[i]; } } } return hash; }; // ---------------------------------------------------------------------------- // Obfuscate a string value, optionally leaving a few characters visible. 
// ---------------------------------------------------------------------------- exports.obfuscate = function obfuscate(value, lastCharactersShowCount) { if (value === undefined || value === null || value.length === undefined) { return value; } var length = value.length; lastCharactersShowCount = lastCharactersShowCount || 0; lastCharactersShowCount = Math.min(Math.round(lastCharactersShowCount), length - 1); var obfuscated = ''; for (var i = 0; i < length - lastCharactersShowCount; i++) { obfuscated += '*'; } for (var j = length - lastCharactersShowCount; j < length; j++) { obfuscated += value[j]; } return obfuscated; }; // ---------------------------------------------------------------------------- // A very basic breadcrumb stack that ties in to an Express request object. // ---------------------------------------------------------------------------- exports.addBreadcrumb = function (req, breadcrumbTitle, optionalBreadcrumbLink) { if (req === undefined || req.baseUrl === undefined) { throw new Error('addBreadcrumb: did you forget to provide a request object instance?'); } if (!optionalBreadcrumbLink && optionalBreadcrumbLink !== false) { optionalBreadcrumbLink = req.baseUrl; } if (!optionalBreadcrumbLink && optionalBreadcrumbLink !== false) { optionalBreadcrumbLink = '/'; } var breadcrumbs = req.breadcrumbs; if (breadcrumbs === undefined) { breadcrumbs = []; } breadcrumbs.push({ title: breadcrumbTitle, url: optionalBreadcrumbLink, }); req.breadcrumbs = breadcrumbs; }; exports.stackSafeCallback = function stackSafeCallback(callback, err, item, extraItem) { // Works around RangeError: Maximum call stack size exceeded. async.setImmediate(() => { callback(err, item, extraItem); }); }; exports.createSafeCallbackNoParams = function createSafeCallbackNoParams(cb) { return () => { exports.stackSafeCallback(cb); }; }; <file_sep>/business/metrics.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // 'use strict'; const async = require('async'); const azure = require('azure-storage'); function retrieveFromAzureTable(config, type, id, callback) { const tableService = azure.createTableService(config.account, config.key); let metrics = []; let done = false; let continuationToken = null; async.whilst( function areWeDone() { return !done; }, function grabPage(cb) { const query = new azure.TableQuery().where('PartitionKey eq ?', type); if (id) { query.and('RowKey eq ?', id); } tableService.queryEntities(config.tableName, query, continuationToken, (error, results) => { if (error) { done = true; return cb(error); } if (results.continuationToken) { continuationToken = results.continuationToken; } else { done = true; } if (results && results.entries) { results.entries.forEach(entry => { metrics.push(reduceEntity(entry)); }); } cb(); }); }, function (queryingError) { return callback(queryingError, metrics); }); } function reduceEntity(instance) { if (!instance) { return instance; } for (let column in instance) { if (instance[column]) { instance[column] = instance[column]._; } } return instance; } module.exports.retrieveFromAzureTable = retrieveFromAzureTable; <file_sep>/views/org/pending.pug //- //- Copyright (c) Microsoft. All rights reserved. //- Licensed under the MIT license. See LICENSE file in the project root for full license information. 
//- extends ../layout block js_doc_ready | var inviteInterfaceUpdateOnClick = function(){$('#openInviteButton').removeClass('btn-primary').addClass('btn-muted');$('#inviteAcceptedButton').removeClass('btn-muted').addClass('btn-primary');alert('On the next page press the green Join button.\n\nThen close the page and return here to continue onboarding to #{org.name}.');return true;}; | $('#openInviteButton').click(inviteInterfaceUpdateOnClick); | $('#openInviteButton2').click(inviteInterfaceUpdateOnClick); block content .container if writeOrgFailureMessage .alert.alert-danger if showTwoFactorWarning h2 Two-factor authentication is required to continue else h2 GitHub API warning p.lead= writeOrgFailureMessage if state === 'pending' && showTwoFactorWarning p(style='margin-top:24px') | This organization has enabled GitHub's&nbsp; a(href='https://help.github.com/articles/requiring-two-factor-authentication-in-your-organization/', target='_new') two-factor authentication requirement |. As a result, you cannot join the org until you&nbsp; a(href='https://github.com/settings/two_factor_authentication/configure') turn on two-factor auth&nbsp; | with GitHub. form(method='post') ul.list-inline li a.btn.btn-primary(href='https://github.com/settings/two_factor_authentication/configure', target='_new') Configure 2FA on GitHub li input.btn.btn-default(type='submit', value='Validate 2FA and continue', title='Pending - you have already been invited') include twoFactorInstructions else if state == 'pending' if showApplicationPermissionWarning // Need to figure out what the right message is to show here h1 Action Required: You've been manually invited! p.lead GitHub has sent an invitation to <em>#{org.name}</em>. p | For security purposes, GitHub requires you to accept the invitation directly on their web site.&nbsp; strong You must come back to this tab after accepting your invitation to continue setting up your open source teams and permissions. div.row div.col-md-6.col-lg-6 h3 Step 1: Accept your invite from GitHub p | Open your invite on GitHub and press the green button. p a.btn.btn-lg.capitalize.btn-primary#openInviteButton(href='https://github.com/orgs/' + org.name + '/invitation', target='_new') Open your #{org.name} invitation h3 Step 2: Return back here p | After pressing the green Join button, close the GitHub site and click Continue on this page. We'll then take you to a security check the "Join Teams" experience. p a.btn.btn-lg.btn-muted#inviteAcceptedButton(href=org.baseUrl + 'join' + (onboarding ? '?onboarding=' + onboarding : '')) I've accepted my invite on GitHub.com, continue... div.col-md-6.col-lg-6 p(style='border:1px solid #ccc; padding:12px') a#openInviteButton2(target='_new', href='https://github.com/orgs/' + org.name + '/invitation') img.img-responsive(src='/img/GitHubInvitation.png', title='A screenshot of what the GitHub invitation looks like. The experience is hosted outside of this portal and actually on GitHub.com', alt='A screenshot of what the GitHub invitation looks like. The experience is hosted outside of this portal and actually on GitHub.com') br | A sample GitHub invite. Press the green button and close the page. else if state == 'active' h1 You're now a member of #{org.name}. p You are currently a member of this additional organization. No additional work is required to gain access to it. p If you need to join a specific team to gain additional permissions, you can use the Join a Team experience on this site. 
p a.btn.btn-primary(href='/teams') Join a team else div.row div.col-md-8.col-lg-8 h1 Want to join #{org.name}? if hasIncreasedScope form(method='post') p(style='margin-top:24px') input.btn.btn-primary.btn-huge(type='submit', value='Join ' + org.name) else p.lead Quick way: Authorize us to join on your behalf p. Authorize this site to use the <a href="https://developer.github.com/v3/oauth/#scopes" target="_new"><code>org:write</code></a> permission and immediately join the organization. p a.btn.btn-primary.btn-huge(href='/' + org.name + '/join/express' + (onboarding ? '?onboarding=' + onboarding : '')) | Join #{org.name} hr p.lead Manual way: Follow this process ul li We'll explain how the GitHub invitation works. li An email invitation will be sent to you from GitHub. li You accept the invitation in on GitHub.com. li You come back to this site and continue onboarding. form(method='post') p input.btn.btn-muted-more.btn-lg(type='submit', value='Manually Join ' + org.name) if onboarding p &nbsp; hr h3 Your onboarding progress h5 | Sign in to your GitHub &amp; #{config.brand.companyName} accounts&nbsp; i.glyphicon.glyphicon-ok h5 | Link your accounts&nbsp; i.glyphicon.glyphicon-ok h5.text-primary | Join your first GitHub organization h5 | Multifactor security checkup h5 | Profile review h5 | Publish your membership <em>(optional)</em> h5 | Join a team <em>(optional)</em> div.col-md-4.col-lg-4 if orgUser p img.img-thumbnail.img-responsive(src=orgUser.avatar(400), alt=(orgUser.name || orgUser.login)) h3= orgUser.name h4= orgUser.login p(style='margin-top:18px') a.btn.btn-sm.btn-muted(href='https://github.com/' + org.name, target='_new') | Open on GitHub hr div.row div.col-md-6.col-lg-6 if orgUser.company h6 Company p= orgUser.company if orgUser.location h6 Location p= orgUser.location if orgUser.email h6 E-mail p= orgUser.email if orgUser.otherFields.blog h6 On the Web p a(href=orgUser.otherFields.blog, target='_new') = orgUser.otherFields.blog + ' ' i.glyphicon.glyphicon-share-alt if orgUser.getProfileCreatedDate() h6 Created p time(datetime=orgUser.getProfileCreatedDate().toISOString())= orgUser.getProfileCreatedDate().toDateString() if orgUser.getProfileCreatedDate() h6 Updated p time(datetime=orgUser.getProfileUpdatedDate().toISOString())= orgUser.getProfileUpdatedDate().toDateString() hr if org.inner.settings.organizationPurpose h6 How we use this organization p= org.inner.settings.organizationPurpose if org.inner.settings.type h6 Supported Repository Types ul.list-unstyled li Public if org.inner.settings.type == 'publicprivate' || org.inner.settings.type == 'private' li Private div.col-md-6.col-lg-h6 h6 Repositories if orgUser.otherFields.public_repos h2 = orgUser.otherFields.public_repos + ' ' small Public //- small Public if orgUser.otherFields.total_private_repos h2 = orgUser.otherFields.total_private_repos + ' ' small Private hr h6 Remaining Repositories if orgUser.otherFields.plan && orgUser.otherFields.plan.private_repos h2 = orgUser.otherFields.plan.name + ' ' small Plan h2 = (orgUser.otherFields.plan.private_repos - orgUser.otherFields.total_private_repos) + ' ' small Private h2 | &infin;&nbsp; small Public <file_sep>/views/email/membershipApprovals/requestSubmitted.pug //- //- Copyright (c) Microsoft. All rights reserved. //- Licensed under the MIT license. See LICENSE file in the project root for full license information. //- extends ../body block content h2 = team | &nbsp; small= org p. Your permission request has been submitted. 
p a(href=reposSiteUrl + 'approvals/', style='display:inline-block;background-color:#eee;font-size:18px;padding:12px') Review your requests here h3 Request details table.technical(style='width:80%') thead tr th(colspan=2) Requested by tbody tr td p a(href='https://github.com/' + approvalRequest.ghu)= approvalRequest.ghu td= approvalRequest.email thead tr th(colspan=2) Team tbody tr td GitHub organization td= org tr td Team name td= team if approvalRequest.justification thead tr th(colspan=2) Justification provided by #{personName || personMail} tr td(colspan=2) p= approvalRequest.justification p If you prefer not to click on email links, you can find your requests by: ul li Navigating to the open source repos site at #{reposSiteUrl} li Go to the <em>/approvals</em> URL li Review your request details <file_sep>/business/teamSearch.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // 'use strict'; const Q = require('q'); const defaultPageSize = 20; // GitHub.com seems to use a value around 33 function TeamSearch(teams, options) { options = options || {}; this.teams = teams; //teamsWithMembers this.pageSize = options.pageSize || defaultPageSize; this.phrase = options.phrase; this.set = options.set; this.yourTeamsMap = options.yourTeamsMap || new Map(); } TeamSearch.prototype.search = function search(tags, page, sort) { const self = this; self.page = parseInt(page); self.tags = tags; self.sort = sort ? sort.charAt(0).toUpperCase() + sort.slice(1) : 'Alphabet'; return Q.all( self.filterByType(self.set) .filterByPhrase(self.phrase) .determinePages()['sortBy' + self.sort]() .getPage(self.page) ); }; TeamSearch.prototype.determinePages = function() { this.totalPages = Math.ceil(this.teams.length / this.pageSize); this.totalTeams = this.teams.length; return this; }; TeamSearch.prototype.getPage = function(page) { this.teams = this.teams.slice((page - 1) * this.pageSize, ((page - 1) * this.pageSize) + this.pageSize); this.pageFirstTeam = 1 + ((page - 1) * this.pageSize); this.pageLastTeam = this.pageFirstTeam + this.teams.length - 1; return this; }; function teamMatchesPhrase(repo, phrase) { // Poor man's search, starting with just a raw includes search // Assumes that phrase is already lowercase to work let string = ((repo.name || '') + (repo.description || '')).toLowerCase(); return string.includes(phrase); } TeamSearch.prototype.filterByType = function (setType) { let filter = null; if (setType === 'your' || setType === 'available') { const showIfInSet = setType === 'your'; filter = t => { const map = this.yourTeamsMap || new Map(); return map.has(t.id) === showIfInSet; }; } if (filter) { this.teams = this.teams.filter(filter); } return this; }; TeamSearch.prototype.filterByPhrase = function (phrase) { if (phrase) { phrase = phrase.toLowerCase(); this.teams = this.teams.filter(t => { return teamMatchesPhrase(t, phrase); }); } return this; }; TeamSearch.prototype.sortByAlphabet = function() { this.teams.sort((a, b) => { let nameA = a.name.toLowerCase(); let nameB = b.name.toLowerCase(); if (nameA < nameB) { return -1; } if (nameA > nameB) { return 1; } return 0; }); return this; }; module.exports = TeamSearch; <file_sep>/routes/settings/digestReports.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. 
// 'use strict'; const express = require('express'); const buildRecipientMap = require('../../jobs/reports/consolidated').buildRecipientMap; const RedisHelper = require('../../lib/redis'); const router = express.Router(); const systemWidePermissionsMiddleware = require('../../middleware/github/systemWidePermissions'); router.use(systemWidePermissionsMiddleware); router.use((req, res, next) => { const link = req.link; const systemWidePermissions = req.systemWidePermissions; let upn = link.aadupn; if (!upn) { return next(new Error('Must have an active Active Directory link')); } // For performance reasons, this current implementation only works // when the Redis server is the same for both reports and the // app const providers = req.app.settings.providers; const config = providers.config; let reportRedisClient = null; if (providers.witnessRedis) { reportRedisClient = new RedisHelper(providers.witnessRedis); } else if (config.witness && config.witness.redis && config.witness.redis.tls && config.witness.redis.tls === config.redis.tls) { reportRedisClient = new RedisHelper(providers.redisClient); } const reportConfig = config && config.github && config.github.jobs ? config.github.jobs.reports : {}; if (!reportRedisClient || !reportConfig || !reportConfig.witnessEventKey) { return next(new Error('Digest report storage is not enabled for this environment. Reports are not available to be viewed on-demand.')); } const availableReports = []; req.availableReports = availableReports; return reportRedisClient.getObjectCompressed(reportConfig.witnessEventKey, (error, consolidatedReport) => { if (error) { return next(error); } const generated = consolidatedReport.metadata.startedText || 'recently'; const reportsByRecipient = buildRecipientMap(consolidatedReport); // Hard-coded const administratorUpn = 'upn:msftgits<EMAIL>'; const administratorReport = reportsByRecipient.get(administratorUpn); if (systemWidePermissions.allowAdministration && administratorReport) { availableReports.push({ description: `Microsoft-wide report as of ${generated}`, id: administratorUpn, report: administratorReport, }); } const reportIndex = `upn:${upn.toLowerCase()}`; const userReport = reportsByRecipient.get(reportIndex); if (userReport) { availableReports.push({ description: `Your administrator's report as of ${generated}`, report: userReport, id: reportIndex, }); } return next(); }); }); router.get('/administrator/:id', (req, res, next) => { const id = req.params.id; const availableReports = req.availableReports; for (let i = 0; i < availableReports.length; i++) { const availableReport = availableReports[i]; if (availableReport.id === id) { return req.oss.render(req, res, 'settings/digestReportView', availableReport.description, { reportTitle: availableReport.description, github: { consolidated: availableReport.report, }, }); } } return next(new Error('Not found')); }); router.get('/', (req, res) => { const availableReports = req.availableReports; req.oss.render(req, res, 'settings/digestReports', 'Reports', { availableReports: availableReports, }); }); module.exports = router; <file_sep>/views/email/membershipApprovals/decision.pug //- //- Copyright (c) Microsoft. All rights reserved. //- Licensed under the MIT license. See LICENSE file in the project root for full license information. //- extends ../body block content if wasApproved h1 | Welcome to #{pendingRequest.teamname} &nbsp; small= org p.lead A team maintainer has approved your request to join the GitHub team. 
else h1 | Your #{pendingRequest.teamname} request &nbsp; small= org p.lead Unfortunately your request was not approved at this time. Your request has been closed. if decisionNote h2 Message from decision maker table(style='width:80%') tbody tr td p.lead= decisionNote h2 Decision maker p = decisionBy br = decisionEmail if pendingRequest.justification h2 Your request table(style='width:80%') tbody tr td p= pendingRequest.justification <file_sep>/business/memberSearch.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // 'use strict'; const _ = require('lodash'); const async = require('async'); const Q = require('q'); const earlyProfileFetchTypes = new Set(['former', 'active', 'serviceAccount', 'unknownAccount']); const defaultPageSize = 33; // GitHub.com seems to use a value around 33 function MemberSearch(members, options) { options = options || {}; // must be a Map from ID to object with { orgs, memberships, account } if (Array.isArray(members)) { this.members = members; } else { if (!members || !members.values || !members.set) { throw new Error('Members must be a Map.'); } this.members = Array.from(members.values()); } translateMembers(this.members); this.links = options.links; this.getCorporateProfile = options.getCorporateProfile; this.teamMembers = options.teamMembers; this.team2AddType = options.team2AddType; this.pageSize = options.pageSize || defaultPageSize; this.phrase = options.phrase; this.type = options.type; } function translateMembers(members) { // A breaking change altered the projected format members.forEach(member => { if (member.orgs && !member.account) { const orgNames = Object.getOwnPropertyNames(member.orgs); const firstOrg = orgNames[0]; member.account = member.orgs[firstOrg]; } }); } MemberSearch.prototype.search = function search(page, sort) { const self = this; self.page = parseInt(page); self.sort = sort ? sort.charAt(0).toUpperCase() + sort.slice(1) : 'Alphabet'; return Q.all( self.filterByTeamMembers() .associateLinks() .getCorporateProfilesEarly(self.type) .then(() => { return self.filterByType(self.type) .filterByPhrase(self.phrase) .determinePages()['sortBy' + self.sort]() .getPage(self.page) .sortOrganizations() .getCorporateProfiles(); })); }; MemberSearch.prototype.filterByTeamMembers = function () { // If this is a sub-team view, filter by members unless the // special "add a member" experience is present in this route. let teamSet = new Set(); if (this.teamMembers) { for (let i = 0; i < this.teamMembers.length; i++) { const member = this.teamMembers[i]; teamSet.add(member.id); } if (this.team2AddType) { for (let i = 0; i < this.members.length; i++) { const member = this.members[i]; member.isTeamMember = teamSet.has(member.id); } } else { this.members = this.members.filter(m => { return teamSet.has(m.id); }); } } return this; }; MemberSearch.prototype.getCorporateProfilesEarly = function (type) { // This will make a Redis call for every single member, if not cached, // so the early mode is only used in a specific type of view this early. // The default just resolves for a single page of people. 
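  // (Early fetch applies to the 'former', 'active', 'serviceAccount' and
  // 'unknownAccount' views, listed in earlyProfileFetchTypes at the top of this
  // file, because those filters need corporate profile data for every member
  // before filtering and paging can happen.)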
if (!earlyProfileFetchTypes.has(type)) { return Q(this); } return this.getCorporateProfiles(); }; MemberSearch.prototype.associateLinks = function () { const links = new Map(); this.links.forEach(link => { const id = parseInt(link.ghid, 10); links.set(id, link); }); this.members.forEach(member => { const link = links.get(member.id); if (link) { member.link = link; } }); return this; }; MemberSearch.prototype.sortOrganizations = function() { this.members.forEach(member => { if (member.orgs && member.orgs.length > 0) { member.orgs = _.sortBy(member.orgs, ['name']); } }); return this; }; function tryGetCorporateProfile(upn, oid, getCorporateProfile, callback) { let profile = null; function getCorporateProfileByMethod(hashKey, field, next) { getCorporateProfile(hashKey, field, hashKey === 'upns' /* JSON */, (error, p) => { if (error) { error = null; // ignore any issue with the specific lookup } else if (p && hashKey !== 'upns') { const newUpn = p; return getCorporateProfileByMethod('upns', newUpn, next); } else if (p) { profile = p; error = true; // shortcut the waterfall } next(error); }); } function getByOid(next) { getCorporateProfileByMethod('aadIds', oid, next); } function getByUpn(next) { getCorporateProfileByMethod('upns', upn, next); } function getByUpnWithEmail(next) { getCorporateProfileByMethod('emailAddresses', upn, next); } const tasks = []; if (upn) { tasks.push(getByUpn); // most efficient } if (oid) { tasks.push(getByOid); // most accurate } if (upn) { tasks.push(getByUpnWithEmail); // common fallback } async.waterfall(tasks, () => { return callback(null, profile); }); } function getProfile(filterType, getCorporateProfile, member) { const deferred = Q.defer(); const projectLinkAsCorporateProfile = filterType !== 'former'; const upn = member.link ? member.link.aadupn : null; const oid = member.link ? member.link.aadoid : null; if (!upn && !oid) { deferred.resolve(); } else { if (member.corporate) { deferred.resolve(member.corporate); } else { tryGetCorporateProfile(upn, oid, getCorporateProfile, (error, profile) => { if (error) { return deferred.reject(error); } if (!profile && projectLinkAsCorporateProfile) { profile = { preferredName: member.link.aadname, userPrincipalName: upn, aadId: oid, }; } if (profile) { member.corporate = profile; } deferred.resolve(); }); } } return deferred.promise; } MemberSearch.prototype.getCorporateProfiles = function () { if (this.getCorporateProfile) { const resolveProfiles = []; this.members.forEach(member => { resolveProfiles.push(getProfile(this.type, this.getCorporateProfile, member)); }); return Q.all(resolveProfiles); } return this; }; MemberSearch.prototype.determinePages = function() { this.totalPages = Math.ceil(this.members.length / this.pageSize); this.totalItems = this.members.length; return this; }; MemberSearch.prototype.getPage = function(page) { this.members = this.members.slice((page - 1) * this.pageSize, ((page - 1) * this.pageSize) + this.pageSize); this.pageFirstItem = 1 + ((page - 1) * this.pageSize); this.pageLastItem = this.pageFirstItem + this.members.length - 1; return this; }; function memberMatchesPhrase(member, phrase) { let linkIdentity = member.link ? `${member.link.aadupn} ${member.link.aadname} ${member.link.ghu} ${member.link.ghid} ` : ''; let accountIdentity = member.login ? 
member.login.toLowerCase() : member.account.login.toLowerCase(); let combined = (linkIdentity + accountIdentity).toLowerCase(); return combined.includes(phrase); } MemberSearch.prototype.filterByPhrase = function (phrase) { if (phrase) { phrase = phrase.toLowerCase(); this.members = this.members.filter(m => { return memberMatchesPhrase(m, phrase); }); } return this; }; MemberSearch.prototype.filterByType = function (type) { let filter = null; switch (type) { case 'linked': filter = r => { return r.link && r.link.ghid; }; break; case 'unlinked': filter = r => { return !r.link; }; break; case 'unknownAccount': filter = r => { return r.link && r.link.ghid && (!r.corporate || !r.corporate.userPrincipalName); }; break; case 'former': filter = r => { return r.link && r.link.ghid && !r.link.serviceAccount && (!r.corporate || !r.corporate.userPrincipalName); }; break; case 'active': filter = r => { return r.link && r.link.ghid && r.link.aadoid && !r.link.serviceAccount && r.corporate && r.corporate.userPrincipalName; }; break; case 'serviceAccount': filter = r => { return r.link && r.link.serviceAccount; }; break; } if (filter) { this.members = this.members.filter(filter); } return this; }; MemberSearch.prototype.sortByAlphabet = function() { this.members.sort((a, b) => { const aAccountIdentity = a.login ? a.login.toLowerCase() : a.account.login.toLowerCase(); const bAccountIdentity = b.login ? b.login.toLowerCase() : b.account.login.toLowerCase(); if (aAccountIdentity > bAccountIdentity) return 1; if (aAccountIdentity < bAccountIdentity) return -1; return 0; }); return this; }; module.exports = MemberSearch; <file_sep>/views/extensions/npm/publish.pug //- //- Copyright (c) Microsoft. All rights reserved. //- Licensed under the MIT license. See LICENSE file in the project root for full license information. //- extends ../../layout block content .container .row: .col-sm-12 .nav ul.pager.zero-pad-bottom li.previous a(href='/' + organization.name + '/repos/' + repository.name) span(aria-hidden=true) &larr; = ' Back to the ' + repository.name + ' repository' if !npmUsername .row: .col-sm-12 h2 Please create and/or register your NPM account p. To enable NPM publishing, you must register your NPM username. After the package has been published, your NPM account will be given collaborator/owner status so that you may publish the package with your own account in the future. ul.list-inline li: a.btn.btn-primary(href='/settings/npm') Register existing NPM username li: a.btn.btn-default(href='https://www.npmjs.com/signup', target='_new') Create an NPMJS account hr p: small. After registering your account, come back to this page to continue package publishing. If you created a new NPMJS account, you will also need to refresh this page first. else .row: .col-sm-12 h1 NPM publishing .row .col-md-8 p.lead A repository with a #[code package.json] file can be published to the NPM registry. 
form(method='post', action='/' + organization.name + '/repos/' + repository.name + '/extensions/npm/publish') .form-group label(for='branch') Branch to publish select.form-control#branch(name='branch') each branch in branches option(value=branch.name, selected=branch.name == 'master')= branch.name .form-group label NPM collaborator input.form-control(disabled=true, value=npmUsername) .form-group label(for='collaborators') Additional collaborators = ' (optional, comma-separated NPM usernames)' input.form-control#collaborators(name='collaborators', placeholder='Additional collaborators') if userChoiceError .checkbox label input#acknowledge(type='checkbox', name='acknowledge') strong I acknowledge that my package build scripts will not be executed br | Your #[code package.json] file contains scripts that will not be executed, as this is a publishing service and not a full-fledged build provider. p: input.btn.btn-primary(type='submit', value='Publish') .col-md-4 p This self-service tool will publish the branch as an NPM package from an official account. p. After publishing, your NPM account, #[strong= npmUsername] will be able to publish package updates in the future. p You can also authorize other users to publish the package. <file_sep>/routes/org/2fa.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // const express = require('express'); const router = express.Router(); const moment = require('moment'); const utils = require('../../utils'); router.get('/', function (req, res, next) { var org = req.org; var onboarding = req.query.onboarding; var joining = req.query.joining; org.oss.addBreadcrumb(req, 'Multi-factor authentication check'); org.queryUserMultifactorStateOk(function (error, state) { if (error) { return next(utils.wrapError(error, `We were unable to validate your security settings with GitHub. The error GitHub gave us: ${error.message || error}`)); } if (state === true && (req.body.validate || onboarding || joining)) { var url = org.baseUrl; if (onboarding || joining) { var urlSegment = '?' + (onboarding ? 'onboarding' : 'joining') + '=' + (onboarding ? onboarding : joining); url = org.baseUrl + (onboarding ? 'profile-review' : 'teams') + urlSegment; } return res.redirect(url); } var title = state === false ? 'Please enable two-factor authentication now' : 'Thanks for using modern security practices'; req.oss.render(req, res, 'org/2fa', title, { twoFactorOff: !state, notValidated: (req.query.validate ? true : undefined), onboarding: onboarding, org: org, nowString: moment().format('MMMM Do YYYY, h:mm:ss a'), }); }); }); module.exports = router; <file_sep>/views/footer.pug //- //- Copyright (c) Microsoft. All rights reserved. //- Licensed under the MIT license. See LICENSE file in the project root for full license information. //- footer hr .container if config && config.debug && config.debug.showDebugFooter === true .clearfix .row(style='padding:12px') h6 Instance Configuration .col-md-6.col-lg-6 if correlationId h4 Correlation ID: #{correlationId} if config && config.telemetry && config.telemetry.applicationInsightsKey h4 AppInsights: Instrumented if config && config.telemetry && config.telemetry.googleAnalyticsKey h4 Google Analytics: Tracking if config && config.authentication.scheme h4 | Primary Authentication: #{config.authentication.scheme === 'github' ? 
'GitHub' : 'Azure Active Directory'} if config && config.session.encryption !== undefined h4 | Session Encryption: #{config.session.encryption === true ? 'Encrypted' : 'Plain Text'} if config && config.github.links.table.encryption !== undefined h4 | Table Encryption: #{config.github.links.table.encryption === true ? 'Encrypted' : 'Plain Text'} .col-md-6.col-lg-6 if serverName h4 Hostname: #{serverName} if serverAddress h4 Server: #{serverAddress} if websiteHostname h4 Site: #{websiteHostname} if config && config.github.links.table h4 | Storage:&nbsp; span.bg-danger(style='color:white;padding:2px')= config.github.links.table.account h4 | Storage Prefix: #{config.github.links.table.prefix} if config && config.redis h4 | Redis:&nbsp; span.bg-danger(style='color:white;padding:2px')= config.redis.host || config.redis.tls h4 | Redis Prefix: #{config.redis.prefix} .clearfix .row(style='margin-bottom:12px') if user && user.github && user.github.id if config && config.corporate && config.corporate.trainingResources - var footres = config.corporate.trainingResources.footer if footres each categoryList, category in footres div.col-md-3.col-lg-3 h5= category ul.list-unstyled.small each item in categoryList li a(href=item.link)= item.title div.col-md-3.col-lg-3 h5(class='pull-right') a(href='#top', title=headSha) Back to top else .col-sm-12.col-md-12.col-lg-12 h5(class='pull-right') a(href='#top', title=headSha) Back to top .row.small .col-sm-6.col-md-6.col-lg-6 .clearfix ul.list-inline(style='margin-bottom: 0px') if config && config.brand && config.brand.supportMail li a(href='mailto:' + config.brand.supportMail) Contact site administrator li a(href='https://github.com/microsoft/opensource-portal', target='_new') &nbsp;&nbsp;Contribute to this site on GitHub if serverName && correlationId | Powered by&nbsp; a(href='/thanks') great open source,&nbsp; span(title=correlationId + ' ' + serverName) Microsoft Azure,&nbsp; | and the GitHub API else | Powered by <a href="/thanks">great open source</a>, Microsoft Azure, and the GitHub API .col-sm-6.col-md-6.col-lg-6 .pull-right.text-right span &copy; #{config && config.brand.companyName ? config.brand.companyName : ''} .row div(style='margin-bottom: 30px') <file_sep>/views/org/team/members.pug //- //- Copyright (c) Microsoft. All rights reserved. //- Licensed under the MIT license. See LICENSE file in the project root for full license information. //- extends ../../layout block content div.container h1 Team Membership if teamMembers if teamMembers.length && teamMembers.length > 0 //- We brute force the API here instead of paging. Not as scaled as the rest... if teamMembers.length > 100 p strong Note: at this time this app does not support paging of team members. If this really bugs you, let us know... table.table thead tr th Avatar th GitHub Username th Authorization Type th Identity th Actions tbody for teamMember in teamMembers tr td p if teamMember.avatar_url img(alt=teamMember.ghu, src=teamMember.avatar_url + '&s=100', style='margin-right:10px;width:50px;height:50px', width=50, height=50, data-user=teamMember.id) td a.btn.btn-sm.btn-muted(href='https://github.com/' + teamMember.login, title=teamMember.id)= teamMember.login td if teamMember.link p Active Directory else p strong Not a portal user br small This user may have been added by a GitHub administrator for the organization. The user is not registered in the table of identities. 
if teamMember.link td a.btn.btn-sm.btn-muted(href=teamMember.corporateProfileUrl(), target='_new') = teamMember.corporateAlias() || teamMember.link.aadupn else td ? td.twentypercent form(method='get', action=teamUrl + 'members/' + teamMember.login + '/remove') p if teamMember.contactEmail() a.btn.btn-sm.btn-default(href='mailto:' + teamMember.contactEmail()) Send Mail | &nbsp; &nbsp; input.btn.btn-sm.btn-default(type='submit', value='Remove...', data-transition='fade', data-theme='c') hr h2 Add Members p Here you can add employees who have <em>already registered with this portal in the past</em>. The easiest, most consistent experience however is to ask users to self-join using this portal and have you approve their request. p small The list is sorted by corporate identity and also notes the GitHub username associated with that identity. form.form-horizontal(method='post', action=teamUrl + 'members/add') p select.form-control#inputAddMember(name='addMember') each employee in employees option(value=employee.ghid, selected=employee.ghid == user.github.id)= employee.aadupn + ' : ' + employee.ghu br input.btn.btn-primary(type='submit',value='Add to team',data-transition='fade', data-theme='c') p strong Outside Collaborators br | You can also add an "Outside Collaborator" or corporate collaborator to a <em>repo</em>. br a.btn.btn-sm.btn-default(href=teamUrl + 'repos/') Manage Repositories and Collaborators <file_sep>/routes/org/repos.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // 'use strict'; const _ = require('lodash'); const async = require('async'); const express = require('express'); const extensionsRoute = require('./extensions/'); const lowercaser = require('../../middleware/lowercaser'); const moment = require('moment'); const router = express.Router(); const teamsFilterType = { systemTeamsExcluded: 'systemTeamsExcluded', systemTeamsOnly: 'systemTeamsOnly', }; router.use(function (req, res, next) { req.oss.addBreadcrumb(req, 'Repositories'); req.reposContext = { section: 'repos', org: req.org, pivotDirectlyToOtherOrg: '/repos/', // hack }; req.reposPagerMode = 'org'; next(); }); router.get('/', lowercaser(['sort', 'language', 'type', 'tt']), require('../reposPager')); function sliceCollaboratorsForView(collaborators) { // Slices to the highest permission level for a collaborator const collabView = { readers: [], writers: [], administrators: [], }; collaborators.forEach((collab) => { const permission = collab.permissions; const destination = permission.admin ? collabView.administrators : (permission.push ? collabView.writers : (permission.pull ? 
collabView.readers : null)); if (destination) { destination.push(collab); } }); return collabView; } function slicePermissionsForView(permissions) { const perms = {}; permissions.forEach(permission => { const level = permission.permission; if (!level) { throw new Error('Invalid operation: no permission associated with the permission entity'); } if (!perms[level]) { perms[level] = []; } perms[level].push(permission); }); return perms; } function calculateRepoPermissions(organization, repository, callback) { repository.getTeamPermissions((getTeamPermissionsError, teamPermissions) => { if (getTeamPermissionsError) { return callback(getTeamPermissionsError); } organization.getOwners((getOwnersError, owners) => { if (getOwnersError) { return callback(getOwnersError); } findRepoCollaboratorsExcludingTeams(repository, teamPermissions, owners, (getCollaboratorsError, collaborators, outsideCollaborators) => { // Get team members async.eachLimit(teamPermissions, 2, (tp, next) => { const team = tp.team; team.getMembers((membersError, members) => { if (!membersError) { tp.members = members; } return next(); }); }, error => { if (error) { return callback(error); } return callback(getCollaboratorsError, teamPermissions, collaborators, outsideCollaborators); }); }); }); }); } function findRepoCollaboratorsExcludingTeams(repository, teamPermissions, owners, callback) { const ownersMap = new Map(); for (let i = 0; i < owners.length; i++) { ownersMap.set(owners[i].id, owners[i]); } const directCollaboratorOptions = { affiliation: 'direct', }; repository.getCollaborators(directCollaboratorOptions, (error, collaborators) => { if (error) { return callback(error); } const outsideCollaboratorOptions = { affiliation: 'outside', }; repository.getCollaborators(outsideCollaboratorOptions, (error, outsideCollaborators) => { if (error) { return callback(error); } function filterOutOwners(collaborator) { const id = collaborator.id; return !ownersMap.has(id); } callback(null, _.filter(collaborators, filterOutOwners), outsideCollaborators); }); }); } router.use('/:repoName', (req, res, next) => { const repoName = req.params.repoName; const organization = req.organization; const repository = organization.repository(repoName); const legacyOrg = req.org; repository.getDetails(error => { if (error) { return next(error); } req.repository = repository; // This is a temporary hack to enable CLA extensions until this old code can be revisited const legacyRepo = legacyOrg.repo(repoName, Object.assign({}, repository)); repository.legacyRepo = legacyRepo; return next(); }); }); router.use('/:repoName', require('../../middleware/github/repoPermissions')); function npmPublishingExtension(operations, repository, callback) { let data = { supported: false, }; const result = { npm: data, }; const config = operations.config; if (!config || !config.npm || !config.npm.publishing || !config.npm.publishing.token) { return callback(null, result); } repository.getContent('package.json', (getContentError) => { if (!getContentError) { data.supported = true; } return callback(null, result); }); } function legacyClaExtension(operations, repository, callback) { let cla = { supported: false, enabled: null, legalEntity: null, mails: null, webhookUrl: null, }; const result = { cla: cla, }; const organization = repository.organization; cla.teams = organization.legalEntityClaTeams; const metadata = organization.getRepositoryCreateMetadata(); if (!metadata.supportsCla) { return callback(null, result); } cla.supported = true; const legacyRepo = 
repository.legacyRepo; if (!legacyRepo) { return callback(new Error('The legacy repository system is not available to manage this request')); } legacyRepo.hasLegacyClaAutomation((legacyCheckError, enabled, webhookUrl, legalEntity, learnMoreUrl) => { if (legacyCheckError) { return callback(legacyCheckError); } cla.enabled = enabled; cla.learnMoreUrl = learnMoreUrl; if (enabled) { cla.legalEntity = legalEntity; cla.webhookUrl = webhookUrl; } legacyRepo.getLegacyClaSettings((getError, settings) => { if (settings) { cla.mails = settings.NotifierEmails; if (settings.UpdatedOn) { cla.updatedOn = moment.utc(settings.UpdatedOn); } } return callback(null, result); }); }); } function getRepoExtensions(operations, repository, callback) { const extensions = {}; const extensionTypes = [ legacyClaExtension, npmPublishingExtension, ]; async.eachLimit(extensionTypes, 2, (extension, next) => { extension(operations, repository, (error, result) => { if (error) { return next(error); } Object.assign(extensions, result); return next(); }); }, error => { return callback(error, extensions); }); } router.get('/:repoName', (req, res, next) => { const referer = req.headers.referer; const fromReposPage = referer && (referer.endsWith('repos') || referer.endsWith('repos/')); const operations = req.app.settings.operations; const organization = req.organization; const repoPermissions = req.repoPermissions; const repository = req.repository; const uc = operations.getUserContext(req.oss.id.github); return uc.getAggregatedOverview((aggregateError, aggregate) => { repository.getDetails((error) => { if (aggregateError || error) { return next(aggregateError || error); } calculateRepoPermissions(organization, repository, (getPermissionsError, permissions, collaborators, outsideCollaborators) => { if (getPermissionsError) { return next(getPermissionsError); } const systemTeams = combineAllTeams(organization.specialRepositoryPermissionTeams); const teamBasedPermissions = consolidateTeamPermissions(permissions, systemTeams); const title = `${repository.name} - Repository`; getRepoExtensions(operations, repository, (extensionError, extensions) => { if (extensionError) { return next(extensionError); } repository.organization.getDetails((error, details) => { organization.id = details.id; req.oss.render(req, res, 'repos/repo', title, { organization: organization, repo: decorateRepoForView(repository), permissions: slicePermissionsForView(filterSystemTeams(teamsFilterType.systemTeamsExcluded, systemTeams, permissions)), systemPermissions: slicePermissionsForView(filterSystemTeams(teamsFilterType.systemTeamsOnly, systemTeams, permissions)), collaborators: sliceCollaboratorsForView(collaborators), collaboratorsArray: collaborators, outsideCollaboratorsSlice: sliceCollaboratorsForView(outsideCollaborators), outsideCollaborators: outsideCollaborators, // reposDataAgeInformation: ageInformation ? 
ageInformation : undefined, fromReposPage: fromReposPage, teamSets: aggregateTeamsToSets(aggregate.teams), repoPermissions: repoPermissions, teamBasedPermissions: teamBasedPermissions, extensions: extensions, }); }); }); }); }); }); }); function consolidateTeamPermissions(permissions, systemTeams) { const systemTeamsSet = new Set(systemTeams); const filtered = { // id -> [] array of teams admin: new Map(), push: new Map(), pull: new Map(), }; for (let i = 0; i < permissions.length; i++) { const teamPermission = permissions[i]; const permission = teamPermission.permission; const members = teamPermission.members; const team = teamPermission.team; const isSystemTeam = systemTeamsSet.has(team.id); if (members && !isSystemTeam /* skip system teams */) { for (let j = 0; j < members.length; j++) { const member = members[j]; const map = filtered[permission]; if (map) { let entry = map.get(member.id); if (!entry) { entry = { user: member, teams: [], }; map.set(member.id, entry); } entry.teams.push(team); } } } } const expanded = { readers: Array.from(filtered.pull.values()), writers: Array.from(filtered.push.values()), administrators: Array.from(filtered.admin.values()), }; return expanded.readers.length === 0 && expanded.writers.length === 0 && expanded.administrators.length === 0 ? null : expanded; } function combineAllTeams(systemTeams) { const allTypes = Object.getOwnPropertyNames(systemTeams); const set = new Set(); allTypes.forEach(type => { const values = systemTeams[type]; if (Array.isArray(values)) { for (let i = 0; i < values.length; i++) { set.add(values[i]); } } }); return Array.from(set); } function filterSystemTeams(filterType, systemTeams, teams) { if (filterType !== teamsFilterType.systemTeamsExcluded && filterType !== teamsFilterType.systemTeamsOnly) { throw new Error('Invalid, unsupported teamsFilterType value for filterType'); } const systemSet = new Set(systemTeams); return _.filter(teams, permission => { const team = permission.team; const isSystem = systemSet.has(team.id); return filterType === teamsFilterType.systemTeamsOnly ? 
isSystem : !isSystem; }); } function decorateRepoForView(repo) { // This should just be a view service of its own at some point fromNow(repo, ['created_at', 'updated_at', 'pushed_at']); return repo; } function fromNow(object, property) { if (Array.isArray(property)) { property.forEach(prop => { fromNow(object, prop); }); return; } if (!object.moment) { object.moment = {}; } let value = object[property]; if (value) { object.moment[property] = moment(value).fromNow(); return object.moment[property]; } } function aggregateTeamsToSets(teams) { const sets = { maintained: teamsToSet(teams.maintainer), member: teamsToSet(teams.member), }; return sets; } function teamsToSet(teams) { const set = new Set(); if (teams) { teams.forEach(team => { set.add(team.id); }); } return set; } function requireAdministration(req, res, next) { const repoPermissions = req.repoPermissions; if (!repoPermissions) { return next(new Error('Not configured for repo permissions')); } if (repoPermissions.allowAdministration === true) { return next(); } return next(new Error('You are not authorized to administer this repository.')); } router.use('/:repoName/extensions/cla', requireAdministration, (req, res, next) => { const operations = req.app.settings.operations; const repository = req.repository; legacyClaExtension(operations, repository, (getClaError, extensionData) => { if (getClaError) { return next(getClaError); } if (!extensionData || !extensionData.cla) { return next(new Error('This organization\'s extension data is currently offline or not configured.')); } const claSettings = extensionData.cla; if (!claSettings.supported) { return next(new Error('This organization has not enabled CLA automation at this time.')); } req.legalClaSettings = claSettings; return next(); }); }); router.get('/:repoName/extensions/cla', function (req, res) { const repository = req.repository; const oss = req.oss; oss.addBreadcrumb(req, 'CLA'); const claSettings = req.legalClaSettings; oss.render(req, res, 'repos/legacyCla', `CLA - ${repository.name}`, { claSettings: claSettings, repository: repository, organization: repository.organization, //repoLegacyClaUrl: req.teamReposUrl + repo.name + '/legacyCla', //mayHaveLegacyCla: mayHaveLegacyCla, //claTeams: req.claTeams, //claWebHookUrl: claWebHookUrl, //repo: repo, }); }); router.post('/:repoName/extensions/cla', (req, res, next) => { const repository = req.repository; const legacyRepo = repository.legacyRepo; const emails = req.body.emails; const currentClaSettings = req.legalClaSettings; const repoRoot = '/' + repository.organization.name + '/repos/' + repository.name; // If legal entity, it is new; otherwise, just updated the e-mail addresses let legalEntity = req.body.legalEntity; const isUpdate = !legalEntity; if (isUpdate) { legalEntity = currentClaSettings.legalEntity; } legacyRepo.enableLegacyClaAutomation({ emails: emails, legalEntity: legalEntity, }, (error) => { if (error) { return next(error); } req.oss.saveUserAlert(req, `${legalEntity} CLA ${isUpdate ? 'updated' : 'configured'} and set to contact ${emails}.`, 'Contribution license agreements', 'success'); res.redirect(repoRoot); }); }); router.use('/:repoName/extensions', extensionsRoute); module.exports = router; <file_sep>/jobs/reports/mailer.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. 
// /* eslint no-console: ["error", { allow: ["warn", "dir", "log"] }] */ 'use strict'; // There is an edge case for losing reports - if someone has an e-mail addresses added to a service // account, and also is linked themselves, then it's possible that they may get multiple reports or // may have the reports masked. const fs = require('fs'); const path = require('path'); const pug = require('pug'); const Q = require('q'); const qlimit = require('qlimit'); const emailRender = require('../../lib/emailRender'); function sendReports(context) { const mailProvider = context.operations.providers.mailProvider; if (!mailProvider) { return Q.reject(new Error('No mailProvider is available to send messages')); } const reportsByRecipient = context.reportsByRecipient; if (!reportsByRecipient) { return Q.reject(new Error('No consolidated reports were generated by recipient')); } const overrideSendWithPath = context.settings.fakeSend; if (overrideSendWithPath) { console.warn(`Instead of sending mail, mail will be written to ${overrideSendWithPath}`); try { fs.mkdirSync(overrideSendWithPath); } catch (ignored) { console.log(`While creating directory to store e-mails instead of sending, received: ${ignored.message}`); } } const limit = qlimit(1); const recipients = Array.from(reportsByRecipient.keys()); const sendIndividualReport = sendReport.bind(null, context, mailProvider, reportsByRecipient); return Q.allSettled(recipients.map(limit(sendIndividualReport))) .thenResolve(context); } function resolveAddress(context, upn) { const operations = context.operations; const providers = operations.providers; if (!providers.mailAddressProvider) { return Q.reject(new Error('No mailAddressProvider is available in this application instance')); } const deferred = Q.defer(); providers.mailAddressProvider.getAddressFromUpn(upn, (error, address) => { return error ? 
deferred.reject(error) : deferred.resolve(address); }); return deferred.promise; } function recipientTypeToAddress(context, address) { const i = address.indexOf(':'); if (i < 0) { return Q.reject(new Error('Invalid consolidated address format')); } const type = address.substr(0, i); const remainder = address.substr(i + 1); if (type === 'mail') { return Q(remainder); } else if (type === 'upn') { return resolveAddress(context, remainder); } else { return Q.reject(`Unsupported consolidated address type ${type}`); } } function consolidatedActionRequired(report) { for (let i = 0; i < report.length; i++) { const definition = report[i].definition; if (definition.isActionRequired) { return true; } } return false; } function renderReport(context, report, address) { const options = { github: { consolidated: report, }, viewServices: context.operations.providers.viewServices, to: address, }; const basedir = path.join(__dirname, 'views'); const view = path.join(basedir, 'administrator.pug'); options.pretty = true; options.basedir = basedir; let html = null; try { html = pug.renderFile(view, options); } catch (renderingProblem) { console.warn(renderingProblem); throw renderingProblem; } return html; } function sendReport(context, mailProvider, reportsByRecipient, recipientKey) { const report = reportsByRecipient.get(recipientKey); const overrideSendWithPath = context.settings.fakeSend; const fromAddress = context.settings.fromAddress; if (!fromAddress && !overrideSendWithPath) { return Q.reject(new Error('No from address is configured for reports in the github.jobs.reports.mail.from value')); } return recipientTypeToAddress(context, recipientKey).then(address => { if (!report || !report.length) { return Q(context); } const isActionRequired = consolidatedActionRequired(report); const classification = isActionRequired ? 'action' : 'information'; const html = renderReport(context, report, address); const viewOptions = { html: html, }; const basedir = path.resolve(__dirname, '../../'); const deferred = Q.defer(); emailRender.render(basedir, 'report', viewOptions, (renderError, mailContent) => { if (renderError) { return deferred.reject(renderError); } // Store the e-mail instead of sending if (overrideSendWithPath) { const filename = path.join(overrideSendWithPath, `${address}.html`); return fs.writeFile(filename, mailContent, 'utf8', error => { if (error) { console.warn(`Trouble writing ${filename} ${error}`); } else { console.log(`Wrote ${filename}`); } return error ? deferred.reject(error) : deferred.resolve(); }); } // Send the e-mail const actionSubject = isActionRequired ? 'Action required: ' : ''; const mailOptions = { to: address, from: fromAddress, subject: `${actionSubject}GitHub digest for ${address}`, classification: classification, headline: isActionRequired ? 'Your GitHub updates' : 'GitHub updates', service: 'Microsoft GitHub', reason: 'This digest report is provided to all managed GitHub organization owners, repository admins, and team maintainers. This report was personalized and sent directly to ' + address, content: mailContent, }; mailProvider.sendMail(mailOptions, (mailError , mailResult) => { const customData = { receipt: mailResult, }; customData.eventName = mailError ? 'JobReportSendFailed' : 'JobReportSendSuccess'; if (mailError) { context.insights.trackException(mailError, customData); } else { context.insights.trackEvent('JobMailProviderReportSent', customData); } return mailError ? 
          deferred.reject(mailError) : deferred.resolve();
      });
    });
    return deferred.promise;
  });
}

module.exports = sendReports;
<file_sep>/App_Data/jobs/triggered/digests/README.md
# digests web job

This WebJob generates a consolidated report across all configured GitHub organizations every few hours.

> The initial cron schedule is `0 5 0,4,10,16,22 * * *`. This is designed to _prime the stored reports_, keeping the data up-to-date and making sure a good report is ready by 6:00 UTC daily.

__WARNING:__ If you configure your environment to send reports, this cron frequency needs to change. Right now it would send mail whenever the reports are generated, which would be multiple times per day!

The job produces a few outputs:

- A Redis key storing a compressed, consolidated report JSON object
- An optional consolidated report JSON file
- Optionally sends e-mails to individual recipients using the ospo-opensource-repos provider for mail
- Optionally stores e-mails as rendered HTML snippet files instead of sending them
- Optionally creates an Azure storage blob designed to be ingested into Azure Data Lake
<file_sep>/lib/issue.js
//
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
//

// CONSIDER: Cleanup issue.js.

function OpenSourceIssue(repoInstance, issueNumber, optionalInitialData) {
  this.repo = repoInstance;
  if (!repoInstance.full_name) {
    throw new Error('No full_name set for this instance.');
  }
  this.oss = repoInstance.oss;
  this.number = issueNumber;
  if (optionalInitialData) {
    throw new Error('optionalInitialData is not yet supported for the OpenSourceIssue type.');
  }
}

OpenSourceIssue.prototype.createComment = function (body, callback) {
  this.oss.createGenericGitHubClient().issue(this.repo.full_name, this.number).createComment({ body: body }, callback);
};

OpenSourceIssue.prototype.update = function (patch, callback) {
  this.oss.createGenericGitHubClient().issue(this.repo.full_name, this.number).update(patch, callback);
};

OpenSourceIssue.prototype.close = function (callback) {
  this.oss.createGenericGitHubClient().issue(this.repo.full_name, this.number).update({
    state: 'closed',
  }, callback);
};

module.exports = OpenSourceIssue;
<file_sep>/lib/mailAddressProvider/microsoftMailAddressProvider.js
//
// Copyright (c) Microsoft. All rights reserved.
// // microsoftMailAddressProvider.js: THIS FILE IS FOR INTERNAL USE AND SHOULD NOT BE OPEN SOURCED AT THIS TIME 'use strict'; const async = require('async'); module.exports = function createMailAddressProvider(options) { const redisClient = options.redisClient; if (!redisClient) { throw new Error('This provide requires a redisClient instance.'); } function getLegalContactInformationFromUpn(upn, callback) { getEntryFromUpn(upn, (entryError, entry) => { if (entryError) { return callback(entryError); } async.whilst( () => { return entry && !entry.legal; }, next => { if (!entry) { return next(new Error('No entry was found while looking up legal contact information')); } if (!entry.userPrincipalName) { return next(new Error('While looking up entries for legal contacts, a user without a UPN was encountered')); } getManagerInformationFromUpn(entry.userPrincipalName, (managerError, manager) => { if (managerError) { return next(managerError); } if (!manager) { throw new Error(''); } entry = manager; return next(); }); }, error => { if (!error && (!entry || !entry.legal)) { error = new Error('Could not retrieve the legal contact'); } if (error) { return callback(error); } let legalInfo = { assignedTo: entry, legalContact: null, }; getEntryFromUpn(entry.legal, (legalError, legal) => { if (legalError) { return callback(legalError); } legalInfo.legalContact = legal; return callback(null, legalInfo); }); }); }); } function getEntryFromUpn(upn, callback) { getCorporateEntry('upns', upn, (redisGetError, entry) => { if (redisGetError) { return callback(redisGetError); } return callback(null, entry); }); } function getEntryFromAlias(alias, callback) { getCorporateEntry('aliases', alias, (redisGetError, entry) => { if (redisGetError) { return callback(redisGetError); } return callback(null, entry); }); } function getManagerInformationFromUpn(upn, callback) { getEntryFromUpn(upn, (error, person) => { if (!error && !person) { error = new Error(`No entry was retrieved for the UPN ${upn}`); } if (!error && !person.manager) { error = new Error(`No manager is known for UPN ${upn}`); } if (error) { return callback(error); } getEntryFromAlias(person.manager, (managerError, manager) => { return callback(managerError ? managerError : null, managerError ? null : manager); }); }); } function getCorporateEntry(hashKey, hashField, expectJson, callback) { if (!callback && typeof(expectJson) === 'function') { callback = expectJson; expectJson = true; } redisClient.hget(hashKey, hashField, (redisGetError, data) => { if (redisGetError) { return callback(redisGetError); } if (!expectJson) { return callback(null, data); } let person = null; if (data) { try { person = JSON.parse(data); } catch (jsonError) { return callback(jsonError); } } callback(null, person); }); } return { getAddressFromUpn: (upn, callback) => { getEntryFromUpn(upn, (error, person) => { if (error) { return callback(error); } if (person && person.emailAddress) { return callback(null, person.emailAddress); } // We fall back down to the UPN to at a bare minimum have // the original logic in play. return callback(null, upn); }); }, getManagerInformationFromUpn: getManagerInformationFromUpn, getLegalContactInformationFromUpn: getLegalContactInformationFromUpn, getCorporateEntry: getCorporateEntry, }; }; <file_sep>/routes/org/team/teamAdminRequired.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. 
// 'use strict'; module.exports = (req, res, next) => { const teamPermissions = req.teamPermissions; if (!teamPermissions) { return next(new Error('No team permissions information available')); } if (teamPermissions.allowAdministration === true) { return next(); } const err = new Error('You do not have permission to administer this team'); err.status = 403; return next(err); }; <file_sep>/views/settings/npm.pug //- //- Copyright (c) Microsoft. All rights reserved. //- Licensed under the MIT license. See LICENSE file in the project root for full license information. //- extends layout.pug block content h1 NPM if !npm h3 Please validate your NPMJS account p Your NPM token can be found in your #[code ~/.npmrc] file in your development environment, or you can run #[code npm login] to login locally and then examine that file. form(method='post') .form-group label(for='npmToken') NPM token input.form-control#npmToken(type='text', name='token', placeholder='NPM token') button.btn.btn-default Validate hr p: small IMPORTANT: Your token will not be stored. It will, however, be used to validate your NPM username with the NPMJS registery to integate with NPM publishing features. p: small You can manage and revoke your NPMJS tokens at #[a(href='https://www.npmjs.com/settings/tokens', target='_new') https://www.npmjs.com/settings/tokens]. else p Your NPM username is #[strong #{npm}]. ul.list-inline li: a.btn.btn-muted(href='https://npmjs.org/~' + npm, target='_new') NPM profile for #{npm} li form(method='post', action='/settings/npm/clear') button.btn.btn-default Remove npm account <file_sep>/views/repos/legacyCla.pug //- //- Copyright (c) Microsoft. All rights reserved. //- Licensed under the MIT license. See LICENSE file in the project root for full license information. //- extends ../layout block content - var repo = repository .container .row: .col-sm-12 .nav ul.pager.zero-pad-bottom li.previous a(href='/' + organization.name + '/repos/' + repository.name) span(aria-hidden=true) &larr; = ' Back to the ' + repository.name + ' repository' - var hugeHeading = false h1(class={huge: hugeHeading}) a(href='https://github.com/' + repo.full_name, target="_blank")= repo.name if repo.private === true | &nbsp; .label.label-warning(class={shrink66: !hugeHeading, shrink50: hugeHeading}) Private h6= repo.full_name.replace('/' + repo.name, '') + ' organization' //-if repo.description p.lead=repo.description hr h1 Contribution license agreement settings .row .col-md-10 if claSettings && claSettings.enabled - var cla = claSettings ul.list-inline.list-horizontal-space li | Status br strong Active if cla.mails li | Notifying br strong= cla.mails if cla.legalEntity li | Legal entity br strong= cla.legalEntity if cla.updatedOn && cla.updatedOn.fromNow li | Configuration updated br strong= cla.updatedOn.fromNow() //-if cla.webhookUrl li | Webhook br strong= cla.webhookUrl else p This repository does not currently have the CLA enabled. h3 #{claSettings && claSettings.enabled ? 'Update' : 'Enable'} settings form.form-horizontal(method='post') h4 Legal entity if claSettings && claSettings.enabled if claSettings.legalEntity p: strong= claSettings.legalEntity p The legal entity cannot be changed after the fact. Please contact operations if this should change. else if claSettings && claSettings.teams select.form-control(name='legalEntity') each team, key in claSettings.teams option(value=key)= key h4 Pull request notifications p To receive notification whenever a pull request is created, provide the e-mail addresses to notify. 
This address needs to be able to accept e-mail from outside the organization. p: input.form-control(type='text', name='emails', placeholder='Optional comma-separated list of e-mail addresses to set', value= claSettings ? claSettings.mails : null) p: input.btn.btn-primary(type='submit', value='Set') hr p. Contribution License Agreements (CLAs) make accepting contributions and working with the open source community much easier while protecting contributors and copyright holders including the legal entity. p. The CLA bots detect whether someone is contributing a large enough change to require a CLA to be signed, and in the case that it is an employee (or contractor making a contribution within the scope of their statement of work), no CLA may be required, allowing the team to integrate the change even sooner. <file_sep>/jobs/permissions/task.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // /*eslint no-console: ["error", { allow: ["warn", "dir", "log"] }] */ 'use strict'; const async = require('async'); const os = require('os'); const automaticTeams = require('../../webhooks/tasks/automaticTeams'); // Permissions processing: visit all repos and make sure that any designated read, write, admin // teams for the organization are present on every repo. This job is designed to be run relatively // regularly but is not looking to answer "the truth" - it will use the cache of repos and other // assets to not abuse GitHub and its API exhaustively. Over time repos will converge to having // the right permissions. const maxParallelism = 2; module.exports = function run(started, startedString, config) { console.log(`WebJob started ${startedString}`); const app = require('../../app'); config.skipModules = new Set([ 'ossDbProvider', 'web', ]); app.initializeApplication(config, null, error => { if (error) { throw error; } const insights = app.settings.appInsightsClient; if (!insights) { throw new Error('No app insights client available'); } insights.trackEvent('JobPermissionsStarted', { hostname: os.hostname(), }); const operations = app.settings.operations; operations.getRepos((error, repos) => { if (error) { console.dir(error); insights.trackException(error); return process.exit(1); } console.log(`We have a lot of repos: ${repos.length}`); async.eachLimit(repos, maxParallelism, (repo, next) => { const cacheOptions = { maxAgeSeconds: 10 * 60 /* 10m */, backgroundRefresh: false, }; const [/*specialTeams*/, /*specials*/, specialTeamIds, specialTeamLevels] = automaticTeams.processOrgSpecialTeams(repo.organization); repo.getTeamPermissions(cacheOptions, (getError, permissions) => { if (getError) { console.log(`There was a problem getting the permissions for the repo ${repo.name} from ${repo.organization.name}`); console.dir(getError); return next(/* do not shortcut or error out */); } const currentPermissions = new Map(); permissions.forEach(entry => { currentPermissions.set(entry.team.id, entry.permission); }); const teamsToSet = new Set(); specialTeamIds.forEach(specialTeamId => { if (!currentPermissions.has(specialTeamId)) { teamsToSet.add(specialTeamId); } else if (isAtLeastPermissionLevel(currentPermissions.get(specialTeamId), specialTeamLevels.get(specialTeamId))) { // The team permission is already acceptable } else { console.log(`Permission level for ${specialTeamId} is not good enough, expected ${specialTeamLevels.get(specialTeamId)} but currently ${currentPermissions.get(specialTeamId)}`); 
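            // The current grant is weaker than the configured level for this
            // special team, so queue it for (re)setting below; setTeamPermission
            // will upgrade the team to the expected permission on this repo.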
teamsToSet.add(specialTeamId); } }); const setArray = Array.from(teamsToSet.values()); if (setArray.length > 0) { async.eachLimit(setArray, 1, (teamId, innerCallback) => { const newPermission = specialTeamLevels.get(teamId); console.log(`adding ${teamId} team with permission ${newPermission} to the repo ${repo.name}`); repo.setTeamPermission(teamId, newPermission, innerCallback); }, error => { if (error) { console.log(`${repo.name}`); console.dir(error); } return next(); }); } else { return next(); } }); }, (error) => { if (error) { console.dir(error); insights.trackException(error); return process.exit(1); } console.log('Processed all repos.'); process.exit(0); }); }); }); }; function isAtLeastPermissionLevel(value, expected) { if (value !== 'admin' && value !== 'push' && value !== 'pull') { throw new Error(`The permission type ${value} is not understood by isAtLeastPermissionLevel`); } if (value === expected) { return true; } // Admin always wins if (value === 'admin') { return true; } else if (expected === 'admin') { return false; } if (expected === 'write' && value === expected) { return true; } if (expected === 'read') { return true; } return false; } <file_sep>/views/settings/digestReportView.pug //- //- Copyright (c) Microsoft. All rights reserved. //- Licensed under the MIT license. See LICENSE file in the project root for full license information. //- extends ./layout.pug block content .container .nav ul.pager.zero-pad-bottom li.previous a(href='javascript:window.history.back()') span(aria-hidden=true) &larr; = ' Back to reports' h2= reportTitle style(type='text/css') | div.report li > div { display:inline } p This report is generated every few hours using operation GitHub data. It is not a list of real-time issues and may contain cached information. The long-form weekly summary digest is prepared regularly but only sent once a week. div(style='border:2px solid #333; padding: 18px;margin-top:28px') .report include ../../jobs/reports/views/administrator<file_sep>/lib/npm/findNpm.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // /*eslint no-console: ["error", { allow: ["warn"] }] */ 'use strict'; const fs = require('mz/fs'); const path = require('path'); const Q = require('q'); const wrapError = require('../../utils').wrapError; function windowsPaths() { if (process.platform === 'win32') { const programFilesDir = process.env['programfiles(x86)'] || process.env.programfiles; return { nodejsDir: path.resolve(programFilesDir, 'nodejs'), npmRootDir: path.resolve(programFilesDir, 'npm'), }; } } module.exports = getNpmPath; function getNpmPath() { const paths = windowsPaths(); if (!paths) { console.warn('This code is designed for execution on a PC.'); return Q('npm'); } const nodeVersion = process.versions.node; const npmLinkPath = path.resolve(paths.nodejsDir, nodeVersion, 'npm.txt'); return getAvailableNpmVersions().then(npmPaths => { return fs.readFile(npmLinkPath, 'utf8').then(version => { const npmPath = npmPaths[version.trim()]; if (!npmPath) { throw new Error(`NPM version ${version} was not found on the system, although the ${npmLinkPath} file specified it as the ideal NPM version to use.`); } return Q(npmPath); }, noLink => { throw wrapError(noLink, `This system does not appear configured either for a standard Node.js installation or for an Azure App Service environment. 
The NPM redirection file ${npmLinkPath} was not found.`); }); }, (/* standard node install */) => { let bestMatch = null; const candidates = [path.resolve(process.env.programfiles, 'nodejs', 'npm.cmd')]; const x86 = process.env['programfiles(x86)']; if (x86) { candidates.push(path.resolve(x86, 'nodejs', 'npm.cmd')); } return Q.allSettled(candidates.map(function evaluatePotential(loc) { return fs.stat(loc).then(() => { bestMatch = loc; return; }); })).then(() => { return Q(bestMatch); }); }); } function getAvailableNpmVersions() { const paths = windowsPaths(); const npmPaths = {}; return fs.readdir(paths.npmRootDir).then(directories => { return Q.allSettled(directories.map(function validateInstallation(dir) { const binaryPath = process.platform === 'linux' ? path.resolve(paths.npmRootDir, dir, 'node_modules', 'npm', 'bin', 'npm') : path.resolve(paths.npmRootDir, dir, 'npm.cmd'); if (!dir.match(/^\d+\.\d+\.\d+$/)) { return; } return fs.stat(binaryPath).then(() => { npmPaths[dir] = binaryPath; }); })).then(() => { return Q(npmPaths); }); }); } <file_sep>/routes/api/people/links.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // 'use strict'; const express = require('express'); const jsonError = require('../jsonError'); const router = express.Router(); const wrapError = require('../../../utils').wrapError; const MemberSearch = require('../../../business/memberSearch'); router.use(function (req, res, next) { const apiKeyRow = req.apiKeyRow; if (!apiKeyRow.apis) { return next(jsonError('The key is not authorized for specific APIs', 401)); } const apis = apiKeyRow.apis.split(','); if (apis.indexOf('links') < 0) { return next(jsonError('The key is not authorized to use the get links API', 401)); } return next(); }); router.get('/github/:username', (req, res, next) => { if (req.apiVersion == '2016-12-01') { return next(jsonError('This API is not supported by the API version you are using.', 400)); } const username = req.params.username.toLowerCase(); const operations = req.app.settings.operations; getAllUsers(req.apiVersion, operations, (error, results) => { if (error) { return next(error); } for (let i = 0; i < results.length; i++) { const entry = results[i]; if (entry && entry.github && entry.github.login.toLowerCase() === username) { req.insights.trackMetric('ApiRequestLinkByGitHubUsername', 1); return res.json(entry); } } return next(jsonError('Could not find a link for the user', 404)); }); }); router.get('/aad/:id', (req, res, next) => { if (req.apiVersion == '2016-12-01') { return next(jsonError('This API is not supported by the API version you are using.', 400)); } const id = req.params.id; const operations = req.app.settings.operations; getAllUsers(req.apiVersion, operations, (error, results) => { if (error) { return next(error); } let r = []; for (let i = 0; i < results.length; i++) { const entry = results[i]; if (entry && entry.aad && entry.aad.id === id) { r.push(entry); } } if (r.length === 0) { return next(jsonError('Could not find a link for the user', 404)); } req.insights.trackMetric('ApiRequestLinkByAadId', 1); return res.json(r); }); }); function getAllUsers(apiVersion, operations, callback) { operations.getLinks((linksError, links) => { if (linksError) { linksError = wrapError(linksError, 'There was a problem retrieving link information to display alongside members.'); return callback(jsonError(linksError, 500)); } operations.getMembers(null, {}, (error, members) => { if 
(error) { error = wrapError(error, 'There was a problem getting the members list.'); return callback(jsonError(error, 500)); } const search = new MemberSearch(members, { type: 'linked', links: links, getCorporateProfile: operations.mailAddressProvider.getCorporateEntry, pageSize: 200000, }); try { search.search(1).then(() => { const sr = search.members; const results = []; sr.forEach(member => { const entry = { github: { id: member.account.id, login: member.account.login, }, }; if (member.orgs) { entry.github.organizations = Object.getOwnPropertyNames(member.orgs); } if (member.corporate) { const corporatePropertyName = apiVersion === '2016-12-01' ? 'corporate' : 'aad'; // This was renamed to be provider name-based entry[corporatePropertyName] = { alias: member.corporate.alias, preferredName: member.corporate.preferredName, userPrincipalName: member.corporate.userPrincipalName, emailAddress: member.corporate.emailAddress, }; const corporateIdPropertyName = apiVersion === '2016-12-01' ? 'aadId' : 'id'; // Now just 'id' entry[corporatePropertyName][corporateIdPropertyName] = member.corporate.aadId; } results.push(entry); }); return callback(null, results); }).catch(callback); } catch (initialError) { return callback(jsonError(initialError, 400)); } }); }); } router.get('/', (req, res, next) => { const operations = req.app.settings.operations; getAllUsers(req.apiVersion, operations, (error, results) => { if (error) { return next(error); } req.insights.trackMetric('ApiRequestLinks', 1); res.json(results); }); }); module.exports = router; <file_sep>/routes/api/index.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // 'use strict'; const basicAuth = require('basic-auth'); const crypto = require('crypto'); const express = require('express'); const jsonError = require('./jsonError'); const router = express.Router(); const apiClient = require('./client'); const apiPeople = require('./people'); const apiWebhook = require('./webhook'); const OpenSourceUser = require('../../lib/context'); const createRepo = require('./createRepo'); const hardcodedApiVersions = [ '2017-03-08', '2016-12-01', ]; router.use('/client', apiClient); router.use('/webhook', apiWebhook); // Require a "preview" API version: ?api-version=2016-12-01 router.use((req, res, next) => { const apiVersion = req.query['api-version'] || req.headers['api-version']; if (!apiVersion) { return next(jsonError('This endpoint requires that an API Version be provided.', 422)); } if (apiVersion.toLowerCase() === '2016-09-22_Preview'.toLowerCase()) { return next(jsonError('This endpoint no longer supports the original preview version. Please update your client to use a newer version such as ' + hardcodedApiVersions[0], 422)); } if (hardcodedApiVersions.indexOf(apiVersion.toLowerCase()) < 0) { return next(jsonError('This endpoint does not support the API version you provided at this time.', 422)); } req.apiVersion = apiVersion; return next(); }); router.use(function (req, res, next) { const user = basicAuth(req); const key = user? 
(user.pass || user.name) : null; if (!key) { return next(jsonError('No key supplied', 400)); } const sha1 = crypto.createHash('sha1'); sha1.update(key); const hashValue = sha1.digest('hex'); // { owner, description, orgs (comma-sep list) } const dc = req.app.settings.dataclient; const settingType = 'apiKey'; const partitionKey = settingType; const rowKey = `${settingType}${hashValue}`; dc.getSetting(partitionKey, rowKey, (error, setting) => { const apiEventProperties = { keyHash: hashValue, apiVersion: req.apiVersion, url: req.originalUrl || req.url, }; if (error) { apiEventProperties.failed = true; apiEventProperties.message = error.message; apiEventProperties.statusCode = error.statusCode; } req.insights.trackEvent('ApiRequest', apiEventProperties); if (error) { req.insights.trackMetric('ApiInvalidKey', 1); req.insights.trackException(error); return next(jsonError(error.statusCode === 404 ? 'Key not authorized' : error.message, 401)); } else { req.insights.trackMetric('ApiRequest', 1); req.apiKeyRow = setting; next(); } }); }); router.use('/people', apiPeople); router.use('/:org', function (req, res, next) { const orgName = req.params.org; const apiKeyRow = req.apiKeyRow; if (!apiKeyRow.orgs) { return next(jsonError('There is a problem with the key configuration', 412)); } // '*'' is authorized for all organizations in this configuration environment if (apiKeyRow.orgs !== '*') { const orgList = apiKeyRow.orgs.toLowerCase().split(','); if (orgList.indexOf(orgName.toLowerCase()) < 0) { return next(jsonError('The key is not authorized for this organization', 401)); } } if (!apiKeyRow.apis) { return next(jsonError('The key is not authorized for specific APIs', 401)); } const apis = apiKeyRow.apis.split(','); if (apis.indexOf('createRepo') < 0) { return next(jsonError('The key is not authorized to use the repo create APIs', 401)); } const providers = req.app.settings.providers; const options = { config: req.app.settings.runtimeConfig, dataClient: providers.dataclient, ossDbClient: providers.ossDbConnection, githubLibrary: providers.github, }; new OpenSourceUser(options, function (error, instance) { req.oss = instance; var org; try { org = instance.org(orgName); } catch (ex) { return next(jsonError(ex), 400); } req.org = org; return next(); }); }); router.post('/:org/repos', function (req, res, next) { const convergedObject = Object.assign({}, req.headers); req.insights.trackEvent('ApiRepoCreateRequest', convergedObject); Object.assign(convergedObject, req.body); delete convergedObject.access_token; delete convergedObject.authorization; const token = req.org.setting('ownerToken'); createRepo(req, res, convergedObject, token, next, true /* send the response directly back without the callback */); }); module.exports = router; <file_sep>/middleware/onboarding.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // /*eslint no-console: ["error", { allow: ["log", "dir"] }] */ const async = require('async'); const github = require('octonode'); const utils = require('../utils'); // ---------------------------------------------------------------------------- // Onboarding helper // ---------------------------------------------------------------------------- // This file is only used when an organization has its "onboarding" value set. // It helps present a dev or devops person with the mapping of team IDs to team // names for the organization. 
This is not actually a middleware route but // rather a configuration/app initialization method just stored here to keep it // out of the way. // ---------------------------------------------------------------------------- module.exports = function (app, config) { async.each(config.github.organizations.onboarding, function (org, callback) { if (org && org.name && org.ownerToken) { var s = 'Organization Onboarding Helper for "' + org.name + '":\n'; for (var key in org) { s += '- ' + key + ': '; s += (org[key] !== undefined) ? 'value set' : 'undefined'; s += '\n'; } var ghc = github.client(org.ownerToken); var ghorg = ghc.org(org.name); utils.retrieveAllPages(ghorg.teams.bind(ghorg), function (error, teamInstances) { if (!error && teamInstances && teamInstances.length) { s += 'Here is a mapping from team ID to team slug (based on the name),\nto help with selecting the team IDs needed to run the portal\nsuch as the repo approvers and sudoers teams.\n\n'; for (var j = 0; j < teamInstances.length; j++) { var team = teamInstances[j]; s += team.id + ': ' + team.slug + '\n'; } console.log(s); } else if (error) { console.log(`Error retrieving teams for the organization ${org.name}:`); console.dir(error); console.log(s); } callback(); }); } else { console.log('An org requires that its NAME and TOKEN configuration parameters are set before onboarding can begin.'); callback(); } }, function () { console.log('This concludes the execution of the onboarding helper.'); }); }; <file_sep>/views/org/team/approveStatusWithNote.pug //- //- Copyright (c) Microsoft. All rights reserved. //- Licensed under the MIT license. See LICENSE file in the project root for full license information. //- extends ../../layout block content div.container if entry.type == 'joinTeam' h1 | Request to join | &nbsp; small.capitalize= entry.teamname else if entry.type == 'repo' h1.capitalize | New #{entry.org} Repo Request | &nbsp; small.capitalize= entry.repoName if entry.type == 'repo' && entry.repoVisibility == 'public' p strong Warning: this is a request for a new public repo which could be immediately visible for the world, journalists, bloggers and more. // CONSIDER: Integrate with directory lookup options... if entry.email p a.btn.btn-sm.btn-default(href='mailto:' + entry.email) Send Mail to #{entry.email} table.table thead tr th GitHub User th Identity if entry.type == 'joinTeam' th Team to join else if entry.type == 'repo' th Repo to create th Issue ID tbody tr td p a.btn.btn-muted.btn-sm(href='https://github.com/' + entry.ghu, target='_new')= entry.ghu td p a.btn.btn-muted.btn-sm(href='mailto:' + requestingUser.contactEmail())= requestingUser.contactEmail() if entry.type == 'joinTeam' td p if team a(href='https://github.com/orgs/' + team.org.name + '/teams/' + (team.slug || team.name), target='_new')= team.name else if entry.type == 'repo' td p= entry.repoName td p //-if team && entry.issue //- NOTE: Commenting out GitHub links with approval repos, since they are being deprecated internally and this is a quick fix a.btn.btn-sm.btn-default(href='https://github.com/' + team.org.name + '/' + team.org.getWorkflowRepository().name + '/issues/' + entry.issue, target='_new')= entry.issue if entry.justification h5 BUSINESS JUSTIFICATION & CONTEXT blockquote= entry.justification h5 ACTIONS if action == 'approve' p Please write your comment relating to why this request has been approved. 
This may be helpful to future administrations to understand whether this user is a member of the team, doing a specific timebound project, or otherwise may or may not have different future needs. else p To help keep the record straight, please consider noting why this request is being denied. This will be visible to the user who has requested permission, but not recorded in the associated GitHub issue tied to this request. form(method='post', action=teamUrl + 'approvals/' + entry.RowKey) p textarea.form-control(name='text', rows='3', placeholder='Historical note by ' + user.github.username) p if action == 'approve' input.btn.btn-primary(type='submit', name='approve', value='Approve') else input.btn.btn-primary(type='submit', name='deny', value='Deny') | &nbsp; a.btn.btn-default(href='/team/' + entry.teamid + '/approvals/' + entry.RowKey) Cancel hr p small Request ID: #{entry.RowKey} <file_sep>/middleware/passport/encryptionSerializer.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // 'use strict'; /*eslint no-console: ["error", { allow: ["warn"] }] */ const async = require('async'); const encryption = require('../../lib/encryption'); const utils = require('../../utils'); const userEncryptedEntities = { azure: new Set(), github: new Set(['accessToken']), githubIncreasedScope: new Set(['accessToken']), }; const userEntityId = { github: 'id', githubIncreasedScope: 'id', azure: 'oid', }; function validateNoRichProperties(properties) { for (const key in properties) { if (properties[key] === undefined || properties[key] === null) { continue; } if (typeof properties[key] === 'object') { console.warn(`The property ${key} is an object. To help with diagnosing the underlying area with the problem, here is the current value of the object:`); console.warn(properties[key]); return new Error(`Session property ${key} is an object.`); } } } function serializeEntity(options, entityName, entity, callback) { const config = options.config; const partitionKey = entityName; const idPropertyName = userEntityId[entityName]; const rowKey = entity[idPropertyName]; const richObjectError = validateNoRichProperties(entity); if (richObjectError !== undefined) { return callback(richObjectError); } if (rowKey === undefined) { return callback(new Error('The unique identifier for the user entity was not available.')); } const keyResolver = options.keyResolver; if (keyResolver === undefined) { return callback(new Error('A key resolver must be supplied to use encryption.')); } const encryptionOptions = { keyEncryptionKeyId: config.session.encryptionKeyId, keyResolver: keyResolver, encryptedPropertyNames: userEncryptedEntities[entityName], binaryProperties: 'base64', }; encryption.encryptEntity(partitionKey, rowKey, entity, encryptionOptions, (encryptError, encryptedEntity) => { if (encryptError) { return callback(utils.wrapError(encryptError, 'There was a problem with the security subsystem starting your session.')); } callback(null, encryptedEntity); }); } function deserializeEntity(options, entityName, entity, callback) { const partitionKey = entityName; const idPropertyName = userEntityId[entityName]; if (idPropertyName === undefined) { return callback(new Error('The entity type is not configured properly.')); } const rowKey = entity[idPropertyName]; if (rowKey === undefined) { return callback(new Error('The unique identifier for the user entity was not available.')); } const keyResolver = options.keyResolver; if 
(keyResolver === undefined) { return callback(new Error('A key resolver must be supplied to use encryption.')); } const encryptionOptions = { keyResolver: keyResolver, binaryProperties: 'base64', }; encryption.decryptEntity(partitionKey, rowKey, entity, encryptionOptions, (encryptError, decryptedEntity) => { if (encryptError) { const userError = utils.wrapError(encryptError, 'There was a problem with the security subsystem retrieving your session.'); userError.forceSignOut = true; return callback(userError); } callback(null, decryptedEntity); }); } function serialize(options, user, done) { const tasks = {}; for (const entityName in userEncryptedEntities) { const entityPresent = user[entityName]; if (entityPresent !== undefined) { const entityOriginalValue = entityPresent; delete user[entityName]; tasks[entityName] = serializeEntity.bind(null, options, entityName, entityOriginalValue); } } async.parallel(tasks, (error, results) => { if (error) { return done(error); } for (const result in results) { user[result] = results[result]; } return done(null, user); }); } function deserialize(options, user, done) { const tasks = {}; let u = {}; for (const entityName in user) { if (userEncryptedEntities[entityName] !== undefined) { let entityValue = user[entityName]; tasks[entityName] = deserializeEntity.bind(null, options, entityName, entityValue); } } async.parallel(tasks, (error, results) => { if (error) { return done(error); } for (const result in results) { u[result] = results[result]; } for (const unencryptedEntity in user) { if (userEncryptedEntities[unencryptedEntity] === undefined) { u[unencryptedEntity] = user[unencryptedEntity]; } } return done(null, u); }); } function initialize(options, app, serializerInstance) { app._sessionSerializer = serializerInstance; } module.exports = { serialize: serialize, deserialize: deserialize, initialize: initialize, }; <file_sep>/middleware/session.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // const session = require('express-session'); const RedisStore = require('connect-redis')(session); module.exports = function (app, config, redisClient) { var redisOptions = { client: redisClient, ttl: config.redis.ttl, prefix: config.redis.prefix + '.session:', }; var settings = { store: new RedisStore(redisOptions), secret: config.session.salt, name: config.session.name || 'sid', resave: false, saveUninitialized: false, cookie: { maxAge: config.redis.ttl * 1000 /* milliseconds for maxAge, not seconds */ } }; if (!config.webServer.allowHttp) { settings.cookie.secure = true; } if (config.session.domain) { settings.cookie.domain = config.session.domain; } return session(settings); }; <file_sep>/lib/github/restApi.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. 
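//
// In brief: this module wraps GitHub REST client calls in a caching pipeline.
// Responses and their ETags are persisted under Redis keys, conditional
// requests are replayed with If-None-Match, and a 304 response is treated as
// a cache hit. The pipeline functions defined below are the authoritative
// description of that behavior; this summary is only an orientation aid.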
// 'use strict'; const _ = require('lodash'); const debug = require('debug')('oss-github'); const moment = require('moment'); const Q = require('q'); const querystring = require('querystring'); const url = require('url'); const core = require('./core'); const longtermMetadataMinutes = 60 * 24 * 14; // assumed to be a long time const longtermResponseMinutes = 60 * 24 * 7; // a week, sliding const acceleratedExpirationMinutes = 10; // quick cleanup function createFullContext(api, options, github) { return prepareApiContextForGithub(createApiContextForGithub(api, options), github); } function createApiContextForGithub(api, options) { const apiContext = core.createContext(api, options); return apiContext; } function createApiContextFromLink(github, linkAddress) { const api = 'getPage'; const link = url.parse(linkAddress); const qs = querystring.parse(link.query); const pathArray = _.compact(link.pathname.split('/')); // Translate the path into key/value pairs const options = {}; if (/* odd # */ pathArray.length % 2 !== 0) { options.t = pathArray.pop(); } while (pathArray.length > 0) { const value = pathArray.pop(); const key = pathArray.pop(); options[key] = value; } // If an access_token is provided to the query string, then it is present in // the link. The trouble is this would lead to the need to encrypt Redis, // which is not great. Let's block this here and just use headers for auth. if (qs.access_token) { throw new Error('For security purposes this library was unable to process the provided link.'); } // Merge query string pairs Object.assign(options, qs); const apiContext = createApiContextForGithub(api, options); // Use a fake link to call into the GitHub library via the "next page" const fakeLink = { link: `<${linkAddress}>; rel="next"`, }; apiContext.fakeLink = fakeLink; github.getNextPage.thisInstance = github; // hack! apiContext.apiMethod = github.getNextPage; return apiContext; } function findGitHubMethod(instance, combined) { const i = combined.indexOf('.'); let apiGroup = null; let apiMethodName = combined; if (i >= 0) { apiGroup = combined.substr(0, i); apiMethodName = combined.substr(i + 1); } const group = apiGroup ? 
instance[apiGroup] : instance; if (!group) { throw new Error(`The GitHub REST API library does not support the API group of type "${apiGroup}".`); } const method = group[apiMethodName]; if (!method) { throw new Error(`The GitHub REST API library does not support the API "${apiMethodName}" within the API group of type "${apiGroup}".`); } return method; } function updateMetadataWithResponse(apiContext, response) { return Q(response); } function prepareApiContextForGithub(apiContext, github) { if (!apiContext.apiMethod) { const method = findGitHubMethod(github, apiContext.api); method.thisInstance = github; apiContext.apiMethod = method; } if (!apiContext.pipeline) { apiContext.pipeline = { withResponseShouldCacheBeServed: interpretGithubResponseIsCacheOk, withMetadataShouldCacheBeServed: usingMetadataIsCacheValid, // githubSkip, withResponseUpdateMetadata: updateMetadataWithResponse, // githubSkip, reduceMetadataToCacheFromResponse: githubReduceMetadataToCache, callApi: callGithubApi, getResponseMetadata: getGithubResponseMetadata, processMetadataBeforeCall: processGithubMetadataBeforeCall, finish: null, cache: { minutes: { longtermMetadata: longtermMetadataMinutes, longtermResponse: longtermResponseMinutes, acceleratedExpiration: acceleratedExpirationMinutes, }, apiTypePrefix: 'github#', }, }; } apiContext.redisKey.root = core.redisKeyForApi(apiContext.pipeline.cache.apiTypePrefix, apiContext.api, apiContext.options); apiContext.redisKey.metadata = apiContext.redisKey.root ? apiContext.redisKey.root + core.redisKeyAspectSuffix('meta') : core.redisKeyForApi(apiContext.pipeline.cache.apiTypePrefix, apiContext.api, apiContext.options, 'meta'); return apiContext; } /* // TODO: Need to audit uses of github.post, github.call to figure out if this skip provider should still be used in places function githubSkip() { // For direct GitHub calls we always go direct to GitHub with the e-tag return Q(false); } */ function usingMetadataIsCacheValid(apiContext, metadata) { // result can be falsy OR an object; { cache: true, refresh: true } // cache: whether to use the cache, if available // refresh: whether to refresh in the background for a newer value let shouldServeCache = false; const maxAgeSeconds = apiContext.maxAgeSeconds; const updatedIso = metadata ? metadata.updated : null; const refreshingIso = metadata ? 
metadata.refreshing : null; if (metadata && !updatedIso) { debug(`${apiContext.redisKey.metadata} entity without updated date found`); } if (apiContext.generatedRefreshId) { debug(`${apiContext.redisKey.metadata} this is technically a refresh operation right now behind the scenes`); } if (maxAgeSeconds && updatedIso) { const updated = moment(updatedIso); const calledTime = apiContext.calledTime; if (updated.add(maxAgeSeconds, 'seconds').isAfter(calledTime)) { shouldServeCache = true; shouldServeCache = { cache: true, remaining: 'expires in ' + moment(updatedIso).add(maxAgeSeconds, 'seconds').fromNow(), }; // debug('cache OK to serve as last updated was ' + updated); } else if (apiContext.backgroundRefresh) { let shouldRefresh = true; debug(apiContext.redisKey.metadata + ' need to go live as last updated ' + updated.format() + ' and our max seconds value is ' + maxAgeSeconds); if (refreshingIso) { let secondsToAllowForRefresh = 2 + (core.delayBeforeRefreshMilliseconds / 1000); if (Array.isArray(metadata.pages)) { secondsToAllowForRefresh += (metadata.pages.length * 1.25); } secondsToAllowForRefresh = Math.round(secondsToAllowForRefresh); const refreshWindow = moment(refreshingIso).add(secondsToAllowForRefresh, 'seconds'); if (moment().utc().isAfter(refreshWindow)) { debug(`Another worker\'s refresh did not complete. Refreshing in this instance. ${apiContext.redisKey.metadata}`); } else { shouldRefresh = false; debug(`A refresh is already being processed by another worker. Allowing a window of ${secondsToAllowForRefresh}s before retry. ${apiContext.redisKey.metadata}`); } } shouldServeCache = { cache: true, refresh: shouldRefresh, }; } } else { if (!metadata) { debug('api: empty/no metadata ' + apiContext.redisKey.metadata); } else { debug('api: no updated ' + apiContext.redisKey.metadata); } } return Q(shouldServeCache); } function processGithubMetadataBeforeCall(apiContext, metadata) { if (metadata && metadata.etag) { apiContext.etag = metadata.etag; apiContext.metadata = metadata; } return metadata; } function callGithubApi(apiContext) { const token = apiContext.token; const headers = { Authorization: `token ${token}`, }; if (apiContext.options.headers) { apiContext.headers = apiContext.options.headers; Object.assign(headers, apiContext.headers); } if (apiContext.etag) { headers['If-None-Match'] = apiContext.etag; } ++apiContext.cost.github.restApiCalls; const args = []; const apiMethod = apiContext.apiMethod; if (apiContext.fakeLink) { args.push(apiContext.fakeLink, headers); } else { const argOptions = Object.assign({}, apiContext.options); argOptions.headers = headers; args.push(argOptions); } const thisArgument = apiMethod.thisInstance || null; return apiMethod.apply(thisArgument, args); } function getGithubResponseMetadata(apiContext, response) { return Q(response.meta); } function interpretGithubResponseIsCacheOk(apiContext, response) { if (response === undefined) { throw new Error('The response was undefined and unable to process.'); } if (!response.meta) { throw new Error('No metadata was provided alongside the GitHub API response.'); } let retryAfter = response.meta['retry-after']; if (retryAfter) { debug(`Retry-After header was present: ${retryAfter}`); } const rateLimitRemaining = response.meta['x-ratelimit-remaining']; if (rateLimitRemaining) { apiContext.cost.github.remainingApiTokens = rateLimitRemaining; } let statusCode = 0; if (response && response.meta && response.meta.status) { let status = response.meta.status || ''; let i = status.indexOf(' '); statusCode = parseInt(i 
>= 0 ? status.substr(0, i) : status); response.meta.statusActual = statusCode; } let cacheOk = false; if (statusCode === 304) { const displayInfo = apiContext.redisKey ? apiContext.redisKey.root : ''; debug(`304: Use existing cache ${displayInfo}`); ++apiContext.cost.github.cacheHits; cacheOk = true; } else if (statusCode < 200 || statusCode >= 300) { // The underlying library I believe actually processes these conditions as errors anyway throw new Error(`Response code of ${statusCode} is not currently supported in this system.`); } return Q(cacheOk); } function githubReduceMetadataToCache(apiContext, response) { const metadata = response ? response.meta : null; if (metadata && metadata.etag) { let reduced = { etag: metadata.etag, }; if (metadata.link) { reduced.link = metadata.link; } /* let requestId = metadata['x-github-request-id']; if (requestId) { reduced['x-github-request-id'] = requestId; } */ // CONSIDER: can parse last-modified and store it as 'changed' UTC let calledTime = apiContext.calledTime ? apiContext.calledTime.format() : moment().utc().format(); reduced.updated = calledTime; return reduced; } return metadata; } function wrapCreatePage(libraryContext, github, kind) { return function(token, link, callback) { getPage(libraryContext, github, token, link, kind, callback); }; } function getPage(libraryContext, github, token, link, which, callback) { const url = getPageLink(github, link.meta.link, which); if (!url) { return callback(new Error('No GitHub collection link was present in the response.')); } const apiContext = prepareApiContextForGithub(createApiContextFromLink(github, url), github); apiContext.token = token; apiContext.libraryContext = libraryContext; core.execute(apiContext, callback); } function getPageLink(github, link, which) { let method = null; switch (which) { case 'next': method = github.hasNextPage; break; case 'prev': method = github.hasPreviousPage; break; case 'last': method = github.hasLastPage; break; case 'first': method = github.hasFirstPage; break; default: return null; } if (method) { return method.call(github, link); } } module.exports = { create: createFullContext, wrapCreatePage: wrapCreatePage, findGitHubMethod: findGitHubMethod, }; <file_sep>/views/org/team/approveStatus.pug //- //- Copyright (c) Microsoft. All rights reserved. //- Licensed under the MIT license. See LICENSE file in the project root for full license information. //- extends ../../layout block content div.container //- This can be used for local debugging if needed: //- pre= JSON.stringify(entry, undefined, 2) if entry.type == 'repo' if entry.repoVisibility == 'public' h1 New public #{entry.org} Repository Request else h1 New private #{entry.org} Repository Request else h1 Open Source Access Request if entry.active === true if entry.type == 'repo' p.lead Please review this repository creation request on behalf of the #{entry.org} organization. p Keep in mind ideal naming patterns and best practices for this organization. Feel free to take the time to communicate with other approvers over e-mail or modify the request prior to creating the repo. if entry.repoVisibility == 'private' p Private repositories count against our organization's private repo quota. Consider whether this repo and its business justification imply that the project is heading toward an open source release soon. You may recommend to the maintainer that instead they use a different corporate source control system until ready to go public, for example. 
else p.lead Please review this pending permission request else p.lead This request has already been closed. NO ACTION REQUIRED. p form(method='post') input.btn.btn-sm.btn-default(type='submit', name='reopen', value='Re-open') if entry.requestedTime && entry.requestedTime.toISOString p | This request was opened by&nbsp; a(href='https://github.com/' + entry.ghu, target='_new')= entry.ghu | &nbsp; time(datetime=entry.requestedTime.toISOString())= entry.requestedTime.toDateString() p form(method='post', action=teamUrl + 'approvals/' + entry.RowKey) if entry.active === true input.btn.btn-primary.btn-sm(type='submit', name='approve', value='Approve') | &nbsp; input.btn.btn-sm.btn-default(type='submit', name='approveWithComment', value='Approve with Comment...') | &nbsp; input.btn.btn-sm.btn-default(type='submit', name='deny', value='Deny...') | &nbsp; if entry.type == 'repo' a.btn.btn-sm.btn-muted(href=(teamUrl + 'approvals/' + entry.RowKey + '/edit')) Edit Request as an approver else // At this time, editing of standard permission requests is not implemented. if entry.decision table.table.table-bordered thead tr th Decision Status th Decision Made th Decision Maker tbody tr td if entry.decision p= entry.decision.toUpperCase() td if entry.decisionTime p time(datetime=entry.decisionTime.toISOString())= entry.decisionTime.toDateString() else p Unknown td p if entry.decisionBy && entry.decisionEmail a(href='mailto:' + entry.decisionEmail)= entry.decisionBy else = entry.decisionBy | &nbsp; = entry.decisionEmail if entry.decisionNote tr td(colspan='3') p strong Note by decision maker: br = entry.decisionNote if entry.justification table.table.table-bordered thead tr th Justification provided by user tbody tr td= entry.justification //- REPO CREATE DETAILS if entry.type == 'repo' table.table.table-bordered thead tr th Repo Name th Organization th Initial Visibility tbody tr td p(style='font-size:225%')= entry.repoName td p= entry.org td p if entry.repoVisibility == 'public' strong(style='color:red') PUBLIC else | Private table.table.table-bordered thead tr th Public Repo Description & Optional URL tbody tr td p= entry.repoDescription if entry.repoUrl p small a(href=entry.repoUrl, target='_new') i= entry.repoUrl if expandedTeamInfo table.table.table-bordered thead tr th(colspan='3') Team Permissions tr th Team ID th Team Name th Permission Level for Repository tbody each eti in expandedTeamInfo tr td= eti.id td= eti.name td.capitalize= eti._temporary_permission table.table.table-bordered thead tr if entry.type !== 'repo' th Team Requested //-if entry.issue //- NOTE: Commenting out GitHub links with approval repos, since they are being deprecated internally and this is a quick fix th GitHub Tracking Issue tbody tr if entry.type !== 'repo' td p a(href='https://github.com/orgs/' + entry.org + '/teams/' + entry.teamname, target='_new')= entry.teamname //-if entry.issue //- NOTE: Commenting out GitHub links with approval repos, since they are being deprecated internally and this is a quick fix td p a.btn.btn-sm.btn-default(href='https://github.com/' + team.org.getWorkflowRepository().full_name + '/issues/' + entry.issue, target='_new')= entry.issue table.table.table-bordered thead tr th GitHub Account th Corporate Identity tbody tr td p if requestingUser.avatar() img(alt=requestingUser.login, src=requestingUser.avatar(80) + '&s=80', style='margin-right:10px;width:30px;height:30px') a.btn.btn-sm.btn-muted(href='https://github.com/' + requestingUser.login, target='_new')= requestingUser.login td p if 
requestingUser.link && requestingUser.corporateAlias() if requestingUser.link.aadname a.btn.btn-sm.btn-muted(href=requestingUser.corporateProfileUrl(), target='_new')= requestingUser.link.aadname if requestingUser.corporateAlias() a.btn.btn-sm.btn-muted-more(href=requestingUser.corporateProfileUrl(), target='_new') = requestingUser.corporateAlias() + ' ' i.glyphicon.glyphicon-share-alt else if requestingUser.link.aadupn = requestingUser.link.aadupn else | UNKNOWN ALIAS | &nbsp; if entry.email a.btn.btn-sm.btn-default(href='mailto:' + entry.email) Send Mail <file_sep>/jobs/reports/views/administrator.pug //- //- Copyright (c) Microsoft. All rights reserved. //- Licensed under the MIT license. See LICENSE file in the project root for full license information. //- extends layout block content //- View services - var fileSize = viewServices.fileSize - var moment = viewServices.moment - var _ = viewServices._ //- Styles - var groupByTableCellStyle = 'background-color:#ddd;padding:6px;margin-top:4px' - var actionTextStyle = 'color:red' - var standardColumnMarginBottom = '2px' - var standardColumnVerticalAlign = 'top' //- Report (there should only be one) - var isDigestService = digestRunDate && true - var report = github && github.consolidated ? (isDigestService ? github.consolidated[0] : github.consolidated) : [] if report //- Report for entry in report - var definition = entry.definition - var list = entry.list - var table = entry.table h3= definition.title || definition.name if definition.description p= definition.description if definition.actionText p(style=actionTextStyle)= definition.actionText if list && list.listItems if list.heading p= list.heading - var groupByField = list.groupBy || 'just-group-everything-together' - var currentGroupByValue = null - var grouping = _.groupBy(list.listItems, groupByField) each groupSet, heading in grouping if typeof(heading) !== 'undefined' h4= heading each li in groupSet if li && li.text li: div(style={ color: li.color }) = li.text if li.actions each action in li.actions = ' ' a(href=action.link)= action.text || action.link if table - var groupByField = table.groupBy || null - var currentGroupByValue = null table if table.columns thead each columnTitle, columnName in table.columns th= columnTitle if table.rows tbody each row in table.rows if table.columns //- Group by a field - var columnCount = Object.getOwnPropertyNames(table.columns).length if groupByField - var localGroupByValue = row[groupByField] if localGroupByValue !== currentGroupByValue - currentGroupByValue = localGroupByValue if localGroupByValue tr td(colspan=columnCount, style=groupByTableCellStyle) h4= localGroupByValue tr each columnTitle, columnName in table.columns - var tableColumnWidth = table.columnWidths && table.columnWidths[columnName] ? 
table.columnWidths[columnName] : null td(style={ width: tableColumnWidth, 'vertical-align': standardColumnVerticalAlign, 'margin-bottom': standardColumnMarginBottom, }) - var localValue = row[columnName] if localValue if typeof(localValue) === 'object' if localValue.html != localValue.html else if localValue.link && localValue.text a(href=localValue.link)= localValue.text else if localValue.actions //- CONSIDER: list-inline actions each action in localValue.actions a(href=action.link)= action.text || action.link | &nbsp; &nbsp; else if localValue.text span(style={ color: localValue.color })= localValue.text else small= JSON.stringify(localValue, undefined, 2) else = localValue else tr: td No column headings defined, this report definition is not currently supported p &nbsp; hr //- Explain the weekly digest if report.metadata && report.metadata.dayOfWeek - var recipientDisplay = recipient && recipient.name ? recipient.name : 'you' if report.metadata.isWeeklyDigest p: small: em This is a weekly summary prepared each #{report.metadata.dayOfWeek} for #{recipientDisplay}#{report.metadata.startedText ? ' using GitHub data as of ' + report.metadata.startedText : ''} else p: small: em This is an activity report for #{recipientDisplay}#{report.metadata.startedText ? ' using GitHub data as of ' + report.metadata.startedText : ''} if report.reasons && report.reasons.length p: small You're receiving this digest because of your role in the following resource#{report.reasons.length === 1 ? '' : 's'}: ul each reason in report.reasons li: small= reason <file_sep>/lib/mailProvider/customMailService.js // // Copyright (c) Microsoft. All rights reserved. // // customMailService.js: THIS FILE IS FOR INTERNAL USE AND SHOULD NOT BE OPEN SOURCED AT THIS TIME 'use strict'; const request = require('request'); function pop(obj, key) { const val = obj[key]; delete obj[key]; return val; } function sendMail(mailConfig, options, callback) { const serviceUrl = mailConfig.customService.url; if (!serviceUrl) { return callback(new Error('No custom mail service provider endpoint configured.')); } const auth = { username: mailConfig.customService.username, password: mailConfig.customService.apiKey, sendImmediately: true, }; let from = pop(options, 'from') || mailConfig.from; let to = pop(options, 'to'); if (!to) { return callback(new Error('The e-mail must have a receipient.')); } if (typeof to !== 'string' && to.join) { to = to.join(', '); } const subject = pop(options, 'subject'); if (!subject) { return callback(new Error('The e-mail must have a subject.')); } const content = pop(options, 'content'); if (!content) { return callback(new Error('A message must include content.')); } let cc = pop(options, 'cc'); if (cc && typeof cc !== 'string' && cc.join) { cc = cc.join(', '); } const classification = pop(options, 'classification'); if (!classification) { return callback(new Error('The custom mail service provider requires a classification value.')); } if (classification !== 'warning' && classification !== 'information' && classification !== 'action') { return callback(new Error(`The custom mail service provider does not recognize the classification value of "${classification}".`)); } // Optional template fields: headline, reason const headline = pop(options, 'headline'); const reason = pop(options, 'reason'); const service = pop(options, 'service'); const correlationId = pop(options, 'correlationId'); const customMailPost = { to: to, cc: cc, from: from, subject: subject, body: content, headline: headline, reason: reason, 
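    // The remaining fields are pass-through metadata for the custom service:
    // `template` selects the mail template from the classification value
    // (information, warning, or action), while `service` and `correlationId`
    // support attribution and tracing (an assumption about how the receiving
    // service uses them).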
service: service, template: classification, correlationId: correlationId, }; request.post({ auth: auth, form: customMailPost, url: serviceUrl, }, (httpError, response, body) => { if (response.statusCode >= 300) { httpError = new Error(`Mail could not be sent, the mail service returned a status code of ${response.statusCode}`); } callback(httpError || null, httpError ? null : body); }); } module.exports = function createCustomMailService(config) { const customServiceConfig = config.mail.customService; const appVersion = config.logging.version; if (customServiceConfig.version !== 'prototype') { throw new Error(`The custom mail service version "${customServiceConfig.version}" is not supported in this release.`); } config.mail.customService.username = 'custom'; return { info: `customMailService-${customServiceConfig.version} v${appVersion}`, html: true, sendMail: sendMail.bind(undefined, config.mail), }; }; <file_sep>/views/settings/index.pug //- //- Copyright (c) Microsoft. All rights reserved. //- Licensed under the MIT license. See LICENSE file in the project root for full license information. //- extends ./layout.pug block content div.container h1 Profile p You can edit your profile directly on GitHub.com. p: a.btn.btn-sm.btn-primary(href='https://github.com/settings/profile', target='_new') Edit GitHub profile <file_sep>/routes/org/team/index-maintainer.js // // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // var express = require('express'); var router = express.Router(); var utils = require('../../../utils'); var approvalsRoute = require('./approvals'); var membersRoute = require('./members'); router.use(function (req, res, next) { const teamPermissions = req.teamPermissions; if (!teamPermissions.allowAdministration) { const err = utils.wrapError(null, 'You do not have permission to maintain this team.', true); err.detailed = 'These aren\'t the droids you are looking for.'; err.status = 403; err.skipLog = true; return next(err); } return next(); }); router.use('/approvals', approvalsRoute); router.use('/members', membersRoute); module.exports = router;
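//
// Illustrative mounting sketch (the parent route names are assumptions):
// this router is attached beneath a team route whose earlier middleware has
// already computed req.teamPermissions, so the guard above can enforce the
// maintainer-only requirement, e.g.
//
//   const teamMaintainerRoutes = require('./index-maintainer');
//   teamRouter.use(teamMaintainerRoutes);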
96beaeed8f3f3c8813f31a839489d6c68ff69a8b
[ "SCSS", "Markdown", "JavaScript", "TypeScript", "Less", "Pug" ]
173
SCSS
timothystewart6/opensource-portal
3b5831ce81f985fc8d7a465bd5432090edae0449
89211e54bb5e0bd19f2e57eba0c7351c15c4f1b8
refs/heads/master
<repo_name>rust-lang/regex<file_sep>/regex-automata/tests/hybrid/api.rs use std::error::Error; use regex_automata::{ hybrid::dfa::{OverlappingState, DFA}, nfa::thompson, HalfMatch, Input, MatchError, }; // Tests that too many cache resets cause the lazy DFA to quit. // // We only test this on 64-bit because the test is gingerly crafted based on // implementation details of cache sizes. It's not a great test because of // that, but it does check some interesting properties around how positions are // reported when a search "gives up." // // NOTE: If you change something in lazy DFA implementation that causes this // test to fail by reporting different "gave up" positions, then it's generally // okay to update the positions in the test below as long as you're sure your // changes are correct. Namely, it is expected that if there are changes in the // cache size (or changes in how big things are inside the cache), then its // utilization may change slightly and thus impact where a search gives up. // Precisely where a search gives up is not an API guarantee, so changing the // offsets here is OK. #[test] #[cfg(target_pointer_width = "64")] #[cfg(not(miri))] fn too_many_cache_resets_cause_quit() -> Result<(), Box<dyn Error>> { // This is a carefully chosen regex. The idea is to pick one that requires // some decent number of states (hence the bounded repetition). But we // specifically choose to create a class with an ASCII letter and a // non-ASCII letter so that we can check that no new states are created // once the cache is full. Namely, if we fill up the cache on a haystack // of 'a's, then in order to match one 'β', a new state will need to be // created since a 'β' is encoded with multiple bytes. // // So we proceed by "filling" up the cache by searching a haystack of just // 'a's. The cache won't have enough room to add enough states to find the // match (because of the bounded repetition), which should result in it // giving up before it finds a match. // // Since there's now no more room to create states, we search a haystack // of 'β' and confirm that it gives up immediately. let pattern = r"[aβ]{99}"; let dfa = DFA::builder() .configure( // Configure it so that we have the minimum cache capacity // possible. And that if any resets occur, the search quits. DFA::config() .skip_cache_capacity_check(true) .cache_capacity(0) .minimum_cache_clear_count(Some(0)), ) .thompson(thompson::NFA::config()) .build(pattern)?; let mut cache = dfa.create_cache(); let haystack = "a".repeat(101).into_bytes(); let err = MatchError::gave_up(25); // Notice that we make the same amount of progress in each search! That's // because the cache is reused and already has states to handle the first // N bytes. assert_eq!( Err(err.clone()), dfa.try_search_fwd(&mut cache, &Input::new(&haystack)) ); assert_eq!( Err(err.clone()), dfa.try_search_overlapping_fwd( &mut cache, &Input::new(&haystack), &mut OverlappingState::start() ), ); let haystack = "β".repeat(101).into_bytes(); let err = MatchError::gave_up(2); assert_eq!( Err(err), dfa.try_search_fwd(&mut cache, &Input::new(&haystack)) ); // no need to test that other find routines quit, since we did that above // OK, if we reset the cache, then we should be able to create more states // and make more progress with searching for betas. cache.reset(&dfa); let err = MatchError::gave_up(27); assert_eq!( Err(err), dfa.try_search_fwd(&mut cache, &Input::new(&haystack)) ); // ... 
switching back to ASCII still makes progress since it just needs to // set transitions on existing states! let haystack = "a".repeat(101).into_bytes(); let err = MatchError::gave_up(13); assert_eq!( Err(err), dfa.try_search_fwd(&mut cache, &Input::new(&haystack)) ); Ok(()) } // Tests that quit bytes in the forward direction work correctly. #[test] fn quit_fwd() -> Result<(), Box<dyn Error>> { let dfa = DFA::builder() .configure(DFA::config().quit(b'x', true)) .build("[[:word:]]+$")?; let mut cache = dfa.create_cache(); assert_eq!( dfa.try_search_fwd(&mut cache, &Input::new("abcxyz")), Err(MatchError::quit(b'x', 3)), ); assert_eq!( dfa.try_search_overlapping_fwd( &mut cache, &Input::new(b"abcxyz"), &mut OverlappingState::start() ), Err(MatchError::quit(b'x', 3)), ); Ok(()) } // Tests that quit bytes in the reverse direction work correctly. #[test] fn quit_rev() -> Result<(), Box<dyn Error>> { let dfa = DFA::builder() .configure(DFA::config().quit(b'x', true)) .thompson(thompson::Config::new().reverse(true)) .build("^[[:word:]]+")?; let mut cache = dfa.create_cache(); assert_eq!( dfa.try_search_rev(&mut cache, &Input::new("abcxyz")), Err(MatchError::quit(b'x', 3)), ); Ok(()) } // Tests that if we heuristically enable Unicode word boundaries but then // instruct that a non-ASCII byte should NOT be a quit byte, then the builder // will panic. #[test] #[should_panic] fn quit_panics() { DFA::config().unicode_word_boundary(true).quit(b'\xFF', false); } // This tests an interesting case where even if the Unicode word boundary option // is disabled, setting all non-ASCII bytes to be quit bytes will cause Unicode // word boundaries to be enabled. #[test] fn unicode_word_implicitly_works() -> Result<(), Box<dyn Error>> { let mut config = DFA::config(); for b in 0x80..=0xFF { config = config.quit(b, true); } let dfa = DFA::builder().configure(config).build(r"\b")?; let mut cache = dfa.create_cache(); let expected = HalfMatch::must(0, 1); assert_eq!( Ok(Some(expected)), dfa.try_search_fwd(&mut cache, &Input::new(" a")), ); Ok(()) } <file_sep>/regex-cli/args/hybrid.rs use { anyhow::Context, lexopt::{Arg, Parser}, regex_automata::{hybrid, nfa::thompson::NFA, MatchKind}, }; use crate::args::{self, flags, Configurable, Usage}; /// Exposes the configuration of a lazy DFA. #[derive(Debug, Default)] pub struct Config { hybrid: hybrid::dfa::Config, } impl Config { /// Return a `hybrid::dfa::Config` object from this configuration. pub fn hybrid(&self) -> anyhow::Result<hybrid::dfa::Config> { Ok(self.hybrid.clone()) } /// Returns a new configuration that compiles a reverse lazy DFA from a /// reverse NFA. The caller is responsible for reversing the NFA. pub fn reversed(&self) -> Config { let hybrid = self.hybrid.clone().prefilter(None).match_kind(MatchKind::All); Config { hybrid } } /// Build a lazy DFA from the NFA given. /// /// Building a lazy DFA is generally cheap. It only does a little bit of /// work, but otherwise, the actual determinization process is carried out /// on demand at search time. pub fn from_nfa(&self, nfa: &NFA) -> anyhow::Result<hybrid::dfa::DFA> { hybrid::dfa::Builder::new() .configure(self.hybrid()?)
.build_from_nfa(nfa.clone()) .context("failed to compile lazy DFA") } } impl Configurable for Config { fn configure( &mut self, p: &mut Parser, arg: &mut Arg, ) -> anyhow::Result<bool> { match *arg { Arg::Short('k') | Arg::Long("match-kind") => { let kind: flags::MatchKind = args::parse(p, "-k/--match-kind")?; self.hybrid = self.hybrid.clone().match_kind(kind.kind); } Arg::Long("starts-for-each-pattern") => { self.hybrid = self.hybrid.clone().starts_for_each_pattern(true); } Arg::Short('C') | Arg::Long("no-byte-classes") => { self.hybrid = self.hybrid.clone().byte_classes(false); } Arg::Long("unicode-word-boundary") => { self.hybrid = self.hybrid.clone().unicode_word_boundary(true); } Arg::Long("quit") => { let set: flags::ByteSet = args::parse(p, "--quit")?; for &byte in set.0.iter() { self.hybrid = self.hybrid.clone().quit(byte, true); } } Arg::Long("specialize-start-states") => { self.hybrid = self.hybrid.clone().specialize_start_states(true); } Arg::Long("cache-capacity") => { let capacity = args::parse(p, "--cache-capacity")?; self.hybrid = self.hybrid.clone().cache_capacity(capacity); } Arg::Long("skip-cache-capacity-check") => { self.hybrid = self.hybrid.clone().skip_cache_capacity_check(true); } Arg::Long("minimum-cache-clear-count") => { let min = args::parse_maybe(p, "--minimum-cache-clear-count")?; self.hybrid = self.hybrid.clone().minimum_cache_clear_count(min); } Arg::Long("minimum-bytes-per-state") => { let min = args::parse_maybe(p, "--minimum-bytes-per-state")?; self.hybrid = self.hybrid.clone().minimum_bytes_per_state(min); } _ => return Ok(false), } Ok(true) } fn usage(&self) -> &[Usage] { const USAGES: &'static [Usage] = &[ flags::MatchKind::USAGE, Usage::new( "--starts-for-each-pattern", "Add anchored start states for each pattern.", r#" Add anchored start states for each pattern. This permits running an anchored search for a specific pattern using the --pattern-id flag. (Assuming this is a search command.) "#, ), Usage::new( "-C, --no-byte-classes", "Disable byte classes.", r#" This causes all bytes to be an equivalence class unto themselves. By default, bytes are grouped into equivalence classes to reduce the size of the alphabet for a DFA, and therefore decreases overall space usage. It can be quite convenient to disable byte classes when looking at the debug representation of a DFA. Otherwise, the transitions are much harder for a human to read. "#, ), Usage::new( "--unicode-word-boundary", "Enable heuristic support for Unicode word boundaries.", r#" Enable heuristic support for Unicode word boundaries. When enabled, a DFA will treat a Unicode word boundary as if it were an ASCII boundary, but will quit if it sees any non-ASCII byte. This is disabled by default, in which case, attempting to compile a DFA with a Unicode word boundary will result in an error. Note that enabling this is very similar to using the --quit flag and providing every non-ASCII byte as a quit byte. The only difference is that when this flag is used, the quit bytes are only added if the pattern contains a Unicode word boundary. "#, ), Usage::new( "--quit", "Add quit bytes to this DFA.", r#" Add quit bytes to this DFA. When a quit byte is added to a DFA, then an outgoing transition to every state for this byte is added to the DFA that points to a special sentinel "quit" state. If the "quit" state is entered during a search, then an error is returned. The bytes given represent a set and may be specified as a sequence. Escape sequences like \n and \xFF are supported. 
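For example, passing a value of 'xyz' adds each of the bytes 'x', 'y' and 'z' to the quit set, so a search that sees any one of those bytes returns an error.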
"#, ), Usage::new( "--specialize-start-states", "Specializes start states for prefilter support.", r#" When given, start states are "specialized" such that prefilters are better supported. Namely, when start states are specialized they are given a special tag that results in them being treated as a special case when entered at search time. The special case is that a prefilter can be run at that point in an attempt to accelerate the search. In general, it only makes sense to specialize start states when a prefilter is also enabled. Note also that if start states are not specialized (the default), then it is in general not possible to determine whether any given state ID is a start state, unless you've enumerated all possible start states and checked it against that set. "#, ), Usage::new( "--cache-capacity", "Set the total cache capacity for the lazy DFA.", r#" This sets an approximate limit on the total amount of heap memory used by the lazy DFA. Once the cache reaches capacity and there's no more room for additional states, the cache is cleared and the lazy DFA keeps rebuilding itself. "#, ), Usage::new( "--skip-cache-capacity-check", "Use the minimum cache capacity instead of failing.", r#" By default, building a lazy DFA will fail if the configured cache capacity is too small to hold a small minimum number of DFA states. But enabling this option will "skip" that check and instead force the cache to be at least as big as the minimum size required. This can be useful if you are trying hard to avoid cases where the lazy DFA fails to build, but is in general not recommended. Namely, if the cache is too small, then it's plausible that it will not be used efficiently and the overall search would be slow. But it's not guaranteed to be slow, which is why this setting is configurable. "#, ), Usage::new( "--minimum-cache-clear-count", "Set the minimum number of times the cache must be cleared.", r#" Sets the minimum number of times the cache must be cleared before the lazy DFA is permitted to give up on the search and return an error. This may be set to the special value 'none', which sets no minimum. In this case, the lazy DFA will never give up. When its cache gets full, it will clear it and keep going. This flag is usually used in combination with --minimum-bytes-per-state. It sets a baseline number of cache clearings but then also requires an efficiency below a certain amount before the lazy DFA will quit. When --minimum-bytes-per-state is not set and a minimum cache clear count has been set, then the lazy DFA unconditionally quits after the cache has been cleared the minimum number of times. "#, ), Usage::new( "--minimum-bytes-per-state", "Sets the minimum efficiency for lazy DFA.", r#" This flag sets the minimum efficiency of the lazy DFA in terms of the number of bytes processed per new state added to the cache. If that number falls below the efficiency given by this flag *and* the cache has been cleared the minimum number of times, then the lazy DFA will quit the search and return an error. This flag has no effect if --minimum-cache-clear-count is set to 'none'. 
"#, ), ]; USAGES } } <file_sep>/regex-capi/src/lib.rs #[macro_use] mod macros; mod error; mod rure; pub use crate::error::*; pub use crate::rure::*; <file_sep>/regex-automata/src/nfa/thompson/nfa.rs use core::{fmt, mem}; use alloc::{boxed::Box, format, string::String, sync::Arc, vec, vec::Vec}; #[cfg(feature = "syntax")] use crate::nfa::thompson::{ compiler::{Compiler, Config}, error::BuildError, }; use crate::{ nfa::thompson::builder::Builder, util::{ alphabet::{self, ByteClassSet, ByteClasses}, captures::{GroupInfo, GroupInfoError}, look::{Look, LookMatcher, LookSet}, primitives::{ IteratorIndexExt, PatternID, PatternIDIter, SmallIndex, StateID, }, sparse_set::SparseSet, }, }; /// A byte oriented Thompson non-deterministic finite automaton (NFA). /// /// A Thompson NFA is a finite state machine that permits unconditional epsilon /// transitions, but guarantees that there exists at most one non-epsilon /// transition for each element in the alphabet for each state. /// /// An NFA may be used directly for searching, for analysis or to build /// a deterministic finite automaton (DFA). /// /// # Cheap clones /// /// Since an NFA is a core data type in this crate that many other regex /// engines are based on top of, it is convenient to give ownership of an NFA /// to said regex engines. Because of this, an NFA uses reference counting /// internally. Therefore, it is cheap to clone and it is encouraged to do so. /// /// # Capabilities /// /// Using an NFA for searching via the /// [`PikeVM`](crate::nfa::thompson::pikevm::PikeVM) provides the most amount /// of "power" of any regex engine in this crate. Namely, it supports the /// following in all cases: /// /// 1. Detection of a match. /// 2. Location of a match, including both the start and end offset, in a /// single pass of the haystack. /// 3. Location of matching capturing groups. /// 4. Handles multiple patterns, including (1)-(3) when multiple patterns are /// present. /// /// # Capturing Groups /// /// Groups refer to parenthesized expressions inside a regex pattern. They look /// like this, where `exp` is an arbitrary regex: /// /// * `(exp)` - An unnamed capturing group. /// * `(?P<name>exp)` or `(?<name>exp)` - A named capturing group. /// * `(?:exp)` - A non-capturing group. /// * `(?i:exp)` - A non-capturing group that sets flags. /// /// Only the first two forms are said to be _capturing_. Capturing /// means that the last position at which they match is reportable. The /// [`Captures`](crate::util::captures::Captures) type provides convenient /// access to the match positions of capturing groups, which includes looking /// up capturing groups by their name. /// /// # Byte oriented /// /// This NFA is byte oriented, which means that all of its transitions are /// defined on bytes. In other words, the alphabet of an NFA consists of the /// 256 different byte values. /// /// While DFAs nearly demand that they be byte oriented for performance /// reasons, an NFA could conceivably be *Unicode codepoint* oriented. Indeed, /// a previous version of this NFA supported both byte and codepoint oriented /// modes. A codepoint oriented mode can work because an NFA fundamentally uses /// a sparse representation of transitions, which works well with the large /// sparse space of Unicode codepoints. /// /// Nevertheless, this NFA is only byte oriented. This choice is primarily /// driven by implementation simplicity, and also in part memory usage. In /// practice, performance between the two is roughly comparable. 
However, /// building a DFA (including a hybrid DFA) really wants a byte oriented NFA. /// So if we do have a codepoint oriented NFA, then we also need to generate /// byte oriented NFA in order to build an hybrid NFA/DFA. Thus, by only /// generating byte oriented NFAs, we can produce one less NFA. In other words, /// if we made our NFA codepoint oriented, we'd need to *also* make it support /// a byte oriented mode, which is more complicated. But a byte oriented mode /// can support everything. /// /// # Differences with DFAs /// /// At the theoretical level, the precise difference between an NFA and a DFA /// is that, in a DFA, for every state, an input symbol unambiguously refers /// to a single transition _and_ that an input symbol is required for each /// transition. At a practical level, this permits DFA implementations to be /// implemented at their core with a small constant number of CPU instructions /// for each byte of input searched. In practice, this makes them quite a bit /// faster than NFAs _in general_. Namely, in order to execute a search for any /// Thompson NFA, one needs to keep track of a _set_ of states, and execute /// the possible transitions on all of those states for each input symbol. /// Overall, this results in much more overhead. To a first approximation, one /// can expect DFA searches to be about an order of magnitude faster. /// /// So why use an NFA at all? The main advantage of an NFA is that it takes /// linear time (in the size of the pattern string after repetitions have been /// expanded) to build and linear memory usage. A DFA, on the other hand, may /// take exponential time and/or space to build. Even in non-pathological /// cases, DFAs often take quite a bit more memory than their NFA counterparts, /// _especially_ if large Unicode character classes are involved. Of course, /// an NFA also provides additional capabilities. For example, it can match /// Unicode word boundaries on non-ASCII text and resolve the positions of /// capturing groups. /// /// Note that a [`hybrid::regex::Regex`](crate::hybrid::regex::Regex) strikes a /// good balance between an NFA and a DFA. It avoids the exponential build time /// of a DFA while maintaining its fast search time. The downside of a hybrid /// NFA/DFA is that in some cases it can be slower at search time than the NFA. /// (It also has less functionality than a pure NFA. It cannot handle Unicode /// word boundaries on non-ASCII text and cannot resolve capturing groups.) /// /// # Example /// /// This shows how to build an NFA with the default configuration and execute a /// search using the Pike VM. /// /// ``` /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Match}; /// /// let re = PikeVM::new(r"foo[0-9]+")?; /// let mut cache = re.create_cache(); /// let mut caps = re.create_captures(); /// /// let expected = Some(Match::must(0, 0..8)); /// re.captures(&mut cache, b"foo12345", &mut caps); /// assert_eq!(expected, caps.get_match()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// # Example: resolving capturing groups /// /// This example shows how to parse some simple dates and extract the /// components of each date via capturing groups. 
/// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{ /// nfa::thompson::pikevm::PikeVM, /// util::captures::Captures, /// }; /// /// let vm = PikeVM::new(r"(?P<y>\d{4})-(?P<m>\d{2})-(?P<d>\d{2})")?; /// let mut cache = vm.create_cache(); /// /// let haystack = "2012-03-14, 2013-01-01 and 2014-07-05"; /// let all: Vec<Captures> = vm.captures_iter( /// &mut cache, haystack.as_bytes() /// ).collect(); /// // There should be a total of 3 matches. /// assert_eq!(3, all.len()); /// // The year from the second match is '2013'. /// let span = all[1].get_group_by_name("y").unwrap(); /// assert_eq!("2013", &haystack[span]); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// This example shows that only the last match of a capturing group is /// reported, even if it had to match multiple times for an overall match /// to occur. /// /// ``` /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Span}; /// /// let re = PikeVM::new(r"([a-z]){4}")?; /// let mut cache = re.create_cache(); /// let mut caps = re.create_captures(); /// /// let haystack = b"quux"; /// re.captures(&mut cache, haystack, &mut caps); /// assert!(caps.is_match()); /// assert_eq!(Some(Span::from(3..4)), caps.get_group(1)); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[derive(Clone)] pub struct NFA( // We make NFAs reference counted primarily for two reasons. First is that // the NFA type itself is quite large (at least 0.5KB), and so it makes // sense to put it on the heap by default anyway. Second is that, for Arc // specifically, this enables cheap clones. This tends to be useful because // several structures (the backtracker, the Pike VM, the hybrid NFA/DFA) // all want to hang on to an NFA for use during search time. We could // provide the NFA at search time via a function argument, but this makes // for an unnecessarily annoying API. Instead, we just let each structure // share ownership of the NFA. Using a deep clone would not be smart, since // the NFA can use quite a bit of heap space. Arc<Inner>, ); impl NFA { /// Parse the given regular expression using a default configuration and /// build an NFA from it. /// /// If you want a non-default configuration, then use the NFA /// [`Compiler`] with a [`Config`]. /// /// # Example /// /// ``` /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Match}; /// /// let re = PikeVM::new(r"foo[0-9]+")?; /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); /// /// let expected = Some(Match::must(0, 0..8)); /// re.captures(&mut cache, b"foo12345", &mut caps); /// assert_eq!(expected, caps.get_match()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[cfg(feature = "syntax")] pub fn new(pattern: &str) -> Result<NFA, BuildError> { NFA::compiler().build(pattern) } /// Parse the given regular expressions using a default configuration and /// build a multi-NFA from them. /// /// If you want a non-default configuration, then use the NFA /// [`Compiler`] with a [`Config`]. 
/// /// # Example /// /// ``` /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Match}; /// /// let re = PikeVM::new_many(&["[0-9]+", "[a-z]+"])?; /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); /// /// let expected = Some(Match::must(1, 0..3)); /// re.captures(&mut cache, b"foo12345bar", &mut caps); /// assert_eq!(expected, caps.get_match()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[cfg(feature = "syntax")] pub fn new_many<P: AsRef<str>>(patterns: &[P]) -> Result<NFA, BuildError> { NFA::compiler().build_many(patterns) } /// Returns an NFA with a single regex pattern that always matches at every /// position. /// /// # Example /// /// ``` /// use regex_automata::{nfa::thompson::{NFA, pikevm::PikeVM}, Match}; /// /// let re = PikeVM::new_from_nfa(NFA::always_match())?; /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); /// /// let expected = Some(Match::must(0, 0..0)); /// re.captures(&mut cache, b"", &mut caps); /// assert_eq!(expected, caps.get_match()); /// re.captures(&mut cache, b"foo", &mut caps); /// assert_eq!(expected, caps.get_match()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn always_match() -> NFA { // We could use NFA::new("") here and we'd get the same semantics, but // hand-assembling the NFA (as below) does the same thing with a fewer // number of states. It also avoids needing the 'syntax' feature // enabled. // // Technically all we need is the "match" state, but we add the // "capture" states so that the PikeVM can use this NFA. // // The unwraps below are OK because we add so few states that they will // never exhaust any default limits in any environment. let mut builder = Builder::new(); let pid = builder.start_pattern().unwrap(); assert_eq!(pid.as_usize(), 0); let start_id = builder.add_capture_start(StateID::ZERO, 0, None).unwrap(); let end_id = builder.add_capture_end(StateID::ZERO, 0).unwrap(); let match_id = builder.add_match().unwrap(); builder.patch(start_id, end_id).unwrap(); builder.patch(end_id, match_id).unwrap(); let pid = builder.finish_pattern(start_id).unwrap(); assert_eq!(pid.as_usize(), 0); builder.build(start_id, start_id).unwrap() } /// Returns an NFA that never matches at any position. /// /// This is a convenience routine for creating an NFA with zero patterns. /// /// # Example /// /// ``` /// use regex_automata::nfa::thompson::{NFA, pikevm::PikeVM}; /// /// let re = PikeVM::new_from_nfa(NFA::never_match())?; /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); /// /// re.captures(&mut cache, b"", &mut caps); /// assert!(!caps.is_match()); /// re.captures(&mut cache, b"foo", &mut caps); /// assert!(!caps.is_match()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn never_match() -> NFA { // This always succeeds because it only requires one NFA state, which // will never exhaust any (default) limits. let mut builder = Builder::new(); let sid = builder.add_fail().unwrap(); builder.build(sid, sid).unwrap() } /// Return a default configuration for an `NFA`. /// /// This is a convenience routine to avoid needing to import the `Config` /// type when customizing the construction of an NFA. /// /// # Example /// /// This example shows how to build an NFA with a small size limit that /// results in a compilation error for any regex that tries to use more /// heap memory than the configured limit. 
/// /// ``` /// use regex_automata::nfa::thompson::{NFA, pikevm::PikeVM}; /// /// let result = PikeVM::builder() /// .thompson(NFA::config().nfa_size_limit(Some(1_000))) /// // Remember, \w is Unicode-aware by default and thus huge. /// .build(r"\w+"); /// assert!(result.is_err()); /// ``` #[cfg(feature = "syntax")] pub fn config() -> Config { Config::new() } /// Return a compiler for configuring the construction of an `NFA`. /// /// This is a convenience routine to avoid needing to import the /// [`Compiler`] type in common cases. /// /// # Example /// /// This example shows how to build an NFA that is permitted match invalid /// UTF-8. Without the additional syntax configuration here, compilation of /// `(?-u:.)` would fail because it is permitted to match invalid UTF-8. /// /// ``` /// use regex_automata::{ /// nfa::thompson::pikevm::PikeVM, /// util::syntax, /// Match, /// }; /// /// let re = PikeVM::builder() /// .syntax(syntax::Config::new().utf8(false)) /// .build(r"[a-z]+(?-u:.)")?; /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); /// /// let expected = Some(Match::must(0, 1..5)); /// re.captures(&mut cache, b"\xFFabc\xFF", &mut caps); /// assert_eq!(expected, caps.get_match()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[cfg(feature = "syntax")] pub fn compiler() -> Compiler { Compiler::new() } /// Returns an iterator over all pattern identifiers in this NFA. /// /// Pattern IDs are allocated in sequential order starting from zero, /// where the order corresponds to the order of patterns provided to the /// [`NFA::new_many`] constructor. /// /// # Example /// /// ``` /// use regex_automata::{nfa::thompson::NFA, PatternID}; /// /// let nfa = NFA::new_many(&["[0-9]+", "[a-z]+", "[A-Z]+"])?; /// let pids: Vec<PatternID> = nfa.patterns().collect(); /// assert_eq!(pids, vec![ /// PatternID::must(0), /// PatternID::must(1), /// PatternID::must(2), /// ]); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn patterns(&self) -> PatternIter<'_> { PatternIter { it: PatternID::iter(self.pattern_len()), _marker: core::marker::PhantomData, } } /// Returns the total number of regex patterns in this NFA. /// /// This may return zero if the NFA was constructed with no patterns. In /// this case, the NFA can never produce a match for any input. /// /// This is guaranteed to be no bigger than [`PatternID::LIMIT`] because /// NFA construction will fail if too many patterns are added. /// /// It is always true that `nfa.patterns().count() == nfa.pattern_len()`. /// /// # Example /// /// ``` /// use regex_automata::nfa::thompson::NFA; /// /// let nfa = NFA::new_many(&["[0-9]+", "[a-z]+", "[A-Z]+"])?; /// assert_eq!(3, nfa.pattern_len()); /// /// let nfa = NFA::never_match(); /// assert_eq!(0, nfa.pattern_len()); /// /// let nfa = NFA::always_match(); /// assert_eq!(1, nfa.pattern_len()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn pattern_len(&self) -> usize { self.0.start_pattern.len() } /// Return the state identifier of the initial anchored state of this NFA. /// /// The returned identifier is guaranteed to be a valid index into the /// slice returned by [`NFA::states`], and is also a valid argument to /// [`NFA::state`]. /// /// # Example /// /// This example shows a somewhat contrived example where we can easily /// predict the anchored starting state. 
/// /// ``` /// use regex_automata::nfa::thompson::{NFA, State, WhichCaptures}; /// /// let nfa = NFA::compiler() /// .configure(NFA::config().which_captures(WhichCaptures::None)) /// .build("a")?; /// let state = nfa.state(nfa.start_anchored()); /// match *state { /// State::ByteRange { trans } => { /// assert_eq!(b'a', trans.start); /// assert_eq!(b'a', trans.end); /// } /// _ => unreachable!("unexpected state"), /// } /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn start_anchored(&self) -> StateID { self.0.start_anchored } /// Return the state identifier of the initial unanchored state of this /// NFA. /// /// This is equivalent to the identifier returned by /// [`NFA::start_anchored`] when the NFA has no unanchored starting state. /// /// The returned identifier is guaranteed to be a valid index into the /// slice returned by [`NFA::states`], and is also a valid argument to /// [`NFA::state`]. /// /// # Example /// /// This example shows that the anchored and unanchored starting states /// are equivalent when an anchored NFA is built. /// /// ``` /// use regex_automata::nfa::thompson::NFA; /// /// let nfa = NFA::new("^a")?; /// assert_eq!(nfa.start_anchored(), nfa.start_unanchored()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn start_unanchored(&self) -> StateID { self.0.start_unanchored } /// Return the state identifier of the initial anchored state for the given /// pattern, or `None` if there is no pattern corresponding to the given /// identifier. /// /// If one uses the starting state for a particular pattern, then the only /// match that can be returned is for the corresponding pattern. /// /// The returned identifier is guaranteed to be a valid index into the /// slice returned by [`NFA::states`], and is also a valid argument to /// [`NFA::state`]. /// /// # Errors /// /// If the pattern doesn't exist in this NFA, then this returns an error. /// This occurs when `pid.as_usize() >= nfa.pattern_len()`. /// /// # Example /// /// This example shows that the anchored and unanchored starting states /// are equivalent when an anchored NFA is built. /// /// ``` /// use regex_automata::{nfa::thompson::NFA, PatternID}; /// /// let nfa = NFA::new_many(&["^a", "^b"])?; /// // The anchored and unanchored states for the entire NFA are the same, /// // since all of the patterns are anchored. /// assert_eq!(nfa.start_anchored(), nfa.start_unanchored()); /// // But the anchored starting states for each pattern are distinct, /// // because these starting states can only lead to matches for the /// // corresponding pattern. /// let anchored = Some(nfa.start_anchored()); /// assert_ne!(anchored, nfa.start_pattern(PatternID::must(0))); /// assert_ne!(anchored, nfa.start_pattern(PatternID::must(1))); /// // Requesting a pattern not in the NFA will result in None: /// assert_eq!(None, nfa.start_pattern(PatternID::must(2))); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn start_pattern(&self, pid: PatternID) -> Option<StateID> { self.0.start_pattern.get(pid.as_usize()).copied() } /// Get the byte class set for this NFA. /// /// A byte class set is a partitioning of this NFA's alphabet into /// equivalence classes. Any two bytes in the same equivalence class are /// guaranteed to never discriminate between a match or a non-match. (The /// partitioning may not be minimal.) /// /// Byte classes are used internally by this crate when building DFAs. 
/// Namely, among other optimizations, they enable a space optimization /// where the DFA's internal alphabet is defined over the equivalence /// classes of bytes instead of all possible byte values. The former is /// often quite a bit smaller than the latter, which permits the DFA to use /// less space for its transition table. #[inline] pub(crate) fn byte_class_set(&self) -> &ByteClassSet { &self.0.byte_class_set } /// Get the byte classes for this NFA. /// /// Byte classes represent a partitioning of this NFA's alphabet into /// equivalence classes. Any two bytes in the same equivalence class are /// guaranteed to never discriminate between a match or a non-match. (The /// partitioning may not be minimal.) /// /// Byte classes are used internally by this crate when building DFAs. /// Namely, among other optimizations, they enable a space optimization /// where the DFA's internal alphabet is defined over the equivalence /// classes of bytes instead of all possible byte values. The former is /// often quite a bit smaller than the latter, which permits the DFA to use /// less space for its transition table. /// /// # Example /// /// This example shows how to query the class of various bytes. /// /// ``` /// use regex_automata::nfa::thompson::NFA; /// /// let nfa = NFA::new("[a-z]+")?; /// let classes = nfa.byte_classes(); /// // 'a' and 'z' are in the same class for this regex. /// assert_eq!(classes.get(b'a'), classes.get(b'z')); /// // But 'a' and 'A' are not. /// assert_ne!(classes.get(b'a'), classes.get(b'A')); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn byte_classes(&self) -> &ByteClasses { &self.0.byte_classes } /// Return a reference to the NFA state corresponding to the given ID. /// /// This is a convenience routine for `nfa.states()[id]`. /// /// # Panics /// /// This panics when the given identifier does not reference a valid state. /// That is, when `id.as_usize() >= nfa.states().len()`. /// /// # Example /// /// The anchored state for a pattern will typically correspond to a /// capturing state for that pattern. (Although, this is not an API /// guarantee!) /// /// ``` /// use regex_automata::{nfa::thompson::{NFA, State}, PatternID}; /// /// let nfa = NFA::new("a")?; /// let state = nfa.state(nfa.start_pattern(PatternID::ZERO).unwrap()); /// match *state { /// State::Capture { slot, .. } => { /// assert_eq!(0, slot.as_usize()); /// } /// _ => unreachable!("unexpected state"), /// } /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn state(&self, id: StateID) -> &State { &self.states()[id] } /// Returns a slice of all states in this NFA. /// /// The slice returned is indexed by `StateID`. This provides a convenient /// way to access states while following transitions among those states. /// /// # Example /// /// This demonstrates that disabling UTF-8 mode can shrink the size of the /// NFA considerably in some cases, especially when using Unicode character /// classes. /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::nfa::thompson::NFA; /// /// let nfa_unicode = NFA::new(r"\w")?; /// let nfa_ascii = NFA::new(r"(?-u)\w")?; /// // Yes, a factor of 45 difference. No lie. /// assert!(40 * nfa_ascii.states().len() < nfa_unicode.states().len()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn states(&self) -> &[State] { &self.0.states } /// Returns the capturing group info for this NFA. 
/// /// The [`GroupInfo`] provides a way to map to and from capture index /// and capture name for each pattern. It also provides a mapping from /// each of the capturing groups in every pattern to their corresponding /// slot offsets encoded in [`State::Capture`] states. /// /// Note that `GroupInfo` uses reference counting internally, such that /// cloning a `GroupInfo` is very cheap. /// /// # Example /// /// This example shows how to get a list of all capture group names for /// a particular pattern. /// /// ``` /// use regex_automata::{nfa::thompson::NFA, PatternID}; /// /// let nfa = NFA::new(r"(a)(?P<foo>b)(c)(d)(?P<bar>e)")?; /// // The first is the implicit group that is always unnamed. The next /// // 5 groups are the explicit groups found in the concrete syntax above. /// let expected = vec![None, None, Some("foo"), None, None, Some("bar")]; /// let got: Vec<Option<&str>> = /// nfa.group_info().pattern_names(PatternID::ZERO).collect(); /// assert_eq!(expected, got); /// /// // Using an invalid pattern ID will result in nothing yielded. /// let got = nfa.group_info().pattern_names(PatternID::must(999)).count(); /// assert_eq!(0, got); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn group_info(&self) -> &GroupInfo { &self.0.group_info() } /// Returns true if and only if this NFA has at least one /// [`Capture`](State::Capture) in its sequence of states. /// /// This is useful as a way to perform a quick test before attempting /// something that does or does not require capture states. For example, /// some regex engines (like the PikeVM) require capture states in order to /// work at all. /// /// # Example /// /// This example shows a few different NFAs and whether they have captures /// or not. /// /// ``` /// use regex_automata::nfa::thompson::{NFA, WhichCaptures}; /// /// // Obviously has capture states. /// let nfa = NFA::new("(a)")?; /// assert!(nfa.has_capture()); /// /// // Less obviously has capture states, because every pattern has at /// // least one anonymous capture group corresponding to the match for the /// // entire pattern. /// let nfa = NFA::new("a")?; /// assert!(nfa.has_capture()); /// /// // Other than hand building your own NFA, this is the only way to build /// // an NFA without capturing groups. In general, you should only do this /// // if you don't intend to use any of the NFA-oriented regex engines. /// // Overall, capturing groups don't have many downsides. Although they /// // can add a bit of noise to simple NFAs, so it can be nice to disable /// // them for debugging purposes. /// // /// // Notice that 'has_capture' is false here even when we have an /// // explicit capture group in the pattern. /// let nfa = NFA::compiler() /// .configure(NFA::config().which_captures(WhichCaptures::None)) /// .build("(a)")?; /// assert!(!nfa.has_capture()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn has_capture(&self) -> bool { self.0.has_capture } /// Returns true if and only if this NFA can match the empty string. /// When it returns false, all possible matches are guaranteed to have a /// non-zero length. /// /// This is useful as a cheap way to know whether code needs to handle the /// case of a zero length match. This is particularly important when UTF-8 /// modes are enabled, as when UTF-8 mode is enabled, empty matches that /// split a codepoint must never be reported. 
This extra handling can /// sometimes be costly, and since regexes matching an empty string are /// somewhat rare, it can be beneficial to treat such regexes specially. /// /// # Example /// /// This example shows a few different NFAs and whether they match the /// empty string or not. Notice the empty string isn't merely a matter /// of a string of length literally `0`, but rather, whether a match can /// occur between specific pairs of bytes. /// /// ``` /// use regex_automata::{nfa::thompson::NFA, util::syntax}; /// /// // The empty regex matches the empty string. /// let nfa = NFA::new("")?; /// assert!(nfa.has_empty(), "empty matches empty"); /// // The '+' repetition operator requires at least one match, and so /// // does not match the empty string. /// let nfa = NFA::new("a+")?; /// assert!(!nfa.has_empty(), "+ does not match empty"); /// // But the '*' repetition operator does. /// let nfa = NFA::new("a*")?; /// assert!(nfa.has_empty(), "* does match empty"); /// // And wrapping '+' in an operator that can match an empty string also /// // causes it to match the empty string too. /// let nfa = NFA::new("(a+)*")?; /// assert!(nfa.has_empty(), "+ inside of * matches empty"); /// /// // If a regex is just made of a look-around assertion, even if the /// // assertion requires some kind of non-empty string around it (such as /// // \b), then it is still treated as if it matches the empty string. /// // Namely, if a match occurs of just a look-around assertion, then the /// // match returned is empty. /// let nfa = NFA::compiler() /// .syntax(syntax::Config::new().utf8(false)) /// .build(r"^$\A\z\b\B(?-u:\b\B)")?; /// assert!(nfa.has_empty(), "assertions match empty"); /// // Even when an assertion is wrapped in a '+', it still matches the /// // empty string. /// let nfa = NFA::new(r"\b+")?; /// assert!(nfa.has_empty(), "+ of an assertion matches empty"); /// /// // An alternation with even one branch that can match the empty string /// // is also said to match the empty string overall. /// let nfa = NFA::new("foo|(bar)?|quux")?; /// assert!(nfa.has_empty(), "alternations can match empty"); /// /// // An NFA that matches nothing does not match the empty string. /// let nfa = NFA::new("[a&&b]")?; /// assert!(!nfa.has_empty(), "never matching means not matching empty"); /// // But if it's wrapped in something that doesn't require a match at /// // all, then it can match the empty string! /// let nfa = NFA::new("[a&&b]*")?; /// assert!(nfa.has_empty(), "* on never-match still matches empty"); /// // Since a '+' requires a match, using it on something that can never /// // match will itself produce a regex that can never match anything, /// // and thus does not match the empty string. /// let nfa = NFA::new("[a&&b]+")?; /// assert!(!nfa.has_empty(), "+ on never-match still matches nothing"); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn has_empty(&self) -> bool { self.0.has_empty } /// Whether UTF-8 mode is enabled for this NFA or not. /// /// When UTF-8 mode is enabled, all matches reported by a regex engine /// derived from this NFA are guaranteed to correspond to spans of valid /// UTF-8. This includes zero-width matches. For example, the regex engine /// must guarantee that the empty regex will not match at the positions /// between code units in the UTF-8 encoding of a single codepoint. /// /// See [`Config::utf8`] for more information. /// /// This is enabled by default. 
/// /// # Example /// /// This example shows how UTF-8 mode can impact the match spans that may /// be reported in certain cases. /// /// ``` /// use regex_automata::{ /// nfa::thompson::{self, pikevm::PikeVM}, /// Match, Input, /// }; /// /// let re = PikeVM::new("")?; /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); /// /// // UTF-8 mode is enabled by default. /// let mut input = Input::new("☃"); /// re.search(&mut cache, &input, &mut caps); /// assert_eq!(Some(Match::must(0, 0..0)), caps.get_match()); /// /// // Even though an empty regex matches at 1..1, our next match is /// // 3..3 because 1..1 and 2..2 split the snowman codepoint (which is /// // three bytes long). /// input.set_start(1); /// re.search(&mut cache, &input, &mut caps); /// assert_eq!(Some(Match::must(0, 3..3)), caps.get_match()); /// /// // But if we disable UTF-8, then we'll get matches at 1..1 and 2..2: /// let re = PikeVM::builder() /// .thompson(thompson::Config::new().utf8(false)) /// .build("")?; /// re.search(&mut cache, &input, &mut caps); /// assert_eq!(Some(Match::must(0, 1..1)), caps.get_match()); /// /// input.set_start(2); /// re.search(&mut cache, &input, &mut caps); /// assert_eq!(Some(Match::must(0, 2..2)), caps.get_match()); /// /// input.set_start(3); /// re.search(&mut cache, &input, &mut caps); /// assert_eq!(Some(Match::must(0, 3..3)), caps.get_match()); /// /// input.set_start(4); /// re.search(&mut cache, &input, &mut caps); /// assert_eq!(None, caps.get_match()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn is_utf8(&self) -> bool { self.0.utf8 } /// Returns true when this NFA is meant to be matched in reverse. /// /// Generally speaking, when this is true, it means the NFA is supposed to /// be used in conjunction with moving backwards through the haystack. That /// is, from a higher memory address to a lower memory address. /// /// It is often the case that lower level routines dealing with an NFA /// don't need to care about whether it is "meant" to be matched in reverse /// or not. However, there are some specific cases where it matters. For /// example, the implementation of CRLF-aware `^` and `$` line anchors /// needs to know whether the search is in the forward or reverse /// direction. In the forward direction, neither `^` nor `$` should match /// when a `\r` has been seen previously and a `\n` is next. However, in /// the reverse direction, neither `^` nor `$` should match when a `\n` /// has been seen previously and a `\r` is next. This fundamentally changes /// how the state machine is constructed, and thus needs to be altered /// based on the direction of the search. /// /// This is automatically set when using a [`Compiler`] with a configuration /// where [`Config::reverse`] is enabled. If you're building your own NFA /// by hand via a [`Builder`], then you are responsible for setting this yourself. #[inline] pub fn is_reverse(&self) -> bool { self.0.reverse } /// Returns true if and only if all starting states for this NFA correspond /// to the beginning of an anchored search. /// /// Typically, an NFA will have both an anchored and an unanchored starting /// state. Namely, because it tends to be useful to have both and the cost /// of having an unanchored starting state is almost zero (for an NFA). /// However, if all patterns in the NFA are themselves anchored, then even /// the unanchored starting state will correspond to an anchored search /// since the pattern doesn't permit anything else. 
/// /// # Example /// /// This example shows a few different scenarios where this method's /// return value varies. /// /// ``` /// use regex_automata::nfa::thompson::NFA; /// /// // The unanchored starting state permits matching this pattern anywhere /// // in a haystack, instead of just at the beginning. /// let nfa = NFA::new("a")?; /// assert!(!nfa.is_always_start_anchored()); /// /// // In this case, the pattern is itself anchored, so there is no way /// // to run an unanchored search. /// let nfa = NFA::new("^a")?; /// assert!(nfa.is_always_start_anchored()); /// /// // When multiline mode is enabled, '^' can match at the start of a line /// // in addition to the start of a haystack, so an unanchored search is /// // actually possible. /// let nfa = NFA::new("(?m)^a")?; /// assert!(!nfa.is_always_start_anchored()); /// /// // Weird cases also work. A pattern is only considered anchored if all /// // matches may only occur at the start of a haystack. /// let nfa = NFA::new("(^a)|a")?; /// assert!(!nfa.is_always_start_anchored()); /// /// // When multiple patterns are present, if they are all anchored, then /// // the NFA is always anchored too. /// let nfa = NFA::new_many(&["^a", "^b", "^c"])?; /// assert!(nfa.is_always_start_anchored()); /// /// // But if one pattern is unanchored, then the NFA must permit an /// // unanchored search. /// let nfa = NFA::new_many(&["^a", "b", "^c"])?; /// assert!(!nfa.is_always_start_anchored()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn is_always_start_anchored(&self) -> bool { self.start_anchored() == self.start_unanchored() } /// Returns the look-around matcher associated with this NFA. /// /// A look-around matcher determines how to match look-around assertions. /// In particular, some assertions are configurable. For example, the /// `(?m:^)` and `(?m:$)` assertions can have their line terminator changed /// from the default of `\n` to any other byte. /// /// If the NFA was built using a [`Compiler`], then this matcher /// can be set via the [`Config::look_matcher`] configuration /// knob. Otherwise, if you've built an NFA by hand, it is set via /// [`Builder::set_look_matcher`]. /// /// # Example /// /// This shows how to change the line terminator for multi-line assertions. /// /// ``` /// use regex_automata::{ /// nfa::thompson::{self, pikevm::PikeVM}, /// util::look::LookMatcher, /// Match, Input, /// }; /// /// let mut lookm = LookMatcher::new(); /// lookm.set_line_terminator(b'\x00'); /// /// let re = PikeVM::builder() /// .thompson(thompson::Config::new().look_matcher(lookm)) /// .build(r"(?m)^[a-z]+$")?; /// let mut cache = re.create_cache(); /// /// // Multi-line assertions now use NUL as a terminator. /// assert_eq!( /// Some(Match::must(0, 1..4)), /// re.find(&mut cache, b"\x00abc\x00"), /// ); /// // ... and \n is no longer recognized as a terminator. /// assert_eq!( /// None, /// re.find(&mut cache, b"\nabc\n"), /// ); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn look_matcher(&self) -> &LookMatcher { &self.0.look_matcher } /// Returns the union of all look-around assertions used throughout this /// NFA. When the returned set is empty, it implies that the NFA has no /// look-around assertions and thus zero conditional epsilon transitions. /// /// This is useful in some cases enabling optimizations. It is not /// unusual, for example, for optimizations to be of the form, "for any /// regex with zero conditional epsilon transitions, do ..." where "..." 
/// is some kind of optimization. /// /// This isn't only helpful for optimizations either. Sometimes look-around /// assertions are difficult to support. For example, many of the DFAs in /// this crate don't support Unicode word boundaries or handle them using /// heuristics. Handling that correctly typically requires some kind of /// cheap check of whether the NFA has a Unicode word boundary in the first /// place. /// /// # Example /// /// This example shows how this routine varies based on the regex pattern: /// /// ``` /// use regex_automata::{nfa::thompson::NFA, util::look::Look}; /// /// // No look-around at all. /// let nfa = NFA::new("a")?; /// assert!(nfa.look_set_any().is_empty()); /// /// // When multiple patterns are present, since this returns the union, /// // it will include look-around assertions that only appear in one /// // pattern. /// let nfa = NFA::new_many(&["a", "b", "a^b", "c"])?; /// assert!(nfa.look_set_any().contains(Look::Start)); /// /// // Some groups of assertions have various shortcuts. For example: /// let nfa = NFA::new(r"(?-u:\b)")?; /// assert!(nfa.look_set_any().contains_word()); /// assert!(!nfa.look_set_any().contains_word_unicode()); /// assert!(nfa.look_set_any().contains_word_ascii()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn look_set_any(&self) -> LookSet { self.0.look_set_any } /// Returns the union of all prefix look-around assertions for every /// pattern in this NFA. When the returned set is empty, it implies none of /// the patterns require moving through a conditional epsilon transition /// before inspecting the first byte in the haystack. /// /// This can be useful for determining what kinds of assertions need to be /// satisfied at the beginning of a search. For example, typically DFAs /// in this crate will build a distinct starting state for each possible /// starting configuration that might result in look-around assertions /// being satisfied differently. However, if the set returned here is /// empty, then you know that the start state is invariant because there /// are no conditional epsilon transitions to consider. /// /// # Example /// /// This example shows how this routine varies based on the regex pattern: /// /// ``` /// use regex_automata::{nfa::thompson::NFA, util::look::Look}; /// /// // No look-around at all. /// let nfa = NFA::new("a")?; /// assert!(nfa.look_set_prefix_any().is_empty()); /// /// // When multiple patterns are present, since this returns the union, /// // it will include look-around assertions that only appear in one /// // pattern. But it will only include assertions that are in the prefix /// // of a pattern. For example, this includes '^' but not '$' even though /// // '$' does appear. /// let nfa = NFA::new_many(&["a", "b", "^ab$", "c"])?; /// assert!(nfa.look_set_prefix_any().contains(Look::Start)); /// assert!(!nfa.look_set_prefix_any().contains(Look::End)); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn look_set_prefix_any(&self) -> LookSet { self.0.look_set_prefix_any } // FIXME: The `look_set_prefix_all` computation was not correct, and it // seemed a little tricky to fix it. Since I wasn't actually using it for // anything, I just decided to remove it in the run up to the regex 1.9 // release. If you need this, please file an issue. /* /// Returns the intersection of all prefix look-around assertions for every /// pattern in this NFA. 
When the returned set is empty, it implies at /// least one of the patterns does not require moving through a conditional /// epsilon transition before inspecting the first byte in the haystack. /// Conversely, when the set contains an assertion, it implies that every /// pattern in the NFA also contains that assertion in its prefix. /// /// This can be useful for determining what kinds of assertions need to be /// satisfied at the beginning of a search. For example, if you know that /// [`Look::Start`] is in the prefix intersection set returned here, then /// you know that all searches, regardless of input configuration, will be /// anchored. /// /// # Example /// /// This example shows how this routine varies based on the regex pattern: /// /// ``` /// use regex_automata::{nfa::thompson::NFA, util::look::Look}; /// /// // No look-around at all. /// let nfa = NFA::new("a")?; /// assert!(nfa.look_set_prefix_all().is_empty()); /// /// // When multiple patterns are present, since this returns the /// // intersection, it will only include assertions present in every /// // prefix, and only the prefix. /// let nfa = NFA::new_many(&["^a$", "^b$", "$^ab$", "^c$"])?; /// assert!(nfa.look_set_prefix_all().contains(Look::Start)); /// assert!(!nfa.look_set_prefix_all().contains(Look::End)); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn look_set_prefix_all(&self) -> LookSet { self.0.look_set_prefix_all } */ /// Returns the memory usage, in bytes, of this NFA. /// /// This does **not** include the stack size used up by this NFA. To /// compute that, use `std::mem::size_of::<NFA>()`. /// /// # Example /// /// This example shows that large Unicode character classes can use quite /// a bit of memory. /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::nfa::thompson::NFA; /// /// let nfa_unicode = NFA::new(r"\w")?; /// let nfa_ascii = NFA::new(r"(?-u:\w)")?; /// /// assert!(10 * nfa_ascii.memory_usage() < nfa_unicode.memory_usage()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn memory_usage(&self) -> usize { use core::mem::size_of; size_of::<Inner>() // allocated on the heap via Arc + self.0.states.len() * size_of::<State>() + self.0.start_pattern.len() * size_of::<StateID>() + self.0.group_info.memory_usage() + self.0.memory_extra } } impl fmt::Debug for NFA { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.0.fmt(f) } } /// The "inner" part of the NFA. We split this part out so that we can easily /// wrap it in an `Arc` above in the definition of `NFA`. /// /// See builder.rs for the code that actually builds this type. This module /// does provide (internal) mutable methods for adding things to this /// NFA before finalizing it, but the high level construction process is /// controlled by the builder abstraction. (Which is complicated enough to /// get its own module.) #[derive(Default)] pub(super) struct Inner { /// The state sequence. This sequence is guaranteed to be indexable by all /// starting state IDs, and it is also guaranteed to contain at most one /// `Match` state for each pattern compiled into this NFA. (A pattern may /// not have a corresponding `Match` state if a `Match` state is impossible /// to reach.) states: Vec<State>, /// The anchored starting state of this NFA. start_anchored: StateID, /// The unanchored starting state of this NFA. start_unanchored: StateID, /// The starting states for each individual pattern. 
Starting at any /// of these states will result in only an anchored search for the /// corresponding pattern. The vec is indexed by pattern ID. When the NFA /// contains a single regex, then `start_pattern[0]` and `start_anchored` /// are always equivalent. start_pattern: Vec<StateID>, /// Info about the capturing groups in this NFA. This is responsible for /// mapping groups to slots, mapping groups to names and names to groups. group_info: GroupInfo, /// A representation of equivalence classes over the transitions in this /// NFA. Two bytes in the same equivalence class must not discriminate /// between a match or a non-match. This map can be used to shrink the /// total size of a DFA's transition table with a small match-time cost. /// /// Note that the NFA's transitions are *not* defined in terms of these /// equivalence classes. The NFA's transitions are defined on the original /// byte values. For the most part, this is because they wouldn't really /// help the NFA much since the NFA already uses a sparse representation /// to represent transitions. Byte classes are most effective in a dense /// representation. byte_class_set: ByteClassSet, /// This is generated from `byte_class_set`, and essentially represents the /// same thing but supports different access patterns. Namely, this permits /// looking up the equivalence class of a byte very cheaply. /// /// Ideally we would just store this, but because of annoying code /// structure reasons, we keep both this and `byte_class_set` around for /// now. I think I would prefer that `byte_class_set` were computed in the /// `Builder`, but right now, we compute it as states are added to the /// `NFA`. byte_classes: ByteClasses, /// Whether this NFA has a `Capture` state anywhere. has_capture: bool, /// When the empty string is in the language matched by this NFA. has_empty: bool, /// Whether UTF-8 mode is enabled for this NFA. Briefly, this means that /// all non-empty matches produced by this NFA correspond to spans of valid /// UTF-8, and any empty matches produced by this NFA that split a UTF-8 /// encoded codepoint should be filtered out by the corresponding regex /// engine. utf8: bool, /// Whether this NFA is meant to be matched in reverse or not. reverse: bool, /// The matcher to be used for look-around assertions. look_matcher: LookMatcher, /// The union of all look-around assertions that occur anywhere within /// this NFA. If this set is empty, then it means there are precisely zero /// conditional epsilon transitions in the NFA. look_set_any: LookSet, /// The union of all look-around assertions that occur as a zero-length /// prefix for any of the patterns in this NFA. look_set_prefix_any: LookSet, /* /// The intersection of all look-around assertions that occur as a /// zero-length prefix for any of the patterns in this NFA. look_set_prefix_all: LookSet, */ /// Heap memory used indirectly by NFA states and other things (like the /// various capturing group representations above). Since each state /// might use a different amount of heap, we need to keep track of this /// incrementally. memory_extra: usize, } impl Inner { /// Runs any last finalization bits and turns this into a full NFA. pub(super) fn into_nfa(mut self) -> NFA { self.byte_classes = self.byte_class_set.byte_classes(); // Do epsilon closure from the start state of every pattern in order // to compute various properties such as look-around assertions and // whether the empty string can be matched. 
let mut stack = vec![]; let mut seen = SparseSet::new(self.states.len()); for &start_id in self.start_pattern.iter() { stack.push(start_id); seen.clear(); // let mut prefix_all = LookSet::full(); let mut prefix_any = LookSet::empty(); while let Some(sid) = stack.pop() { if !seen.insert(sid) { continue; } match self.states[sid] { State::ByteRange { .. } | State::Dense { .. } | State::Fail => continue, State::Sparse(_) => { // This snippet below will rewrite this sparse state // as a dense state. By doing it here, we apply this // optimization to all hot "sparse" states since these // are the states that are reachable from the start // state via an epsilon closure. // // Unfortunately, this optimization did not seem to // help much in some very limited ad hoc benchmarking. // // I left the 'Dense' state type in place in case we // want to revisit this, but I suspect the real way // to make forward progress is a more fundamental // rearchitecting of how data in the NFA is laid out. // I think we should consider a single contiguous // allocation instead of all this indirection and // potential heap allocations for every state. But this // is a large re-design and will require API breaking // changes. // self.memory_extra -= self.states[sid].memory_usage(); // let trans = DenseTransitions::from_sparse(sparse); // self.states[sid] = State::Dense(trans); // self.memory_extra += self.states[sid].memory_usage(); continue; } State::Match { .. } => self.has_empty = true, State::Look { look, next } => { prefix_any = prefix_any.insert(look); stack.push(next); } State::Union { ref alternates } => { // Order doesn't matter here, since we're just dealing // with look-around sets. But if we do richer analysis // here that needs to care about preference order, then // this should be done in reverse. stack.extend(alternates.iter()); } State::BinaryUnion { alt1, alt2 } => { stack.push(alt2); stack.push(alt1); } State::Capture { next, .. } => { stack.push(next); } } } self.look_set_prefix_any = self.look_set_prefix_any.union(prefix_any); } NFA(Arc::new(self)) } /// Returns the capturing group info for this NFA. pub(super) fn group_info(&self) -> &GroupInfo { &self.group_info } /// Add the given state to this NFA after allocating a fresh identifier for /// it. /// /// This panics if too many states are added such that a fresh identifier /// could not be created. (Currently, the only caller of this routine is /// a `Builder`, and it upholds this invariant.) pub(super) fn add(&mut self, state: State) -> StateID { match state { State::ByteRange { ref trans } => { self.byte_class_set.set_range(trans.start, trans.end); } State::Sparse(ref sparse) => { for trans in sparse.transitions.iter() { self.byte_class_set.set_range(trans.start, trans.end); } } State::Dense { .. } => unreachable!(), State::Look { look, .. } => { self.look_matcher .add_to_byteset(look, &mut self.byte_class_set); self.look_set_any = self.look_set_any.insert(look); } State::Capture { .. } => { self.has_capture = true; } State::Union { .. } | State::BinaryUnion { .. } | State::Fail | State::Match { .. } => {} } let id = StateID::new(self.states.len()).unwrap(); self.memory_extra += state.memory_usage(); self.states.push(state); id } /// Set the starting state identifiers for this NFA. /// /// `start_anchored` and `start_unanchored` may be equivalent. When they /// are, then the NFA can only execute anchored searches. This might /// occur, for example, for patterns that are unconditionally anchored. /// e.g., `^foo`. 
pub(super) fn set_starts( &mut self, start_anchored: StateID, start_unanchored: StateID, start_pattern: &[StateID], ) { self.start_anchored = start_anchored; self.start_unanchored = start_unanchored; self.start_pattern = start_pattern.to_vec(); } /// Sets the UTF-8 mode of this NFA. pub(super) fn set_utf8(&mut self, yes: bool) { self.utf8 = yes; } /// Sets the reverse mode of this NFA. pub(super) fn set_reverse(&mut self, yes: bool) { self.reverse = yes; } /// Sets the look-around assertion matcher for this NFA. pub(super) fn set_look_matcher(&mut self, m: LookMatcher) { self.look_matcher = m; } /// Set the capturing groups for this NFA. /// /// The given slice should contain the capturing groups for each pattern, /// The capturing groups in turn should correspond to the total number of /// capturing groups in the pattern, including the anonymous first capture /// group for each pattern. If a capturing group does have a name, then it /// should be provided as a Arc<str>. /// /// This returns an error if a corresponding `GroupInfo` could not be /// built. pub(super) fn set_captures( &mut self, captures: &[Vec<Option<Arc<str>>>], ) -> Result<(), GroupInfoError> { self.group_info = GroupInfo::new( captures.iter().map(|x| x.iter().map(|y| y.as_ref())), )?; Ok(()) } /// Remap the transitions in every state of this NFA using the given map. /// The given map should be indexed according to state ID namespace used by /// the transitions of the states currently in this NFA. /// /// This is particularly useful to the NFA builder, since it is convenient /// to add NFA states in order to produce their final IDs. Then, after all /// of the intermediate "empty" states (unconditional epsilon transitions) /// have been removed from the builder's representation, we can re-map all /// of the transitions in the states already added to their final IDs. pub(super) fn remap(&mut self, old_to_new: &[StateID]) { for state in &mut self.states { state.remap(old_to_new); } self.start_anchored = old_to_new[self.start_anchored]; self.start_unanchored = old_to_new[self.start_unanchored]; for id in self.start_pattern.iter_mut() { *id = old_to_new[*id]; } } } impl fmt::Debug for Inner { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { writeln!(f, "thompson::NFA(")?; for (sid, state) in self.states.iter().with_state_ids() { let status = if sid == self.start_anchored { '^' } else if sid == self.start_unanchored { '>' } else { ' ' }; writeln!(f, "{}{:06?}: {:?}", status, sid.as_usize(), state)?; } let pattern_len = self.start_pattern.len(); if pattern_len > 1 { writeln!(f, "")?; for pid in 0..pattern_len { let sid = self.start_pattern[pid]; writeln!(f, "START({:06?}): {:?}", pid, sid.as_usize())?; } } writeln!(f, "")?; writeln!( f, "transition equivalence classes: {:?}", self.byte_classes, )?; writeln!(f, ")")?; Ok(()) } } /// A state in an NFA. /// /// In theory, it can help to conceptualize an `NFA` as a graph consisting of /// `State`s. Each `State` contains its complete set of outgoing transitions. /// /// In practice, it can help to conceptualize an `NFA` as a sequence of /// instructions for a virtual machine. Each `State` says what to do and where /// to go next. /// /// Strictly speaking, the practical interpretation is the most correct one, /// because of the [`Capture`](State::Capture) state. Namely, a `Capture` /// state always forwards execution to another state unconditionally. 
Its only /// purpose is to cause a side effect: the recording of the current input /// position at a particular location in memory. In this sense, an `NFA` /// has more power than a theoretical non-deterministic finite automaton. /// /// For most uses of this crate, it is likely that one may never even need to /// be aware of this type at all. The main use cases for looking at `State`s /// directly are if you need to write your own search implementation or if you /// need to do some kind of analysis on the NFA. #[derive(Clone, Eq, PartialEq)] pub enum State { /// A state with a single transition that can only be taken if the current /// input symbol is in a particular range of bytes. ByteRange { /// The transition from this state to the next. trans: Transition, }, /// A state with possibly many transitions represented in a sparse fashion. /// Transitions are non-overlapping and ordered lexicographically by input /// range. /// /// In practice, this is used for encoding UTF-8 automata. Its presence is /// primarily an optimization that avoids many additional unconditional /// epsilon transitions (via [`Union`](State::Union) states), and thus /// decreases the overhead of traversing the NFA. This can improve both /// matching time and DFA construction time. Sparse(SparseTransitions), /// A dense representation of a state with multiple transitions. Dense(DenseTransitions), /// A conditional epsilon transition satisfied via some sort of /// look-around. Look-around is limited to anchor and word boundary /// assertions. /// /// Look-around states are meant to be evaluated while performing epsilon /// closure (computing the set of states reachable from a particular state /// via only epsilon transitions). If the current position in the haystack /// satisfies the look-around assertion, then you're permitted to follow /// that epsilon transition. Look { /// The look-around assertion that must be satisfied before moving /// to `next`. look: Look, /// The state to transition to if the look-around assertion is /// satisfied. next: StateID, }, /// An alternation such that there exists an epsilon transition to all /// states in `alternates`, where matches found via earlier transitions /// are preferred over later transitions. Union { /// An ordered sequence of unconditional epsilon transitions to other /// states. Transitions earlier in the sequence are preferred over /// transitions later in the sequence. alternates: Box<[StateID]>, }, /// An alternation such that there exists precisely two unconditional /// epsilon transitions, where matches found via `alt1` are preferred over /// matches found via `alt2`. /// /// This state exists as a common special case of Union where there are /// only two alternates. In this case, we don't need any allocations to /// represent the state. This saves a bit of memory and also saves an /// additional memory access when traversing the NFA. BinaryUnion { /// An unconditional epsilon transition to another NFA state. This /// is preferred over `alt2`. alt1: StateID, /// An unconditional epsilon transition to another NFA state. Matches /// reported via this transition should only be reported if no matches /// were found by following `alt1`. alt2: StateID, }, /// An empty state that records a capture location. 
/// /// From the perspective of finite automata, this is precisely equivalent /// to an unconditional epsilon transition, but serves the purpose of /// instructing NFA simulations to record additional state when the finite /// state machine passes through this epsilon transition. /// /// `slot` in this context refers to the specific capture group slot /// offset that is being recorded. Each capturing group has two slots /// corresponding to the start and end of the matching portion of that /// group. /// /// The pattern ID and capture group index are also included in this state /// in case they are useful. But mostly, all you'll need is `next` and /// `slot`. Capture { /// The state to transition to, unconditionally. next: StateID, /// The pattern ID that this capture belongs to. pattern_id: PatternID, /// The capture group index that this capture belongs to. Capture group /// indices are local to each pattern. For example, when capturing /// groups are enabled, every pattern has a capture group at index /// `0`. group_index: SmallIndex, /// The slot index for this capture. Every capturing group has two /// slots: one for the start haystack offset and one for the end /// haystack offset. Unlike capture group indices, slot indices are /// global across all patterns in this NFA. That is, each slot belongs /// to a single pattern, but there is only one slot at index `i`. slot: SmallIndex, }, /// A state that cannot be transitioned out of. This is useful for cases /// where you want to prevent matching from occurring. For example, if your /// regex parser permits empty character classes, then one could choose /// a `Fail` state to represent them. (An empty character class can be /// thought of as an empty set. Since nothing is in an empty set, they can /// never match anything.) Fail, /// A match state. There is at least one such occurrence of this state for /// each regex that can match that is in this NFA. Match { /// The matching pattern ID. pattern_id: PatternID, }, } impl State { /// Returns true if and only if this state contains one or more epsilon /// transitions. /// /// In practice, a state has no outgoing transitions (like `Match`), has /// only non-epsilon transitions (like `ByteRange`) or has only epsilon /// transitions (like `Union`). /// /// # Example /// /// ``` /// use regex_automata::{ /// nfa::thompson::{State, Transition}, /// util::primitives::{PatternID, StateID, SmallIndex}, /// }; /// /// // Capture states are epsilon transitions. /// let state = State::Capture { /// next: StateID::ZERO, /// pattern_id: PatternID::ZERO, /// group_index: SmallIndex::ZERO, /// slot: SmallIndex::ZERO, /// }; /// assert!(state.is_epsilon()); /// /// // ByteRange states are not. /// let state = State::ByteRange { /// trans: Transition { start: b'a', end: b'z', next: StateID::ZERO }, /// }; /// assert!(!state.is_epsilon()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn is_epsilon(&self) -> bool { match *self { State::ByteRange { .. } | State::Sparse { .. } | State::Dense { .. } | State::Fail | State::Match { .. } => false, State::Look { .. } | State::Union { .. } | State::BinaryUnion { .. } | State::Capture { .. } => true, } } /// Returns the heap memory usage of this NFA state in bytes. fn memory_usage(&self) -> usize { match *self { State::ByteRange { .. } | State::Look { .. } | State::BinaryUnion { .. } | State::Capture { .. } | State::Match { .. 
} | State::Fail => 0, State::Sparse(SparseTransitions { ref transitions }) => { transitions.len() * mem::size_of::<Transition>() } State::Dense { .. } => 256 * mem::size_of::<StateID>(), State::Union { ref alternates } => { alternates.len() * mem::size_of::<StateID>() } } } /// Remap the transitions in this state using the given map. Namely, the /// given map should be indexed according to the transitions currently /// in this state. /// /// This is used during the final phase of the NFA compiler, which turns /// its intermediate NFA into the final NFA. fn remap(&mut self, remap: &[StateID]) { match *self { State::ByteRange { ref mut trans } => { trans.next = remap[trans.next] } State::Sparse(SparseTransitions { ref mut transitions }) => { for t in transitions.iter_mut() { t.next = remap[t.next]; } } State::Dense(DenseTransitions { ref mut transitions }) => { for sid in transitions.iter_mut() { *sid = remap[*sid]; } } State::Look { ref mut next, .. } => *next = remap[*next], State::Union { ref mut alternates } => { for alt in alternates.iter_mut() { *alt = remap[*alt]; } } State::BinaryUnion { ref mut alt1, ref mut alt2 } => { *alt1 = remap[*alt1]; *alt2 = remap[*alt2]; } State::Capture { ref mut next, .. } => *next = remap[*next], State::Fail => {} State::Match { .. } => {} } } } impl fmt::Debug for State { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { State::ByteRange { ref trans } => trans.fmt(f), State::Sparse(SparseTransitions { ref transitions }) => { let rs = transitions .iter() .map(|t| format!("{:?}", t)) .collect::<Vec<String>>() .join(", "); write!(f, "sparse({})", rs) } State::Dense(ref dense) => { write!(f, "dense(")?; for (i, t) in dense.iter().enumerate() { if i > 0 { write!(f, ", ")?; } write!(f, "{:?}", t)?; } write!(f, ")") } State::Look { ref look, next } => { write!(f, "{:?} => {:?}", look, next.as_usize()) } State::Union { ref alternates } => { let alts = alternates .iter() .map(|id| format!("{:?}", id.as_usize())) .collect::<Vec<String>>() .join(", "); write!(f, "union({})", alts) } State::BinaryUnion { alt1, alt2 } => { write!( f, "binary-union({}, {})", alt1.as_usize(), alt2.as_usize() ) } State::Capture { next, pattern_id, group_index, slot } => { write!( f, "capture(pid={:?}, group={:?}, slot={:?}) => {:?}", pattern_id.as_usize(), group_index.as_usize(), slot.as_usize(), next.as_usize(), ) } State::Fail => write!(f, "FAIL"), State::Match { pattern_id } => { write!(f, "MATCH({:?})", pattern_id.as_usize()) } } } } /// A sequence of transitions used to represent a sparse state. /// /// This is the primary representation of a [`Sparse`](State::Sparse) state. /// It corresponds to a sorted sequence of transitions with non-overlapping /// byte ranges. If the byte at the current position in the haystack matches /// one of the byte ranges, then the finite state machine should take the /// corresponding transition. #[derive(Clone, Debug, Eq, PartialEq)] pub struct SparseTransitions { /// The sorted sequence of non-overlapping transitions. pub transitions: Box<[Transition]>, } impl SparseTransitions { /// This follows the matching transition for a particular byte. /// /// The matching transition is found by looking for a matching byte /// range (there is at most one) corresponding to the position `at` in /// `haystack`. /// /// If `at >= haystack.len()`, then this returns `None`. 
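    ///
    /// # Example
    ///
    /// This is a small sketch that builds a `SparseTransitions` by hand with
    /// a single `a-z` range. (In practice, these states are produced by the
    /// NFA compiler rather than constructed directly.)
    ///
    /// ```
    /// use regex_automata::{
    ///     nfa::thompson::{SparseTransitions, Transition},
    ///     util::primitives::StateID,
    /// };
    ///
    /// let sparse = SparseTransitions {
    ///     transitions: vec![
    ///         Transition { start: b'a', end: b'z', next: StateID::ZERO },
    ///     ].into_boxed_slice(),
    /// };
    /// // 'h' falls in a-z, so the transition matches.
    /// assert_eq!(Some(StateID::ZERO), sparse.matches(b"hay", 0));
    /// // '!' does not fall in a-z.
    /// assert_eq!(None, sparse.matches(b"!", 0));
    /// // Positions at or past the end of the haystack never match.
    /// assert_eq!(None, sparse.matches(b"hay", 3));
    /// ```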
#[inline] pub fn matches(&self, haystack: &[u8], at: usize) -> Option<StateID> { haystack.get(at).and_then(|&b| self.matches_byte(b)) } /// This follows the matching transition for any member of the alphabet. /// /// The matching transition is found by looking for a matching byte /// range (there is at most one) corresponding to the position `at` in /// `haystack`. If the given alphabet unit is [`EOI`](alphabet::Unit::eoi), /// then this always returns `None`. #[inline] pub(crate) fn matches_unit( &self, unit: alphabet::Unit, ) -> Option<StateID> { unit.as_u8().map_or(None, |byte| self.matches_byte(byte)) } /// This follows the matching transition for a particular byte. /// /// The matching transition is found by looking for a matching byte range /// (there is at most one) corresponding to the byte given. #[inline] pub fn matches_byte(&self, byte: u8) -> Option<StateID> { for t in self.transitions.iter() { if t.start > byte { break; } else if t.matches_byte(byte) { return Some(t.next); } } None /* // This is an alternative implementation that uses binary search. In // some ad hoc experiments, like // // smallishru=OpenSubtitles2018.raw.sample.smallish.ru // regex-cli find nfa thompson pikevm -b "@$smallishru" '\b\w+\b' // // I could not observe any improvement, and in fact, things seemed to // be a bit slower. I can see an improvement in at least one benchmark: // // allcpssmall=all-codepoints-utf8-10x // regex-cli find nfa thompson pikevm @$allcpssmall '\pL{100}' // // Where total search time goes from 3.2s to 2.4s when using binary // search. self.transitions .binary_search_by(|t| { if t.end < byte { core::cmp::Ordering::Less } else if t.start > byte { core::cmp::Ordering::Greater } else { core::cmp::Ordering::Equal } }) .ok() .map(|i| self.transitions[i].next) */ } } /// A sequence of transitions used to represent a dense state. /// /// This is the primary representation of a [`Dense`](State::Dense) state. It /// provides constant time matching. That is, given a byte in a haystack and /// a `DenseTransitions`, one can determine if the state matches in constant /// time. /// /// This is in contrast to `SparseTransitions`, whose time complexity is /// necessarily bigger than constant time. Also in contrast, `DenseTransitions` /// usually requires (much) more heap memory. #[derive(Clone, Debug, Eq, PartialEq)] pub struct DenseTransitions { /// A dense representation of this state's transitions on the heap. This /// always has length 256. pub transitions: Box<[StateID]>, } impl DenseTransitions { /// This follows the matching transition for a particular byte. /// /// The matching transition is found by looking for a transition that /// doesn't correspond to `StateID::ZERO` for the byte `at` the given /// position in `haystack`. /// /// If `at >= haystack.len()`, then this returns `None`. #[inline] pub fn matches(&self, haystack: &[u8], at: usize) -> Option<StateID> { haystack.get(at).and_then(|&b| self.matches_byte(b)) } /// This follows the matching transition for any member of the alphabet. /// /// The matching transition is found by looking for a transition that /// doesn't correspond to `StateID::ZERO` for the byte `at` the given /// position in `haystack`. /// /// If `at >= haystack.len()` or if the given alphabet unit is /// [`EOI`](alphabet::Unit::eoi), then this returns `None`. 
#[inline] pub(crate) fn matches_unit( &self, unit: alphabet::Unit, ) -> Option<StateID> { unit.as_u8().map_or(None, |byte| self.matches_byte(byte)) } /// This follows the matching transition for a particular byte. /// /// The matching transition is found by looking for a transition that /// doesn't correspond to `StateID::ZERO` for the given `byte`. /// /// If `at >= haystack.len()`, then this returns `None`. #[inline] pub fn matches_byte(&self, byte: u8) -> Option<StateID> { let next = self.transitions[usize::from(byte)]; if next == StateID::ZERO { None } else { Some(next) } } /* /// The dense state optimization isn't currently enabled, so permit a /// little bit of dead code. pub(crate) fn from_sparse(sparse: &SparseTransitions) -> DenseTransitions { let mut dense = vec![StateID::ZERO; 256]; for t in sparse.transitions.iter() { for b in t.start..=t.end { dense[usize::from(b)] = t.next; } } DenseTransitions { transitions: dense.into_boxed_slice() } } */ /// Returns an iterator over all transitions that don't point to /// `StateID::ZERO`. pub(crate) fn iter(&self) -> impl Iterator<Item = Transition> + '_ { use crate::util::int::Usize; self.transitions .iter() .enumerate() .filter(|&(_, &sid)| sid != StateID::ZERO) .map(|(byte, &next)| Transition { start: byte.as_u8(), end: byte.as_u8(), next, }) } } /// A single transition to another state. /// /// This transition may only be followed if the current byte in the haystack /// falls in the inclusive range of bytes specified. #[derive(Clone, Copy, Eq, Hash, PartialEq)] pub struct Transition { /// The inclusive start of the byte range. pub start: u8, /// The inclusive end of the byte range. pub end: u8, /// The identifier of the state to transition to. pub next: StateID, } impl Transition { /// Returns true if the position `at` in `haystack` falls in this /// transition's range of bytes. /// /// If `at >= haystack.len()`, then this returns `false`. pub fn matches(&self, haystack: &[u8], at: usize) -> bool { haystack.get(at).map_or(false, |&b| self.matches_byte(b)) } /// Returns true if the given alphabet unit falls in this transition's /// range of bytes. If the given unit is [`EOI`](alphabet::Unit::eoi), then /// this returns `false`. pub fn matches_unit(&self, unit: alphabet::Unit) -> bool { unit.as_u8().map_or(false, |byte| self.matches_byte(byte)) } /// Returns true if the given byte falls in this transition's range of /// bytes. pub fn matches_byte(&self, byte: u8) -> bool { self.start <= byte && byte <= self.end } } impl fmt::Debug for Transition { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { use crate::util::escape::DebugByte; let Transition { start, end, next } = *self; if self.start == self.end { write!(f, "{:?} => {:?}", DebugByte(start), next.as_usize()) } else { write!( f, "{:?}-{:?} => {:?}", DebugByte(start), DebugByte(end), next.as_usize(), ) } } } /// An iterator over all pattern IDs in an NFA. /// /// This iterator is created by [`NFA::patterns`]. /// /// The lifetime parameter `'a` refers to the lifetime of the NFA from which /// this pattern iterator was created. #[derive(Debug)] pub struct PatternIter<'a> { it: PatternIDIter, /// We explicitly associate a lifetime with this iterator even though we /// don't actually borrow anything from the NFA. We do this for backward /// compatibility purposes. If we ever do need to borrow something from /// the NFA, then we can and just get rid of this marker without breaking /// the public API. 
_marker: core::marker::PhantomData<&'a ()>, } impl<'a> Iterator for PatternIter<'a> { type Item = PatternID; fn next(&mut self) -> Option<PatternID> { self.it.next() } } #[cfg(all(test, feature = "nfa-pikevm"))] mod tests { use super::*; use crate::{nfa::thompson::pikevm::PikeVM, Input}; // This asserts that an NFA state doesn't have its size changed. It is // *really* easy to accidentally increase the size, and thus potentially // dramatically increase the memory usage of every NFA. // // This assert doesn't mean we absolutely cannot increase the size of an // NFA state. We can. It's just here to make sure we do it knowingly and // intentionally. #[test] fn state_has_small_size() { #[cfg(target_pointer_width = "64")] assert_eq!(24, core::mem::size_of::<State>()); #[cfg(target_pointer_width = "32")] assert_eq!(20, core::mem::size_of::<State>()); } #[test] fn always_match() { let re = PikeVM::new_from_nfa(NFA::always_match()).unwrap(); let mut cache = re.create_cache(); let mut caps = re.create_captures(); let mut find = |haystack, start, end| { let input = Input::new(haystack).range(start..end); re.search(&mut cache, &input, &mut caps); caps.get_match().map(|m| m.end()) }; assert_eq!(Some(0), find("", 0, 0)); assert_eq!(Some(0), find("a", 0, 1)); assert_eq!(Some(1), find("a", 1, 1)); assert_eq!(Some(0), find("ab", 0, 2)); assert_eq!(Some(1), find("ab", 1, 2)); assert_eq!(Some(2), find("ab", 2, 2)); } #[test] fn never_match() { let re = PikeVM::new_from_nfa(NFA::never_match()).unwrap(); let mut cache = re.create_cache(); let mut caps = re.create_captures(); let mut find = |haystack, start, end| { let input = Input::new(haystack).range(start..end); re.search(&mut cache, &input, &mut caps); caps.get_match().map(|m| m.end()) }; assert_eq!(None, find("", 0, 0)); assert_eq!(None, find("a", 0, 1)); assert_eq!(None, find("a", 1, 1)); assert_eq!(None, find("ab", 0, 2)); assert_eq!(None, find("ab", 1, 2)); assert_eq!(None, find("ab", 2, 2)); } } <file_sep>/regex-capi/src/error.rs use std::ffi; use std::ffi::CString; use std::fmt; use std::str; use libc::c_char; use regex; #[derive(Debug)] pub struct Error { message: Option<CString>, kind: ErrorKind, } #[derive(Debug)] pub enum ErrorKind { None, Str(str::Utf8Error), Regex(regex::Error), Nul(ffi::NulError), } impl Error { pub fn new(kind: ErrorKind) -> Error { Error { message: None, kind: kind } } pub fn is_err(&self) -> bool { match self.kind { ErrorKind::None => false, ErrorKind::Str(_) | ErrorKind::Regex(_) | ErrorKind::Nul(_) => { true } } } } impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self.kind { ErrorKind::None => write!(f, "no error"), ErrorKind::Str(ref e) => e.fmt(f), ErrorKind::Regex(ref e) => e.fmt(f), ErrorKind::Nul(ref e) => e.fmt(f), } } } ffi_fn! { fn rure_error_new() -> *mut Error { Box::into_raw(Box::new(Error::new(ErrorKind::None))) } } ffi_fn! { fn rure_error_free(err: *mut Error) { unsafe { drop(Box::from_raw(err)); } } } ffi_fn! { fn rure_error_message(err: *mut Error) -> *const c_char { let err = unsafe { &mut *err }; let cmsg = match CString::new(format!("{}", err)) { Ok(msg) => msg, Err(err) => { // I guess this can probably happen if the regex itself has a // NUL, and that NUL re-occurs in the context presented by the // error message. In this case, just show as much as we can. 
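            // For example, if the formatted message happened to be
            // "invalid pattern: a\0b" (a hypothetical message, shown only
            // for illustration), then 'nul' would be the index of the '\0'
            // byte and the C string handed back would contain just
            // "invalid pattern: a".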
let nul = err.nul_position(); let msg = err.into_vec(); CString::new(msg[0..nul].to_owned()).unwrap() } }; let p = cmsg.as_ptr(); err.message = Some(cmsg); p } } <file_sep>/regex-cli/cmd/find/half/mod.rs use std::io::{stdout, Write}; use { anyhow::Context, lexopt::{Arg, Parser}, regex_automata::{HalfMatch, Input, MatchError, PatternID}, }; use crate::{ args::{self, Configurable, Usage}, util::{self, Table}, }; mod dfa; pub fn run(p: &mut Parser) -> anyhow::Result<()> { const USAGE: &'static str = r#"\ Executes a search for "half" matches. That is, matches that only have the end offset of a match (as well as the pattern that matched). This type of search can be done by any regex engine, but for this command, we only expose regex engines that can specifically do less work to report a half match. For example, a DFA regex can report a half match by only searching with its forward DFA. Indeed, if all we care about are half matches, we can avoid compiling a reverse DFA entirely. Also, since half matches cannot know the full bounds of a match, this only prints the end offset of each match and not the matched contents. (Since the start offset of each match is not known.) USAGE: regex-cli find half <engine> ENGINES: dense Search with the dense DFA regex engine. hybrid Search with the lazy DFA regex engine. meta Search with the meta regex engine. regex Search with the top-level API regex engine. sparse Search with the sparse DFA regex engine. "#; let cmd = args::next_as_command(USAGE, p)?; match &*cmd { "dense" => dfa::run_dense(p), "hybrid" => dfa::run_hybrid(p), "meta" => run_meta(p), "regex" => run_regex(p), "sparse" => dfa::run_sparse(p), unk => anyhow::bail!("unrecognized command '{}'", unk), } } #[derive(Debug, Default)] struct Args { overlapping: bool, } impl Configurable for Args { fn configure( &mut self, _: &mut Parser, arg: &mut Arg, ) -> anyhow::Result<bool> { match *arg { Arg::Long("overlapping") => { self.overlapping = true; } _ => return Ok(false), } Ok(true) } fn usage(&self) -> &[Usage] { const USAGES: &[Usage] = &[Usage::new( "--overlapping", "Search for overlapping matches.", r#" This flag enables overlapping mode, where the regex engine will attempt to find all possible matches reported by the underlying matcher. Generally this flag is used in conjunction with '--match-kind all'. If the match semantics are not set to compile all possible matches in the underlying automaton, then the results will likely be counter-intuitive. "#, )]; USAGES } } fn run_regex(p: &mut lexopt::Parser) -> anyhow::Result<()> { const USAGE: &'static str = "\ Executes a search for half matches using the top-level API regex engine. USAGE: regex-cli find half regex [-p <pattern> ...] <haystack-path> regex-cli find half regex [-p <pattern> ...] 
-y <haystack> TIP: use -h for short docs and --help for long docs OPTIONS: %options% "; let mut common = args::common::Config::default(); let mut patterns = args::patterns::Config::only_flags(); let mut haystack = args::haystack::Config::default(); let mut syntax = args::syntax::Config::default(); let mut api = args::api::Config::default(); let mut find = super::Config::default(); args::configure( p, USAGE, &mut [ &mut common, &mut patterns, &mut haystack, &mut syntax, &mut api, &mut find, ], )?; let pats = patterns.get()?; let syn = syntax.syntax()?; let mut table = Table::empty(); let (re, time) = util::timeitr(|| api.from_patterns(&syn, &pats))?; table.add("build regex time", time); // The top-level API doesn't support regex-automata's more granular Input // abstraction. let input = args::input::Config::default(); let search = |input: &Input<'_>| { Ok(re .shortest_match_at(input.haystack(), input.start()) .map(|offset| HalfMatch::new(PatternID::ZERO, offset))) }; if find.count { run_counts(&mut table, &common, &find, &input, &haystack, 1, search)?; } else { run_search(&mut table, &common, &find, &input, &haystack, search)?; } Ok(()) } fn run_meta(p: &mut lexopt::Parser) -> anyhow::Result<()> { const USAGE: &'static str = "\ Executes a search for half matches using the meta regex engine. USAGE: regex-cli find half meta [-p <pattern> ...] <haystack-path> regex-cli find half meta [-p <pattern> ...] -y <haystack> TIP: use -h for short docs and --help for long docs OPTIONS: %options% "; let mut common = args::common::Config::default(); let mut input = args::input::Config::default(); let mut patterns = args::patterns::Config::only_flags(); let mut haystack = args::haystack::Config::default(); let mut syntax = args::syntax::Config::default(); let mut meta = args::meta::Config::default(); let mut find = super::Config::default(); args::configure( p, USAGE, &mut [ &mut common, &mut input, &mut patterns, &mut haystack, &mut syntax, &mut meta, &mut find, ], )?; let pats = patterns.get()?; let mut table = Table::empty(); let re = if meta.build_from_patterns() { let (re, time) = util::timeitr(|| meta.from_patterns(&syntax, &pats))?; table.add("build meta time", time); re } else { let (asts, time) = util::timeitr(|| syntax.asts(&pats))?; table.add("parse time", time); let (hirs, time) = util::timeitr(|| syntax.hirs(&pats, &asts))?; table.add("translate time", time); let (re, time) = util::timeitr(|| meta.from_hirs(&hirs))?; table.add("build meta time", time); re }; let search = |input: &Input<'_>| Ok(re.search_half(input)); if find.count { run_counts( &mut table, &common, &find, &input, &haystack, re.pattern_len(), search, )?; } else { run_search(&mut table, &common, &find, &input, &haystack, search)?; } Ok(()) } /// A function that takes in a bunch of configuration, runs the given search /// routine, and prints out a table of counts. fn run_counts( table: &mut Table, common: &args::common::Config, find: &super::Config, input: &args::input::Config, haystack: &args::haystack::Config, pattern_len: usize, mut search: impl FnMut(&Input<'_>) -> Result<Option<HalfMatch>, MatchError>, ) -> anyhow::Result<()> { let mut out = stdout(); input.with(haystack, |input| { let (counts, time) = util::timeitr(|| { let mut counts = vec![0; pattern_len]; for _ in 0..find.repeat() { let mut it = regex_automata::util::iter::Searcher::new(input.clone()); while let Some(m) = it.try_advance_half(&mut search)? 
{ counts[m.pattern().as_usize()] += 1; } } Ok::<_, anyhow::Error>(counts) })?; table.add("search time", time); table.add("total matches", counts.iter().copied().sum::<u64>()); if common.table() { table.print(&mut out)?; } if !common.quiet { for (i, &count) in counts.iter().enumerate() { let pid = PatternID::new(i).context("invalid pattern ID")?; writeln!(out, "{}:{}", pid.as_usize(), count)?; } } Ok(()) }) } /// Like `run_counts`, but prints the actual matches instead. fn run_search( table: &mut Table, common: &args::common::Config, find: &super::Config, input: &args::input::Config, haystack: &args::haystack::Config, mut search: impl FnMut(&Input<'_>) -> Result<Option<HalfMatch>, MatchError>, ) -> anyhow::Result<()> { let mut out = stdout(); input.with(haystack, |input| { let (matches, time) = util::timeitr(|| { let mut matches = vec![]; for _ in 0..find.repeat() { let mut it = regex_automata::util::iter::Searcher::new(input.clone()); while let Some(m) = it.try_advance_half(&mut search)? { matches.push(m); } } Ok::<_, anyhow::Error>(matches) })?; table.add("search time", time); table.add("total matches", matches.len()); if common.table() { table.print(&mut out)?; } if !common.quiet { for m in matches.iter() { writeln!(out, "{}:{}", m.pattern().as_usize(), m.offset())?; } } Ok(()) }) } /// Like `run_counts`, but does an overlapping search. fn run_counts_overlapping<S>( table: &mut Table, common: &args::common::Config, find: &super::Config, input: &args::input::Config, haystack: &args::haystack::Config, pattern_len: usize, mut start_state: impl FnMut() -> S, mut get_match: impl FnMut(&S) -> Option<HalfMatch>, mut search: impl FnMut(&Input<'_>, &mut S) -> Result<(), MatchError>, ) -> anyhow::Result<()> { let mut out = stdout(); input.with(haystack, |input| { let (counts, time) = util::timeitr(|| { let mut counts = vec![0; pattern_len]; for _ in 0..find.repeat() { let mut state = start_state(); loop { search(&input, &mut state)?; match get_match(&state) { None => break, Some(hm) => counts[hm.pattern().as_usize()] += 1, } } } Ok::<_, anyhow::Error>(counts) })?; table.add("search time", time); table.add("total matches", counts.iter().copied().sum::<u64>()); if common.table() { table.print(&mut out)?; } if !common.quiet { for (i, &count) in counts.iter().enumerate() { let pid = PatternID::new(i).context("invalid pattern ID")?; writeln!(out, "{}:{}", pid.as_usize(), count)?; } } Ok(()) }) } /// Like `run_search`, but does an overlapping search. 
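///
/// The overlapping protocol here is closure-driven: `start_state` creates a
/// fresh overlapping search state, `search` advances that state by at most
/// one match, and `get_match` reads out whatever match (if any) the previous
/// call to `search` found. The loop in the body keeps calling `search` until
/// `get_match` reports `None`.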
fn run_search_overlapping<S>( table: &mut Table, common: &args::common::Config, find: &super::Config, input: &args::input::Config, haystack: &args::haystack::Config, mut start_state: impl FnMut() -> S, mut get_match: impl FnMut(&S) -> Option<HalfMatch>, mut search: impl FnMut(&Input<'_>, &mut S) -> Result<(), MatchError>, ) -> anyhow::Result<()> { let mut out = stdout(); input.with(haystack, |input| { let (matches, time) = util::timeitr(|| { let mut matches = vec![]; for _ in 0..find.repeat() { let mut state = start_state(); loop { search(&input, &mut state)?; match get_match(&state) { None => break, Some(hm) => matches.push(hm), } } } Ok::<_, anyhow::Error>(matches) })?; table.add("search time", time); table.add("total matches", matches.len()); if common.table() { table.print(&mut out)?; } if !common.quiet { for m in matches.iter() { writeln!(out, "{}:{}", m.pattern().as_usize(), m.offset())?; } } Ok(()) }) } <file_sep>/regex-lite/src/hir/mod.rs use alloc::{boxed::Box, string::String, vec, vec::Vec}; use crate::{error::Error, utf8}; mod parse; /// Escapes all regular expression meta characters in `pattern`. /// /// The string returned may be safely used as a literal in a regular /// expression. pub fn escape(pattern: &str) -> String { let mut buf = String::new(); buf.reserve(pattern.len()); for ch in pattern.chars() { if is_meta_character(ch) { buf.push('\\'); } buf.push(ch); } buf } /// Returns true if the given character has significance in a regex. /// /// Generally speaking, these are the only characters which _must_ be escaped /// in order to match their literal meaning. For example, to match a literal /// `|`, one could write `\|`. Sometimes escaping isn't always necessary. For /// example, `-` is treated as a meta character because of its significance /// for writing ranges inside of character classes, but the regex `-` will /// match a literal `-` because `-` has no special meaning outside of character /// classes. /// /// In order to determine whether a character may be escaped at all, the /// [`is_escapeable_character`] routine should be used. The difference between /// `is_meta_character` and `is_escapeable_character` is that the latter will /// return true for some characters that are _not_ meta characters. For /// example, `%` and `\%` both match a literal `%` in all contexts. In other /// words, `is_escapeable_character` includes "superfluous" escapes. /// /// Note that the set of characters for which this function returns `true` or /// `false` is fixed and won't change in a semver compatible release. (In this /// case, "semver compatible release" actually refers to the `regex` crate /// itself, since reducing or expanding the set of meta characters would be a /// breaking change for not just `regex-syntax` but also `regex` itself.) fn is_meta_character(c: char) -> bool { match c { '\\' | '.' | '+' | '*' | '?' | '(' | ')' | '|' | '[' | ']' | '{' | '}' | '^' | '$' | '#' | '&' | '-' | '~' => true, _ => false, } } /// Returns true if the given character can be escaped in a regex. /// /// This returns true in all cases that `is_meta_character` returns true, but /// also returns true in some cases where `is_meta_character` returns false. /// For example, `%` is not a meta character, but it is escapeable. That is, /// `%` and `\%` both match a literal `%` in all contexts. /// /// The purpose of this routine is to provide knowledge about what characters /// may be escaped. 
Namely, most regex engines permit "superfluous" escapes /// where characters without any special significance may be escaped even /// though there is no actual _need_ to do so. /// /// This will return false for some characters. For example, `e` is not /// escapeable. Therefore, `\e` will either result in a parse error (which is /// true today), or it could backwards compatibly evolve into a new construct /// with its own meaning. Indeed, that is the purpose of banning _some_ /// superfluous escapes: it provides a way to evolve the syntax in a compatible /// manner. fn is_escapeable_character(c: char) -> bool { // Certainly escapeable if it's a meta character. if is_meta_character(c) { return true; } // Any character that isn't ASCII is definitely not escapeable. There's // no real need to allow things like \☃ right? if !c.is_ascii() { return false; } // Otherwise, we basically say that everything is escapeable unless it's a // letter or digit. Things like \3 are either octal (when enabled) or an // error, and we should keep it that way. Otherwise, letters are reserved // for adding new syntax in a backwards compatible way. match c { '0'..='9' | 'A'..='Z' | 'a'..='z' => false, // While not currently supported, we keep these as not escapeable to // give us some flexibility with respect to supporting the \< and // \> word boundary assertions in the future. By rejecting them as // escapeable, \< and \> will result in a parse error. Thus, we can // turn them into something else in the future without it being a // backwards incompatible change. '<' | '>' => false, _ => true, } } /// The configuration for a regex parser. #[derive(Clone, Copy, Debug)] pub(crate) struct Config { /// The maximum number of times we're allowed to recurse. /// /// Note that unlike the regex-syntax parser, we actually use recursion in /// this parser for simplicity. My hope is that by setting a conservative /// default call limit and providing a way to configure it, that we can /// keep this simplification. But if we must, we can re-work the parser to /// put the call stack on the heap like regex-syntax does. pub(crate) nest_limit: u32, /// Various flags that control how a pattern is interpreted. pub(crate) flags: Flags, } impl Default for Config { fn default() -> Config { Config { nest_limit: 50, flags: Flags::default() } } } /// Various flags that control the interpretation of the pattern. /// /// These can be set via explicit configuration in code, or change dynamically /// during parsing via inline flags. For example, `foo(?i:bar)baz` will match /// `foo` and `baz` case sensitiviely and `bar` case insensitively (assuming a /// default configuration). #[derive(Clone, Copy, Debug, Default)] pub(crate) struct Flags { /// Whether to match case insensitively. /// /// This is the `i` flag. pub(crate) case_insensitive: bool, /// Whether `^` and `$` should be treated as line anchors or not. /// /// This is the `m` flag. pub(crate) multi_line: bool, /// Whether `.` should match line terminators or not. /// /// This is the `s` flag. pub(crate) dot_matches_new_line: bool, /// Whether to swap the meaning of greedy and non-greedy operators. /// /// This is the `U` flag. pub(crate) swap_greed: bool, /// Whether to enable CRLF mode. /// /// This is the `R` flag. pub(crate) crlf: bool, /// Whether to ignore whitespace. i.e., verbose mode. /// /// This is the `x` flag. 
pub(crate) ignore_whitespace: bool, } #[derive(Clone, Debug, Eq, PartialEq)] pub(crate) struct Hir { kind: HirKind, is_start_anchored: bool, is_match_empty: bool, static_explicit_captures_len: Option<usize>, } #[derive(Clone, Debug, Eq, PartialEq)] pub(crate) enum HirKind { Empty, Char(char), Class(Class), Look(Look), Repetition(Repetition), Capture(Capture), Concat(Vec<Hir>), Alternation(Vec<Hir>), } impl Hir { /// Parses the given pattern string with the given configuration into a /// structured representation. If the pattern is invalid, then an error /// is returned. pub(crate) fn parse(config: Config, pattern: &str) -> Result<Hir, Error> { self::parse::Parser::new(config, pattern).parse() } /// Returns the underlying kind of this high-level intermediate /// representation. /// /// Note that there is explicitly no way to build an `Hir` directly from /// an `HirKind`. If you need to do that, then you must do case analysis /// on the `HirKind` and call the appropriate smart constructor on `Hir`. pub(crate) fn kind(&self) -> &HirKind { &self.kind } /// Returns true if and only if this Hir expression can only match at the /// beginning of a haystack. pub(crate) fn is_start_anchored(&self) -> bool { self.is_start_anchored } /// Returns true if and only if this Hir expression can match the empty /// string. pub(crate) fn is_match_empty(&self) -> bool { self.is_match_empty } /// If the pattern always reports the same number of matching capture groups /// for every match, then this returns the number of those groups. This /// doesn't include the implicit group found in every pattern. pub(crate) fn static_explicit_captures_len(&self) -> Option<usize> { self.static_explicit_captures_len } fn fail() -> Hir { let kind = HirKind::Class(Class { ranges: vec![] }); Hir { kind, is_start_anchored: false, is_match_empty: false, static_explicit_captures_len: Some(0), } } fn empty() -> Hir { let kind = HirKind::Empty; Hir { kind, is_start_anchored: false, is_match_empty: true, static_explicit_captures_len: Some(0), } } fn char(ch: char) -> Hir { let kind = HirKind::Char(ch); Hir { kind, is_start_anchored: false, is_match_empty: false, static_explicit_captures_len: Some(0), } } fn class(class: Class) -> Hir { let kind = HirKind::Class(class); Hir { kind, is_start_anchored: false, is_match_empty: false, static_explicit_captures_len: Some(0), } } fn look(look: Look) -> Hir { let kind = HirKind::Look(look); Hir { kind, is_start_anchored: matches!(look, Look::Start), is_match_empty: true, static_explicit_captures_len: Some(0), } } fn repetition(rep: Repetition) -> Hir { if rep.min == 0 && rep.max == Some(0) { return Hir::empty(); } else if rep.min == 1 && rep.max == Some(1) { return *rep.sub; } let is_start_anchored = rep.min > 0 && rep.sub.is_start_anchored; let is_match_empty = rep.min == 0 || rep.sub.is_match_empty; let mut static_explicit_captures_len = rep.sub.static_explicit_captures_len; // If the static captures len of the sub-expression is not known or // is greater than zero, then it automatically propagates to the // repetition, regardless of the repetition. Otherwise, it might // change, but only when the repetition can match 0 times. if rep.min == 0 && static_explicit_captures_len.map_or(false, |len| len > 0) { // If we require a match 0 times, then our captures len is // guaranteed to be zero. Otherwise, if we *can* match the empty // string, then it's impossible to know how many captures will be // in the resulting match. 
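            // For example, with '(a)*' the number of matching groups depends
            // on the haystack: an empty match has no matching groups while a
            // match of "a" has one, so the count can't be known statically
            // and we fall through to 'None' below.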
if rep.max == Some(0) { static_explicit_captures_len = Some(0); } else { static_explicit_captures_len = None; } } Hir { kind: HirKind::Repetition(rep), is_start_anchored, is_match_empty, static_explicit_captures_len, } } fn capture(cap: Capture) -> Hir { let is_start_anchored = cap.sub.is_start_anchored; let is_match_empty = cap.sub.is_match_empty; let static_explicit_captures_len = cap .sub .static_explicit_captures_len .map(|len| len.saturating_add(1)); let kind = HirKind::Capture(cap); Hir { kind, is_start_anchored, is_match_empty, static_explicit_captures_len, } } fn concat(mut subs: Vec<Hir>) -> Hir { if subs.is_empty() { Hir::empty() } else if subs.len() == 1 { subs.pop().unwrap() } else { let is_start_anchored = subs[0].is_start_anchored; let mut is_match_empty = true; let mut static_explicit_captures_len = Some(0usize); for sub in subs.iter() { is_match_empty = is_match_empty && sub.is_match_empty; static_explicit_captures_len = static_explicit_captures_len .and_then(|len1| { Some((len1, sub.static_explicit_captures_len?)) }) .and_then(|(len1, len2)| Some(len1.saturating_add(len2))); } Hir { kind: HirKind::Concat(subs), is_start_anchored, is_match_empty, static_explicit_captures_len, } } } fn alternation(mut subs: Vec<Hir>) -> Hir { if subs.is_empty() { Hir::fail() } else if subs.len() == 1 { subs.pop().unwrap() } else { let mut it = subs.iter().peekable(); let mut is_start_anchored = it.peek().map_or(false, |sub| sub.is_start_anchored); let mut is_match_empty = it.peek().map_or(false, |sub| sub.is_match_empty); let mut static_explicit_captures_len = it.peek().and_then(|sub| sub.static_explicit_captures_len); for sub in it { is_start_anchored = is_start_anchored && sub.is_start_anchored; is_match_empty = is_match_empty || sub.is_match_empty; if static_explicit_captures_len != sub.static_explicit_captures_len { static_explicit_captures_len = None; } } Hir { kind: HirKind::Alternation(subs), is_start_anchored, is_match_empty, static_explicit_captures_len, } } } } #[derive(Clone, Debug, Eq, PartialEq)] pub(crate) struct Class { pub(crate) ranges: Vec<ClassRange>, } impl Class { /// Create a new class from the given ranges. The ranges may be provided /// in any order or may even overlap. They will be automatically /// canonicalized. fn new<I: IntoIterator<Item = ClassRange>>(ranges: I) -> Class { let mut class = Class { ranges: ranges.into_iter().collect() }; class.canonicalize(); class } /// Expand this class such that it matches the ASCII codepoints in this set /// case insensitively. fn ascii_case_fold(&mut self) { let len = self.ranges.len(); for i in 0..len { if let Some(folded) = self.ranges[i].ascii_case_fold() { self.ranges.push(folded); } } self.canonicalize(); } /// Negate this set. /// /// For all `x` where `x` is any element, if `x` was in this set, then it /// will not be in this set after negation. fn negate(&mut self) { const MIN: char = '\x00'; const MAX: char = char::MAX; if self.ranges.is_empty() { self.ranges.push(ClassRange { start: MIN, end: MAX }); return; } // There should be a way to do this in-place with constant memory, // but I couldn't figure out a simple way to do it. So just append // the negation to the end of this range, and then drain it before // we're done. let drain_end = self.ranges.len(); // If our class doesn't start the minimum possible char, then negation // needs to include all codepoints up to the minimum in this set. if self.ranges[0].start > MIN { self.ranges.push(ClassRange { start: MIN, // OK because we know it's bigger than MIN. 
end: prev_char(self.ranges[0].start).unwrap(), }); } for i in 1..drain_end { // let lower = self.ranges[i - 1].upper().increment(); // let upper = self.ranges[i].lower().decrement(); // self.ranges.push(I::create(lower, upper)); self.ranges.push(ClassRange { // OK because we know i-1 is never the last range and therefore // there must be a range greater than it. It therefore follows // that 'end' can never be char::MAX, and thus there must be // a next char. start: next_char(self.ranges[i - 1].end).unwrap(), // Since 'i' is guaranteed to never be the first range, it // follows that there is always a range before this and thus // 'start' can never be '\x00'. Thus, there must be a previous // char. end: prev_char(self.ranges[i].start).unwrap(), }); } if self.ranges[drain_end - 1].end < MAX { // let lower = self.ranges[drain_end - 1].upper().increment(); // self.ranges.push(I::create(lower, I::Bound::max_value())); self.ranges.push(ClassRange { // OK because we know 'end' is less than char::MAX, and thus // there is a next char. start: next_char(self.ranges[drain_end - 1].end).unwrap(), end: MAX, }); } self.ranges.drain(..drain_end); // We don't need to canonicalize because we processed the ranges above // in canonical order and the new ranges we added based on those are // also necessarily in canonical order. } /// Converts this set into a canonical ordering. fn canonicalize(&mut self) { if self.is_canonical() { return; } self.ranges.sort(); assert!(!self.ranges.is_empty()); // Is there a way to do this in-place with constant memory? I couldn't // figure out a way to do it. So just append the canonicalization to // the end of this range, and then drain it before we're done. let drain_end = self.ranges.len(); for oldi in 0..drain_end { // If we've added at least one new range, then check if we can // merge this range in the previously added range. if self.ranges.len() > drain_end { let (last, rest) = self.ranges.split_last_mut().unwrap(); if let Some(union) = last.union(&rest[oldi]) { *last = union; continue; } } self.ranges.push(self.ranges[oldi]); } self.ranges.drain(..drain_end); } /// Returns true if and only if this class is in a canonical ordering. fn is_canonical(&self) -> bool { for pair in self.ranges.windows(2) { if pair[0] >= pair[1] { return false; } if pair[0].is_contiguous(&pair[1]) { return false; } } true } } #[derive(Clone, Copy, Debug, Eq, PartialEq, PartialOrd, Ord)] pub(crate) struct ClassRange { pub(crate) start: char, pub(crate) end: char, } impl ClassRange { /// Apply simple case folding to this byte range. Only ASCII case mappings /// (for A-Za-z) are applied. /// /// Additional ranges are appended to the given vector. Canonical ordering /// is *not* maintained in the given vector. fn ascii_case_fold(&self) -> Option<ClassRange> { if !(ClassRange { start: 'a', end: 'z' }).is_intersection_empty(self) { let start = core::cmp::max(self.start, 'a'); let end = core::cmp::min(self.end, 'z'); return Some(ClassRange { start: char::try_from(u32::from(start) - 32).unwrap(), end: char::try_from(u32::from(end) - 32).unwrap(), }); } if !(ClassRange { start: 'A', end: 'Z' }).is_intersection_empty(self) { let start = core::cmp::max(self.start, 'A'); let end = core::cmp::min(self.end, 'Z'); return Some(ClassRange { start: char::try_from(u32::from(start) + 32).unwrap(), end: char::try_from(u32::from(end) + 32).unwrap(), }); } None } /// Union the given overlapping range into this range. /// /// If the two ranges aren't contiguous, then this returns `None`. 
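    ///
    /// For example (using character range notation loosely), `a-c` and `d-f`
    /// are adjacent and union to `a-f`, while `a-c` and `e-f` are neither
    /// overlapping nor adjacent, so their union is `None`.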
fn union(&self, other: &ClassRange) -> Option<ClassRange> { if !self.is_contiguous(other) { return None; } let start = core::cmp::min(self.start, other.start); let end = core::cmp::max(self.end, other.end); Some(ClassRange { start, end }) } /// Returns true if and only if the two ranges are contiguous. Two ranges /// are contiguous if and only if the ranges are either overlapping or /// adjacent. fn is_contiguous(&self, other: &ClassRange) -> bool { let (s1, e1) = (u32::from(self.start), u32::from(self.end)); let (s2, e2) = (u32::from(other.start), u32::from(other.end)); core::cmp::max(s1, s2) <= core::cmp::min(e1, e2).saturating_add(1) } /// Returns true if and only if the intersection of this range and the /// other range is empty. fn is_intersection_empty(&self, other: &ClassRange) -> bool { let (s1, e1) = (self.start, self.end); let (s2, e2) = (other.start, other.end); core::cmp::max(s1, s2) > core::cmp::min(e1, e2) } } /// The high-level intermediate representation for a look-around assertion. /// /// An assertion match is always zero-length. Also called an "empty match." #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub(crate) enum Look { /// Match the beginning of text. Specifically, this matches at the starting /// position of the input. Start = 1 << 0, /// Match the end of text. Specifically, this matches at the ending /// position of the input. End = 1 << 1, /// Match the beginning of a line or the beginning of text. Specifically, /// this matches at the starting position of the input, or at the position /// immediately following a `\n` character. StartLF = 1 << 2, /// Match the end of a line or the end of text. Specifically, this matches /// at the end position of the input, or at the position immediately /// preceding a `\n` character. EndLF = 1 << 3, /// Match the beginning of a line or the beginning of text. Specifically, /// this matches at the starting position of the input, or at the position /// immediately following either a `\r` or `\n` character, but never after /// a `\r` when a `\n` follows. StartCRLF = 1 << 4, /// Match the end of a line or the end of text. Specifically, this matches /// at the end position of the input, or at the position immediately /// preceding a `\r` or `\n` character, but never before a `\n` when a `\r` /// precedes it. EndCRLF = 1 << 5, /// Match an ASCII-only word boundary. That is, this matches a position /// where the left adjacent character and right adjacent character /// correspond to a word and non-word or a non-word and word character. Word = 1 << 6, /// Match an ASCII-only negation of a word boundary. WordNegate = 1 << 7, } impl Look { /// Returns true if the given position in the given haystack matches this /// look-around assertion. 
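    ///
    /// For example, under this routine `Look::Word.is_match(b"ab cd", 2)` is
    /// true, since `b'b'` at position 1 is a word byte while `b' '` at
    /// position 2 is not, and `Look::Start.is_match(b"ab cd", 2)` is false,
    /// since position 2 is not the start of the haystack.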
pub(crate) fn is_match(&self, haystack: &[u8], at: usize) -> bool { use self::Look::*; match *self { Start => at == 0, End => at == haystack.len(), StartLF => at == 0 || haystack[at - 1] == b'\n', EndLF => at == haystack.len() || haystack[at] == b'\n', StartCRLF => { at == 0 || haystack[at - 1] == b'\n' || (haystack[at - 1] == b'\r' && (at >= haystack.len() || haystack[at] != b'\n')) } EndCRLF => { at == haystack.len() || haystack[at] == b'\r' || (haystack[at] == b'\n' && (at == 0 || haystack[at - 1] != b'\r')) } Word => { let word_before = at > 0 && utf8::is_word_byte(haystack[at - 1]); let word_after = at < haystack.len() && utf8::is_word_byte(haystack[at]); word_before != word_after } WordNegate => { let word_before = at > 0 && utf8::is_word_byte(haystack[at - 1]); let word_after = at < haystack.len() && utf8::is_word_byte(haystack[at]); word_before == word_after } } } } /// The high-level intermediate representation of a repetition operator. /// /// A repetition operator permits the repetition of an arbitrary /// sub-expression. #[derive(Clone, Debug, Eq, PartialEq)] pub(crate) struct Repetition { /// The minimum range of the repetition. /// /// Note that special cases like `?`, `+` and `*` all get translated into /// the ranges `{0,1}`, `{1,}` and `{0,}`, respectively. /// /// When `min` is zero, this expression can match the empty string /// regardless of what its sub-expression is. pub(crate) min: u32, /// The maximum range of the repetition. /// /// Note that when `max` is `None`, `min` acts as a lower bound but where /// there is no upper bound. For something like `x{5}` where the min and /// max are equivalent, `min` will be set to `5` and `max` will be set to /// `Some(5)`. pub(crate) max: Option<u32>, /// Whether this repetition operator is greedy or not. A greedy operator /// will match as much as it can. A non-greedy operator will match as /// little as it can. /// /// Typically, operators are greedy by default and are only non-greedy when /// a `?` suffix is used, e.g., `(expr)*` is greedy while `(expr)*?` is /// not. However, this can be inverted via the `U` "ungreedy" flag. pub(crate) greedy: bool, /// The expression being repeated. pub(crate) sub: Box<Hir>, } /// The high-level intermediate representation for a capturing group. /// /// A capturing group always has an index and a child expression. It may /// also have a name associated with it (e.g., `(?P<foo>\w)`), but it's not /// necessary. /// /// Note that there is no explicit representation of a non-capturing group /// in a `Hir`. Instead, non-capturing grouping is handled automatically by /// the recursive structure of the `Hir` itself. #[derive(Clone, Debug, Eq, PartialEq)] pub(crate) struct Capture { /// The capture index of the capture. pub(crate) index: u32, /// The name of the capture, if it exists. pub(crate) name: Option<Box<str>>, /// The expression inside the capturing group, which may be empty. pub(crate) sub: Box<Hir>, } fn next_char(ch: char) -> Option<char> { // Skip over the surrogate range. if ch == '\u{D7FF}' { return Some('\u{E000}'); } // OK because char::MAX < u32::MAX and we handle U+D7FF above. char::from_u32(u32::from(ch).checked_add(1).unwrap()) } fn prev_char(ch: char) -> Option<char> { // Skip over the surrogate range. if ch == '\u{E000}' { return Some('\u{D7FF}'); } // OK because subtracting 1 from any valid scalar value other than 0 // and U+E000 yields a valid scalar value. 
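    // The '\u{E000}' case is handled above, and for '\x00' the checked_sub
    // below yields None, which the '?' operator propagates, so
    // prev_char('\x00') is None rather than a panic.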
    Some(char::from_u32(u32::from(ch).checked_sub(1)?).unwrap())
}
<file_sep>/regex-cli/cmd/generate/serialize/mod.rs
use crate::args;

mod dfa;

pub fn run(p: &mut lexopt::Parser) -> anyhow::Result<()> {
    const USAGE: &'static str = "\
Serializes regex objects to disk, and optionally includes generating Rust
source code for embedding and loading those regex objects into your program.

USAGE:
    regex-cli generate serialize <engine>

ENGINES:
    dense    Serialize fully compiled dense DFAs or dense regex DFAs.
    sparse   Serialize fully compiled sparse DFAs or sparse regex DFAs.
";
    match &*args::next_as_command(USAGE, p)? {
        "dense" => dfa::run_dense(p),
        "sparse" => dfa::run_sparse(p),
        unk => anyhow::bail!("unrecognized command '{}'", unk),
    }
}
<file_sep>/regex-syntax/src/hir/translate.rs
/*!
Defines a translator that converts an `Ast` to an `Hir`.
*/

use core::cell::{Cell, RefCell};

use alloc::{boxed::Box, string::ToString, vec, vec::Vec};

use crate::{
    ast::{self, Ast, Span, Visitor},
    either::Either,
    hir::{self, Error, ErrorKind, Hir, HirKind},
    unicode::{self, ClassQuery},
};

type Result<T> = core::result::Result<T, Error>;

/// A builder for constructing an AST->HIR translator.
#[derive(Clone, Debug)]
pub struct TranslatorBuilder {
    utf8: bool,
    line_terminator: u8,
    flags: Flags,
}

impl Default for TranslatorBuilder {
    fn default() -> TranslatorBuilder {
        TranslatorBuilder::new()
    }
}

impl TranslatorBuilder {
    /// Create a new translator builder with a default configuration.
    pub fn new() -> TranslatorBuilder {
        TranslatorBuilder {
            utf8: true,
            line_terminator: b'\n',
            flags: Flags::default(),
        }
    }

    /// Build a translator using the current configuration.
    pub fn build(&self) -> Translator {
        Translator {
            stack: RefCell::new(vec![]),
            flags: Cell::new(self.flags),
            utf8: self.utf8,
            line_terminator: self.line_terminator,
        }
    }

    /// When disabled, translation will permit the construction of a regular
    /// expression that may match invalid UTF-8.
    ///
    /// When enabled (the default), the translator is guaranteed to produce an
    /// expression that, for non-empty matches, will only ever produce spans
    /// that are entirely valid UTF-8 (otherwise, the translator will return an
    /// error).
    ///
    /// Perhaps surprisingly, when UTF-8 is enabled, an empty regex or even
    /// a negated ASCII word boundary (uttered as `(?-u:\B)` in the concrete
    /// syntax) will be allowed even though they can produce matches that split
    /// a UTF-8 encoded codepoint. This only applies to zero-width or "empty"
    /// matches, and it is expected that the regex engine itself must handle
    /// these cases if necessary (perhaps by suppressing any zero-width matches
    /// that split a codepoint).
    pub fn utf8(&mut self, yes: bool) -> &mut TranslatorBuilder {
        self.utf8 = yes;
        self
    }

    /// Sets the line terminator for use with `(?u-s:.)` and `(?-us:.)`.
    ///
    /// Namely, instead of `.` (by default) matching everything except for `\n`,
    /// this will cause `.` to match everything except for the byte given.
    ///
    /// If `.` is used in a context where Unicode mode is enabled and this byte
    /// isn't ASCII, then an error will be returned. When Unicode mode is
    /// disabled, then any byte is permitted, but will return an error if UTF-8
    /// mode is enabled and it is a non-ASCII byte.
    ///
    /// In short, any ASCII value for a line terminator is always okay. But a
    /// non-ASCII byte might result in an error depending on whether Unicode
    /// mode or UTF-8 mode are enabled.
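    ///
    /// A minimal usage sketch (assuming the public
    /// `regex_syntax::hir::translate` path exported by this crate):
    ///
    /// ```
    /// use regex_syntax::hir::translate::TranslatorBuilder;
    ///
    /// let mut builder = TranslatorBuilder::new();
    /// // `.` will now match everything except the NUL byte, instead of
    /// // everything except `\n`.
    /// builder.line_terminator(b'\x00');
    /// let _translator = builder.build();
    /// ```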
/// /// Note that if `R` mode is enabled then it always takes precedence and /// the line terminator will be treated as `\r` and `\n` simultaneously. /// /// Note also that this *doesn't* impact the look-around assertions /// `(?m:^)` and `(?m:$)`. That's usually controlled by additional /// configuration in the regex engine itself. pub fn line_terminator(&mut self, byte: u8) -> &mut TranslatorBuilder { self.line_terminator = byte; self } /// Enable or disable the case insensitive flag (`i`) by default. pub fn case_insensitive(&mut self, yes: bool) -> &mut TranslatorBuilder { self.flags.case_insensitive = if yes { Some(true) } else { None }; self } /// Enable or disable the multi-line matching flag (`m`) by default. pub fn multi_line(&mut self, yes: bool) -> &mut TranslatorBuilder { self.flags.multi_line = if yes { Some(true) } else { None }; self } /// Enable or disable the "dot matches any character" flag (`s`) by /// default. pub fn dot_matches_new_line( &mut self, yes: bool, ) -> &mut TranslatorBuilder { self.flags.dot_matches_new_line = if yes { Some(true) } else { None }; self } /// Enable or disable the CRLF mode flag (`R`) by default. pub fn crlf(&mut self, yes: bool) -> &mut TranslatorBuilder { self.flags.crlf = if yes { Some(true) } else { None }; self } /// Enable or disable the "swap greed" flag (`U`) by default. pub fn swap_greed(&mut self, yes: bool) -> &mut TranslatorBuilder { self.flags.swap_greed = if yes { Some(true) } else { None }; self } /// Enable or disable the Unicode flag (`u`) by default. pub fn unicode(&mut self, yes: bool) -> &mut TranslatorBuilder { self.flags.unicode = if yes { None } else { Some(false) }; self } } /// A translator maps abstract syntax to a high level intermediate /// representation. /// /// A translator may be benefit from reuse. That is, a translator can translate /// many abstract syntax trees. /// /// A `Translator` can be configured in more detail via a /// [`TranslatorBuilder`]. #[derive(Clone, Debug)] pub struct Translator { /// Our call stack, but on the heap. stack: RefCell<Vec<HirFrame>>, /// The current flag settings. flags: Cell<Flags>, /// Whether we're allowed to produce HIR that can match arbitrary bytes. utf8: bool, /// The line terminator to use for `.`. line_terminator: u8, } impl Translator { /// Create a new translator using the default configuration. pub fn new() -> Translator { TranslatorBuilder::new().build() } /// Translate the given abstract syntax tree (AST) into a high level /// intermediate representation (HIR). /// /// If there was a problem doing the translation, then an HIR-specific /// error is returned. /// /// The original pattern string used to produce the `Ast` *must* also be /// provided. The translator does not use the pattern string during any /// correct translation, but is used for error reporting. pub fn translate(&mut self, pattern: &str, ast: &Ast) -> Result<Hir> { ast::visit(ast, TranslatorI::new(self, pattern)) } } /// An HirFrame is a single stack frame, represented explicitly, which is /// created for each item in the Ast that we traverse. /// /// Note that technically, this type doesn't represent our entire stack /// frame. In particular, the Ast visitor represents any state associated with /// traversing the Ast itself. #[derive(Clone, Debug)] enum HirFrame { /// An arbitrary HIR expression. These get pushed whenever we hit a base /// case in the Ast. They get popped after an inductive (i.e., recursive) /// step is complete. 
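    ///
    /// For example, when translating `a.`, the dot is pushed as an `Expr`
    /// frame while the `a` accumulates in a separate `Literal` frame; both
    /// are popped and combined once the enclosing concatenation is finished.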
Expr(Hir), /// A literal that is being constructed, character by character, from the /// AST. We need this because the AST gives each individual character its /// own node. So as we see characters, we peek at the top-most HirFrame. /// If it's a literal, then we add to it. Otherwise, we push a new literal. /// When it comes time to pop it, we convert it to an Hir via Hir::literal. Literal(Vec<u8>), /// A Unicode character class. This frame is mutated as we descend into /// the Ast of a character class (which is itself its own mini recursive /// structure). ClassUnicode(hir::ClassUnicode), /// A byte-oriented character class. This frame is mutated as we descend /// into the Ast of a character class (which is itself its own mini /// recursive structure). /// /// Byte character classes are created when Unicode mode (`u`) is disabled. /// If `utf8` is enabled (the default), then a byte character is only /// permitted to match ASCII text. ClassBytes(hir::ClassBytes), /// This is pushed whenever a repetition is observed. After visiting every /// sub-expression in the repetition, the translator's stack is expected to /// have this sentinel at the top. /// /// This sentinel only exists to stop other things (like flattening /// literals) from reaching across repetition operators. Repetition, /// This is pushed on to the stack upon first seeing any kind of capture, /// indicated by parentheses (including non-capturing groups). It is popped /// upon leaving a group. Group { /// The old active flags when this group was opened. /// /// If this group sets flags, then the new active flags are set to the /// result of merging the old flags with the flags introduced by this /// group. If the group doesn't set any flags, then this is simply /// equivalent to whatever flags were set when the group was opened. /// /// When this group is popped, the active flags should be restored to /// the flags set here. /// /// The "active" flags correspond to whatever flags are set in the /// Translator. old_flags: Flags, }, /// This is pushed whenever a concatenation is observed. After visiting /// every sub-expression in the concatenation, the translator's stack is /// popped until it sees a Concat frame. Concat, /// This is pushed whenever an alternation is observed. After visiting /// every sub-expression in the alternation, the translator's stack is /// popped until it sees an Alternation frame. Alternation, /// This is pushed immediately before each sub-expression in an /// alternation. This separates the branches of an alternation on the /// stack and prevents literal flattening from reaching across alternation /// branches. /// /// It is popped after each expression in a branch until an 'Alternation' /// frame is observed when doing a post visit on an alternation. AlternationBranch, } impl HirFrame { /// Assert that the current stack frame is an Hir expression and return it. fn unwrap_expr(self) -> Hir { match self { HirFrame::Expr(expr) => expr, HirFrame::Literal(lit) => Hir::literal(lit), _ => panic!("tried to unwrap expr from HirFrame, got: {:?}", self), } } /// Assert that the current stack frame is a Unicode class expression and /// return it. fn unwrap_class_unicode(self) -> hir::ClassUnicode { match self { HirFrame::ClassUnicode(cls) => cls, _ => panic!( "tried to unwrap Unicode class \ from HirFrame, got: {:?}", self ), } } /// Assert that the current stack frame is a byte class expression and /// return it. 
fn unwrap_class_bytes(self) -> hir::ClassBytes { match self { HirFrame::ClassBytes(cls) => cls, _ => panic!( "tried to unwrap byte class \ from HirFrame, got: {:?}", self ), } } /// Assert that the current stack frame is a repetition sentinel. If it /// isn't, then panic. fn unwrap_repetition(self) { match self { HirFrame::Repetition => {} _ => { panic!( "tried to unwrap repetition from HirFrame, got: {:?}", self ) } } } /// Assert that the current stack frame is a group indicator and return /// its corresponding flags (the flags that were active at the time the /// group was entered). fn unwrap_group(self) -> Flags { match self { HirFrame::Group { old_flags } => old_flags, _ => { panic!("tried to unwrap group from HirFrame, got: {:?}", self) } } } /// Assert that the current stack frame is an alternation pipe sentinel. If /// it isn't, then panic. fn unwrap_alternation_pipe(self) { match self { HirFrame::AlternationBranch => {} _ => { panic!( "tried to unwrap alt pipe from HirFrame, got: {:?}", self ) } } } } impl<'t, 'p> Visitor for TranslatorI<'t, 'p> { type Output = Hir; type Err = Error; fn finish(self) -> Result<Hir> { // ... otherwise, we should have exactly one HIR on the stack. assert_eq!(self.trans().stack.borrow().len(), 1); Ok(self.pop().unwrap().unwrap_expr()) } fn visit_pre(&mut self, ast: &Ast) -> Result<()> { match *ast { Ast::Class(ast::Class::Bracketed(_)) => { if self.flags().unicode() { let cls = hir::ClassUnicode::empty(); self.push(HirFrame::ClassUnicode(cls)); } else { let cls = hir::ClassBytes::empty(); self.push(HirFrame::ClassBytes(cls)); } } Ast::Repetition(_) => self.push(HirFrame::Repetition), Ast::Group(ref x) => { let old_flags = x .flags() .map(|ast| self.set_flags(ast)) .unwrap_or_else(|| self.flags()); self.push(HirFrame::Group { old_flags }); } Ast::Concat(ref x) if x.asts.is_empty() => {} Ast::Concat(_) => { self.push(HirFrame::Concat); } Ast::Alternation(ref x) if x.asts.is_empty() => {} Ast::Alternation(_) => { self.push(HirFrame::Alternation); self.push(HirFrame::AlternationBranch); } _ => {} } Ok(()) } fn visit_post(&mut self, ast: &Ast) -> Result<()> { match *ast { Ast::Empty(_) => { self.push(HirFrame::Expr(Hir::empty())); } Ast::Flags(ref x) => { self.set_flags(&x.flags); // Flags in the AST are generally considered directives and // not actual sub-expressions. However, they can be used in // the concrete syntax like `((?i))`, and we need some kind of // indication of an expression there, and Empty is the correct // choice. // // There can also be things like `(?i)+`, but we rule those out // in the parser. In the future, we might allow them for // consistency sake. self.push(HirFrame::Expr(Hir::empty())); } Ast::Literal(ref x) => { match self.ast_literal_to_scalar(x)? { Either::Right(byte) => self.push_byte(byte), Either::Left(ch) => { if !self.flags().unicode() && ch.len_utf8() > 1 { return Err(self .error(x.span, ErrorKind::UnicodeNotAllowed)); } match self.case_fold_char(x.span, ch)? 
{ None => self.push_char(ch), Some(expr) => self.push(HirFrame::Expr(expr)), } } } // self.push(HirFrame::Expr(self.hir_literal(x)?)); } Ast::Dot(span) => { self.push(HirFrame::Expr(self.hir_dot(span)?)); } Ast::Assertion(ref x) => { self.push(HirFrame::Expr(self.hir_assertion(x)?)); } Ast::Class(ast::Class::Perl(ref x)) => { if self.flags().unicode() { let cls = self.hir_perl_unicode_class(x)?; let hcls = hir::Class::Unicode(cls); self.push(HirFrame::Expr(Hir::class(hcls))); } else { let cls = self.hir_perl_byte_class(x)?; let hcls = hir::Class::Bytes(cls); self.push(HirFrame::Expr(Hir::class(hcls))); } } Ast::Class(ast::Class::Unicode(ref x)) => { let cls = hir::Class::Unicode(self.hir_unicode_class(x)?); self.push(HirFrame::Expr(Hir::class(cls))); } Ast::Class(ast::Class::Bracketed(ref ast)) => { if self.flags().unicode() { let mut cls = self.pop().unwrap().unwrap_class_unicode(); self.unicode_fold_and_negate( &ast.span, ast.negated, &mut cls, )?; let expr = Hir::class(hir::Class::Unicode(cls)); self.push(HirFrame::Expr(expr)); } else { let mut cls = self.pop().unwrap().unwrap_class_bytes(); self.bytes_fold_and_negate( &ast.span, ast.negated, &mut cls, )?; let expr = Hir::class(hir::Class::Bytes(cls)); self.push(HirFrame::Expr(expr)); } } Ast::Repetition(ref x) => { let expr = self.pop().unwrap().unwrap_expr(); self.pop().unwrap().unwrap_repetition(); self.push(HirFrame::Expr(self.hir_repetition(x, expr))); } Ast::Group(ref x) => { let expr = self.pop().unwrap().unwrap_expr(); let old_flags = self.pop().unwrap().unwrap_group(); self.trans().flags.set(old_flags); self.push(HirFrame::Expr(self.hir_capture(x, expr))); } Ast::Concat(_) => { let mut exprs = vec![]; while let Some(expr) = self.pop_concat_expr() { if !matches!(*expr.kind(), HirKind::Empty) { exprs.push(expr); } } exprs.reverse(); self.push(HirFrame::Expr(Hir::concat(exprs))); } Ast::Alternation(_) => { let mut exprs = vec![]; while let Some(expr) = self.pop_alt_expr() { self.pop().unwrap().unwrap_alternation_pipe(); exprs.push(expr); } exprs.reverse(); self.push(HirFrame::Expr(Hir::alternation(exprs))); } } Ok(()) } fn visit_alternation_in(&mut self) -> Result<()> { self.push(HirFrame::AlternationBranch); Ok(()) } fn visit_class_set_item_pre( &mut self, ast: &ast::ClassSetItem, ) -> Result<()> { match *ast { ast::ClassSetItem::Bracketed(_) => { if self.flags().unicode() { let cls = hir::ClassUnicode::empty(); self.push(HirFrame::ClassUnicode(cls)); } else { let cls = hir::ClassBytes::empty(); self.push(HirFrame::ClassBytes(cls)); } } // We needn't handle the Union case here since the visitor will // do it for us. 
_ => {} } Ok(()) } fn visit_class_set_item_post( &mut self, ast: &ast::ClassSetItem, ) -> Result<()> { match *ast { ast::ClassSetItem::Empty(_) => {} ast::ClassSetItem::Literal(ref x) => { if self.flags().unicode() { let mut cls = self.pop().unwrap().unwrap_class_unicode(); cls.push(hir::ClassUnicodeRange::new(x.c, x.c)); self.push(HirFrame::ClassUnicode(cls)); } else { let mut cls = self.pop().unwrap().unwrap_class_bytes(); let byte = self.class_literal_byte(x)?; cls.push(hir::ClassBytesRange::new(byte, byte)); self.push(HirFrame::ClassBytes(cls)); } } ast::ClassSetItem::Range(ref x) => { if self.flags().unicode() { let mut cls = self.pop().unwrap().unwrap_class_unicode(); cls.push(hir::ClassUnicodeRange::new(x.start.c, x.end.c)); self.push(HirFrame::ClassUnicode(cls)); } else { let mut cls = self.pop().unwrap().unwrap_class_bytes(); let start = self.class_literal_byte(&x.start)?; let end = self.class_literal_byte(&x.end)?; cls.push(hir::ClassBytesRange::new(start, end)); self.push(HirFrame::ClassBytes(cls)); } } ast::ClassSetItem::Ascii(ref x) => { if self.flags().unicode() { let xcls = self.hir_ascii_unicode_class(x)?; let mut cls = self.pop().unwrap().unwrap_class_unicode(); cls.union(&xcls); self.push(HirFrame::ClassUnicode(cls)); } else { let xcls = self.hir_ascii_byte_class(x)?; let mut cls = self.pop().unwrap().unwrap_class_bytes(); cls.union(&xcls); self.push(HirFrame::ClassBytes(cls)); } } ast::ClassSetItem::Unicode(ref x) => { let xcls = self.hir_unicode_class(x)?; let mut cls = self.pop().unwrap().unwrap_class_unicode(); cls.union(&xcls); self.push(HirFrame::ClassUnicode(cls)); } ast::ClassSetItem::Perl(ref x) => { if self.flags().unicode() { let xcls = self.hir_perl_unicode_class(x)?; let mut cls = self.pop().unwrap().unwrap_class_unicode(); cls.union(&xcls); self.push(HirFrame::ClassUnicode(cls)); } else { let xcls = self.hir_perl_byte_class(x)?; let mut cls = self.pop().unwrap().unwrap_class_bytes(); cls.union(&xcls); self.push(HirFrame::ClassBytes(cls)); } } ast::ClassSetItem::Bracketed(ref ast) => { if self.flags().unicode() { let mut cls1 = self.pop().unwrap().unwrap_class_unicode(); self.unicode_fold_and_negate( &ast.span, ast.negated, &mut cls1, )?; let mut cls2 = self.pop().unwrap().unwrap_class_unicode(); cls2.union(&cls1); self.push(HirFrame::ClassUnicode(cls2)); } else { let mut cls1 = self.pop().unwrap().unwrap_class_bytes(); self.bytes_fold_and_negate( &ast.span, ast.negated, &mut cls1, )?; let mut cls2 = self.pop().unwrap().unwrap_class_bytes(); cls2.union(&cls1); self.push(HirFrame::ClassBytes(cls2)); } } // This is handled automatically by the visitor. 
ast::ClassSetItem::Union(_) => {} } Ok(()) } fn visit_class_set_binary_op_pre( &mut self, _op: &ast::ClassSetBinaryOp, ) -> Result<()> { if self.flags().unicode() { let cls = hir::ClassUnicode::empty(); self.push(HirFrame::ClassUnicode(cls)); } else { let cls = hir::ClassBytes::empty(); self.push(HirFrame::ClassBytes(cls)); } Ok(()) } fn visit_class_set_binary_op_in( &mut self, _op: &ast::ClassSetBinaryOp, ) -> Result<()> { if self.flags().unicode() { let cls = hir::ClassUnicode::empty(); self.push(HirFrame::ClassUnicode(cls)); } else { let cls = hir::ClassBytes::empty(); self.push(HirFrame::ClassBytes(cls)); } Ok(()) } fn visit_class_set_binary_op_post( &mut self, op: &ast::ClassSetBinaryOp, ) -> Result<()> { use crate::ast::ClassSetBinaryOpKind::*; if self.flags().unicode() { let mut rhs = self.pop().unwrap().unwrap_class_unicode(); let mut lhs = self.pop().unwrap().unwrap_class_unicode(); let mut cls = self.pop().unwrap().unwrap_class_unicode(); if self.flags().case_insensitive() { rhs.try_case_fold_simple().map_err(|_| { self.error( op.rhs.span().clone(), ErrorKind::UnicodeCaseUnavailable, ) })?; lhs.try_case_fold_simple().map_err(|_| { self.error( op.lhs.span().clone(), ErrorKind::UnicodeCaseUnavailable, ) })?; } match op.kind { Intersection => lhs.intersect(&rhs), Difference => lhs.difference(&rhs), SymmetricDifference => lhs.symmetric_difference(&rhs), } cls.union(&lhs); self.push(HirFrame::ClassUnicode(cls)); } else { let mut rhs = self.pop().unwrap().unwrap_class_bytes(); let mut lhs = self.pop().unwrap().unwrap_class_bytes(); let mut cls = self.pop().unwrap().unwrap_class_bytes(); if self.flags().case_insensitive() { rhs.case_fold_simple(); lhs.case_fold_simple(); } match op.kind { Intersection => lhs.intersect(&rhs), Difference => lhs.difference(&rhs), SymmetricDifference => lhs.symmetric_difference(&rhs), } cls.union(&lhs); self.push(HirFrame::ClassBytes(cls)); } Ok(()) } } /// The internal implementation of a translator. /// /// This type is responsible for carrying around the original pattern string, /// which is not tied to the internal state of a translator. /// /// A TranslatorI exists for the time it takes to translate a single Ast. #[derive(Clone, Debug)] struct TranslatorI<'t, 'p> { trans: &'t Translator, pattern: &'p str, } impl<'t, 'p> TranslatorI<'t, 'p> { /// Build a new internal translator. fn new(trans: &'t Translator, pattern: &'p str) -> TranslatorI<'t, 'p> { TranslatorI { trans, pattern } } /// Return a reference to the underlying translator. fn trans(&self) -> &Translator { &self.trans } /// Push the given frame on to the call stack. fn push(&self, frame: HirFrame) { self.trans().stack.borrow_mut().push(frame); } /// Push the given literal char on to the call stack. /// /// If the top-most element of the stack is a literal, then the char /// is appended to the end of that literal. Otherwise, a new literal /// containing just the given char is pushed to the top of the stack. fn push_char(&self, ch: char) { let mut buf = [0; 4]; let bytes = ch.encode_utf8(&mut buf).as_bytes(); let mut stack = self.trans().stack.borrow_mut(); if let Some(HirFrame::Literal(ref mut literal)) = stack.last_mut() { literal.extend_from_slice(bytes); } else { stack.push(HirFrame::Literal(bytes.to_vec())); } } /// Push the given literal byte on to the call stack. /// /// If the top-most element of the stack is a literal, then the byte /// is appended to the end of that literal. Otherwise, a new literal /// containing just the given byte is pushed to the top of the stack. 
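    ///
    /// For example (a sketch of the flattening behavior): pushing `b'a'` and
    /// then `b'b'` leaves a single `Literal` frame holding `b"ab"` on the
    /// stack, rather than two separate frames.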
fn push_byte(&self, byte: u8) { let mut stack = self.trans().stack.borrow_mut(); if let Some(HirFrame::Literal(ref mut literal)) = stack.last_mut() { literal.push(byte); } else { stack.push(HirFrame::Literal(vec![byte])); } } /// Pop the top of the call stack. If the call stack is empty, return None. fn pop(&self) -> Option<HirFrame> { self.trans().stack.borrow_mut().pop() } /// Pop an HIR expression from the top of the stack for a concatenation. /// /// This returns None if the stack is empty or when a concat frame is seen. /// Otherwise, it panics if it could not find an HIR expression. fn pop_concat_expr(&self) -> Option<Hir> { let frame = self.pop()?; match frame { HirFrame::Concat => None, HirFrame::Expr(expr) => Some(expr), HirFrame::Literal(lit) => Some(Hir::literal(lit)), HirFrame::ClassUnicode(_) => { unreachable!("expected expr or concat, got Unicode class") } HirFrame::ClassBytes(_) => { unreachable!("expected expr or concat, got byte class") } HirFrame::Repetition => { unreachable!("expected expr or concat, got repetition") } HirFrame::Group { .. } => { unreachable!("expected expr or concat, got group") } HirFrame::Alternation => { unreachable!("expected expr or concat, got alt marker") } HirFrame::AlternationBranch => { unreachable!("expected expr or concat, got alt branch marker") } } } /// Pop an HIR expression from the top of the stack for an alternation. /// /// This returns None if the stack is empty or when an alternation frame is /// seen. Otherwise, it panics if it could not find an HIR expression. fn pop_alt_expr(&self) -> Option<Hir> { let frame = self.pop()?; match frame { HirFrame::Alternation => None, HirFrame::Expr(expr) => Some(expr), HirFrame::Literal(lit) => Some(Hir::literal(lit)), HirFrame::ClassUnicode(_) => { unreachable!("expected expr or alt, got Unicode class") } HirFrame::ClassBytes(_) => { unreachable!("expected expr or alt, got byte class") } HirFrame::Repetition => { unreachable!("expected expr or alt, got repetition") } HirFrame::Group { .. } => { unreachable!("expected expr or alt, got group") } HirFrame::Concat => { unreachable!("expected expr or alt, got concat marker") } HirFrame::AlternationBranch => { unreachable!("expected expr or alt, got alt branch marker") } } } /// Create a new error with the given span and error type. fn error(&self, span: Span, kind: ErrorKind) -> Error { Error { kind, pattern: self.pattern.to_string(), span } } /// Return a copy of the active flags. fn flags(&self) -> Flags { self.trans().flags.get() } /// Set the flags of this translator from the flags set in the given AST. /// Then, return the old flags. fn set_flags(&self, ast_flags: &ast::Flags) -> Flags { let old_flags = self.flags(); let mut new_flags = Flags::from_ast(ast_flags); new_flags.merge(&old_flags); self.trans().flags.set(new_flags); old_flags } /// Convert an Ast literal to its scalar representation. /// /// When Unicode mode is enabled, then this always succeeds and returns a /// `char` (Unicode scalar value). /// /// When Unicode mode is disabled, then a `char` will still be returned /// whenever possible. A byte is returned only when invalid UTF-8 is /// allowed and when the byte is not ASCII. Otherwise, a non-ASCII byte /// will result in an error when invalid UTF-8 is not allowed. 
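    ///
    /// A brief sketch of the three outcomes: in Unicode mode, a literal `a`
    /// yields `Either::Left('a')`; with Unicode mode disabled and UTF-8
    /// checking disabled, `(?-u)\xFF` yields `Either::Right(0xFF)`; with
    /// UTF-8 checking enabled, that same non-ASCII byte is an `InvalidUtf8`
    /// error.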
fn ast_literal_to_scalar( &self, lit: &ast::Literal, ) -> Result<Either<char, u8>> { if self.flags().unicode() { return Ok(Either::Left(lit.c)); } let byte = match lit.byte() { None => return Ok(Either::Left(lit.c)), Some(byte) => byte, }; if byte <= 0x7F { return Ok(Either::Left(char::try_from(byte).unwrap())); } if self.trans().utf8 { return Err(self.error(lit.span, ErrorKind::InvalidUtf8)); } Ok(Either::Right(byte)) } fn case_fold_char(&self, span: Span, c: char) -> Result<Option<Hir>> { if !self.flags().case_insensitive() { return Ok(None); } if self.flags().unicode() { // If case folding won't do anything, then don't bother trying. let map = unicode::SimpleCaseFolder::new() .map(|f| f.overlaps(c, c)) .map_err(|_| { self.error(span, ErrorKind::UnicodeCaseUnavailable) })?; if !map { return Ok(None); } let mut cls = hir::ClassUnicode::new(vec![hir::ClassUnicodeRange::new( c, c, )]); cls.try_case_fold_simple().map_err(|_| { self.error(span, ErrorKind::UnicodeCaseUnavailable) })?; Ok(Some(Hir::class(hir::Class::Unicode(cls)))) } else { if c.len_utf8() > 1 { return Err(self.error(span, ErrorKind::UnicodeNotAllowed)); } // If case folding won't do anything, then don't bother trying. match c { 'A'..='Z' | 'a'..='z' => {} _ => return Ok(None), } let mut cls = hir::ClassBytes::new(vec![hir::ClassBytesRange::new( // OK because 'c.len_utf8() == 1' which in turn implies // that 'c' is ASCII. u8::try_from(c).unwrap(), u8::try_from(c).unwrap(), )]); cls.case_fold_simple(); Ok(Some(Hir::class(hir::Class::Bytes(cls)))) } } fn hir_dot(&self, span: Span) -> Result<Hir> { let (utf8, lineterm, flags) = (self.trans().utf8, self.trans().line_terminator, self.flags()); if utf8 && (!flags.unicode() || !lineterm.is_ascii()) { return Err(self.error(span, ErrorKind::InvalidUtf8)); } let dot = if flags.dot_matches_new_line() { if flags.unicode() { hir::Dot::AnyChar } else { hir::Dot::AnyByte } } else { if flags.unicode() { if flags.crlf() { hir::Dot::AnyCharExceptCRLF } else { if !lineterm.is_ascii() { return Err( self.error(span, ErrorKind::InvalidLineTerminator) ); } hir::Dot::AnyCharExcept(char::from(lineterm)) } } else { if flags.crlf() { hir::Dot::AnyByteExceptCRLF } else { hir::Dot::AnyByteExcept(lineterm) } } }; Ok(Hir::dot(dot)) } fn hir_assertion(&self, asst: &ast::Assertion) -> Result<Hir> { let unicode = self.flags().unicode(); let multi_line = self.flags().multi_line(); let crlf = self.flags().crlf(); Ok(match asst.kind { ast::AssertionKind::StartLine => Hir::look(if multi_line { if crlf { hir::Look::StartCRLF } else { hir::Look::StartLF } } else { hir::Look::Start }), ast::AssertionKind::EndLine => Hir::look(if multi_line { if crlf { hir::Look::EndCRLF } else { hir::Look::EndLF } } else { hir::Look::End }), ast::AssertionKind::StartText => Hir::look(hir::Look::Start), ast::AssertionKind::EndText => Hir::look(hir::Look::End), ast::AssertionKind::WordBoundary => Hir::look(if unicode { hir::Look::WordUnicode } else { hir::Look::WordAscii }), ast::AssertionKind::NotWordBoundary => Hir::look(if unicode { hir::Look::WordUnicodeNegate } else { hir::Look::WordAsciiNegate }), }) } fn hir_capture(&self, group: &ast::Group, expr: Hir) -> Hir { let (index, name) = match group.kind { ast::GroupKind::CaptureIndex(index) => (index, None), ast::GroupKind::CaptureName { ref name, .. } => { (name.index, Some(name.name.clone().into_boxed_str())) } // The HIR doesn't need to use non-capturing groups, since the way // in which the data type is defined handles this automatically. 
ast::GroupKind::NonCapturing(_) => return expr, }; Hir::capture(hir::Capture { index, name, sub: Box::new(expr) }) } fn hir_repetition(&self, rep: &ast::Repetition, expr: Hir) -> Hir { let (min, max) = match rep.op.kind { ast::RepetitionKind::ZeroOrOne => (0, Some(1)), ast::RepetitionKind::ZeroOrMore => (0, None), ast::RepetitionKind::OneOrMore => (1, None), ast::RepetitionKind::Range(ast::RepetitionRange::Exactly(m)) => { (m, Some(m)) } ast::RepetitionKind::Range(ast::RepetitionRange::AtLeast(m)) => { (m, None) } ast::RepetitionKind::Range(ast::RepetitionRange::Bounded( m, n, )) => (m, Some(n)), }; let greedy = if self.flags().swap_greed() { !rep.greedy } else { rep.greedy }; Hir::repetition(hir::Repetition { min, max, greedy, sub: Box::new(expr), }) } fn hir_unicode_class( &self, ast_class: &ast::ClassUnicode, ) -> Result<hir::ClassUnicode> { use crate::ast::ClassUnicodeKind::*; if !self.flags().unicode() { return Err( self.error(ast_class.span, ErrorKind::UnicodeNotAllowed) ); } let query = match ast_class.kind { OneLetter(name) => ClassQuery::OneLetter(name), Named(ref name) => ClassQuery::Binary(name), NamedValue { ref name, ref value, .. } => ClassQuery::ByValue { property_name: name, property_value: value, }, }; let mut result = self.convert_unicode_class_error( &ast_class.span, unicode::class(query), ); if let Ok(ref mut class) = result { self.unicode_fold_and_negate( &ast_class.span, ast_class.negated, class, )?; } result } fn hir_ascii_unicode_class( &self, ast: &ast::ClassAscii, ) -> Result<hir::ClassUnicode> { let mut cls = hir::ClassUnicode::new( ascii_class_as_chars(&ast.kind) .map(|(s, e)| hir::ClassUnicodeRange::new(s, e)), ); self.unicode_fold_and_negate(&ast.span, ast.negated, &mut cls)?; Ok(cls) } fn hir_ascii_byte_class( &self, ast: &ast::ClassAscii, ) -> Result<hir::ClassBytes> { let mut cls = hir::ClassBytes::new( ascii_class(&ast.kind) .map(|(s, e)| hir::ClassBytesRange::new(s, e)), ); self.bytes_fold_and_negate(&ast.span, ast.negated, &mut cls)?; Ok(cls) } fn hir_perl_unicode_class( &self, ast_class: &ast::ClassPerl, ) -> Result<hir::ClassUnicode> { use crate::ast::ClassPerlKind::*; assert!(self.flags().unicode()); let result = match ast_class.kind { Digit => unicode::perl_digit(), Space => unicode::perl_space(), Word => unicode::perl_word(), }; let mut class = self.convert_unicode_class_error(&ast_class.span, result)?; // We needn't apply case folding here because the Perl Unicode classes // are already closed under Unicode simple case folding. if ast_class.negated { class.negate(); } Ok(class) } fn hir_perl_byte_class( &self, ast_class: &ast::ClassPerl, ) -> Result<hir::ClassBytes> { use crate::ast::ClassPerlKind::*; assert!(!self.flags().unicode()); let mut class = match ast_class.kind { Digit => hir_ascii_class_bytes(&ast::ClassAsciiKind::Digit), Space => hir_ascii_class_bytes(&ast::ClassAsciiKind::Space), Word => hir_ascii_class_bytes(&ast::ClassAsciiKind::Word), }; // We needn't apply case folding here because the Perl ASCII classes // are already closed (under ASCII case folding). if ast_class.negated { class.negate(); } // Negating a Perl byte class is likely to cause it to match invalid // UTF-8. That's only OK if the translator is configured to allow such // things. if self.trans().utf8 && !class.is_ascii() { return Err(self.error(ast_class.span, ErrorKind::InvalidUtf8)); } Ok(class) } /// Converts the given Unicode specific error to an HIR translation error. /// /// The span given should approximate the position at which an error would /// occur. 
fn convert_unicode_class_error( &self, span: &Span, result: core::result::Result<hir::ClassUnicode, unicode::Error>, ) -> Result<hir::ClassUnicode> { result.map_err(|err| { let sp = span.clone(); match err { unicode::Error::PropertyNotFound => { self.error(sp, ErrorKind::UnicodePropertyNotFound) } unicode::Error::PropertyValueNotFound => { self.error(sp, ErrorKind::UnicodePropertyValueNotFound) } unicode::Error::PerlClassNotFound => { self.error(sp, ErrorKind::UnicodePerlClassNotFound) } } }) } fn unicode_fold_and_negate( &self, span: &Span, negated: bool, class: &mut hir::ClassUnicode, ) -> Result<()> { // Note that we must apply case folding before negation! // Consider `(?i)[^x]`. If we applied negation first, then // the result would be the character class that matched any // Unicode scalar value. if self.flags().case_insensitive() { class.try_case_fold_simple().map_err(|_| { self.error(span.clone(), ErrorKind::UnicodeCaseUnavailable) })?; } if negated { class.negate(); } Ok(()) } fn bytes_fold_and_negate( &self, span: &Span, negated: bool, class: &mut hir::ClassBytes, ) -> Result<()> { // Note that we must apply case folding before negation! // Consider `(?i)[^x]`. If we applied negation first, then // the result would be the character class that matched any // Unicode scalar value. if self.flags().case_insensitive() { class.case_fold_simple(); } if negated { class.negate(); } if self.trans().utf8 && !class.is_ascii() { return Err(self.error(span.clone(), ErrorKind::InvalidUtf8)); } Ok(()) } /// Return a scalar byte value suitable for use as a literal in a byte /// character class. fn class_literal_byte(&self, ast: &ast::Literal) -> Result<u8> { match self.ast_literal_to_scalar(ast)? { Either::Right(byte) => Ok(byte), Either::Left(ch) => { let cp = u32::from(ch); if cp <= 0x7F { Ok(u8::try_from(cp).unwrap()) } else { // We can't feasibly support Unicode in // byte oriented classes. Byte classes don't // do Unicode case folding. Err(self.error(ast.span, ErrorKind::UnicodeNotAllowed)) } } } } } /// A translator's representation of a regular expression's flags at any given /// moment in time. /// /// Each flag can be in one of three states: absent, present but disabled or /// present but enabled. #[derive(Clone, Copy, Debug, Default)] struct Flags { case_insensitive: Option<bool>, multi_line: Option<bool>, dot_matches_new_line: Option<bool>, swap_greed: Option<bool>, unicode: Option<bool>, crlf: Option<bool>, // Note that `ignore_whitespace` is omitted here because it is handled // entirely in the parser. 
} impl Flags { fn from_ast(ast: &ast::Flags) -> Flags { let mut flags = Flags::default(); let mut enable = true; for item in &ast.items { match item.kind { ast::FlagsItemKind::Negation => { enable = false; } ast::FlagsItemKind::Flag(ast::Flag::CaseInsensitive) => { flags.case_insensitive = Some(enable); } ast::FlagsItemKind::Flag(ast::Flag::MultiLine) => { flags.multi_line = Some(enable); } ast::FlagsItemKind::Flag(ast::Flag::DotMatchesNewLine) => { flags.dot_matches_new_line = Some(enable); } ast::FlagsItemKind::Flag(ast::Flag::SwapGreed) => { flags.swap_greed = Some(enable); } ast::FlagsItemKind::Flag(ast::Flag::Unicode) => { flags.unicode = Some(enable); } ast::FlagsItemKind::Flag(ast::Flag::CRLF) => { flags.crlf = Some(enable); } ast::FlagsItemKind::Flag(ast::Flag::IgnoreWhitespace) => {} } } flags } fn merge(&mut self, previous: &Flags) { if self.case_insensitive.is_none() { self.case_insensitive = previous.case_insensitive; } if self.multi_line.is_none() { self.multi_line = previous.multi_line; } if self.dot_matches_new_line.is_none() { self.dot_matches_new_line = previous.dot_matches_new_line; } if self.swap_greed.is_none() { self.swap_greed = previous.swap_greed; } if self.unicode.is_none() { self.unicode = previous.unicode; } if self.crlf.is_none() { self.crlf = previous.crlf; } } fn case_insensitive(&self) -> bool { self.case_insensitive.unwrap_or(false) } fn multi_line(&self) -> bool { self.multi_line.unwrap_or(false) } fn dot_matches_new_line(&self) -> bool { self.dot_matches_new_line.unwrap_or(false) } fn swap_greed(&self) -> bool { self.swap_greed.unwrap_or(false) } fn unicode(&self) -> bool { self.unicode.unwrap_or(true) } fn crlf(&self) -> bool { self.crlf.unwrap_or(false) } } fn hir_ascii_class_bytes(kind: &ast::ClassAsciiKind) -> hir::ClassBytes { let ranges: Vec<_> = ascii_class(kind) .map(|(s, e)| hir::ClassBytesRange::new(s, e)) .collect(); hir::ClassBytes::new(ranges) } fn ascii_class(kind: &ast::ClassAsciiKind) -> impl Iterator<Item = (u8, u8)> { use crate::ast::ClassAsciiKind::*; let slice: &'static [(u8, u8)] = match *kind { Alnum => &[(b'0', b'9'), (b'A', b'Z'), (b'a', b'z')], Alpha => &[(b'A', b'Z'), (b'a', b'z')], Ascii => &[(b'\x00', b'\x7F')], Blank => &[(b'\t', b'\t'), (b' ', b' ')], Cntrl => &[(b'\x00', b'\x1F'), (b'\x7F', b'\x7F')], Digit => &[(b'0', b'9')], Graph => &[(b'!', b'~')], Lower => &[(b'a', b'z')], Print => &[(b' ', b'~')], Punct => &[(b'!', b'/'), (b':', b'@'), (b'[', b'`'), (b'{', b'~')], Space => &[ (b'\t', b'\t'), (b'\n', b'\n'), (b'\x0B', b'\x0B'), (b'\x0C', b'\x0C'), (b'\r', b'\r'), (b' ', b' '), ], Upper => &[(b'A', b'Z')], Word => &[(b'0', b'9'), (b'A', b'Z'), (b'_', b'_'), (b'a', b'z')], Xdigit => &[(b'0', b'9'), (b'A', b'F'), (b'a', b'f')], }; slice.iter().copied() } fn ascii_class_as_chars( kind: &ast::ClassAsciiKind, ) -> impl Iterator<Item = (char, char)> { ascii_class(kind).map(|(s, e)| (char::from(s), char::from(e))) } #[cfg(test)] mod tests { use crate::{ ast::{self, parse::ParserBuilder, Ast, Position, Span}, hir::{self, Hir, HirKind, Look, Properties}, unicode::{self, ClassQuery}, }; use super::*; // We create these errors to compare with real hir::Errors in the tests. // We define equality between TestError and hir::Error to disregard the // pattern string in hir::Error, which is annoying to provide in tests. 
#[derive(Clone, Debug)] struct TestError { span: Span, kind: hir::ErrorKind, } impl PartialEq<hir::Error> for TestError { fn eq(&self, other: &hir::Error) -> bool { self.span == other.span && self.kind == other.kind } } impl PartialEq<TestError> for hir::Error { fn eq(&self, other: &TestError) -> bool { self.span == other.span && self.kind == other.kind } } fn parse(pattern: &str) -> Ast { ParserBuilder::new().octal(true).build().parse(pattern).unwrap() } fn t(pattern: &str) -> Hir { TranslatorBuilder::new() .utf8(true) .build() .translate(pattern, &parse(pattern)) .unwrap() } fn t_err(pattern: &str) -> hir::Error { TranslatorBuilder::new() .utf8(true) .build() .translate(pattern, &parse(pattern)) .unwrap_err() } fn t_bytes(pattern: &str) -> Hir { TranslatorBuilder::new() .utf8(false) .build() .translate(pattern, &parse(pattern)) .unwrap() } fn props(pattern: &str) -> Properties { t(pattern).properties().clone() } fn props_bytes(pattern: &str) -> Properties { t_bytes(pattern).properties().clone() } fn hir_lit(s: &str) -> Hir { hir_blit(s.as_bytes()) } fn hir_blit(s: &[u8]) -> Hir { Hir::literal(s) } fn hir_capture(index: u32, expr: Hir) -> Hir { Hir::capture(hir::Capture { index, name: None, sub: Box::new(expr) }) } fn hir_capture_name(index: u32, name: &str, expr: Hir) -> Hir { Hir::capture(hir::Capture { index, name: Some(name.into()), sub: Box::new(expr), }) } fn hir_quest(greedy: bool, expr: Hir) -> Hir { Hir::repetition(hir::Repetition { min: 0, max: Some(1), greedy, sub: Box::new(expr), }) } fn hir_star(greedy: bool, expr: Hir) -> Hir { Hir::repetition(hir::Repetition { min: 0, max: None, greedy, sub: Box::new(expr), }) } fn hir_plus(greedy: bool, expr: Hir) -> Hir { Hir::repetition(hir::Repetition { min: 1, max: None, greedy, sub: Box::new(expr), }) } fn hir_range(greedy: bool, min: u32, max: Option<u32>, expr: Hir) -> Hir { Hir::repetition(hir::Repetition { min, max, greedy, sub: Box::new(expr), }) } fn hir_alt(alts: Vec<Hir>) -> Hir { Hir::alternation(alts) } fn hir_cat(exprs: Vec<Hir>) -> Hir { Hir::concat(exprs) } #[allow(dead_code)] fn hir_uclass_query(query: ClassQuery<'_>) -> Hir { Hir::class(hir::Class::Unicode(unicode::class(query).unwrap())) } #[allow(dead_code)] fn hir_uclass_perl_word() -> Hir { Hir::class(hir::Class::Unicode(unicode::perl_word().unwrap())) } fn hir_ascii_uclass(kind: &ast::ClassAsciiKind) -> Hir { Hir::class(hir::Class::Unicode(hir::ClassUnicode::new( ascii_class_as_chars(kind) .map(|(s, e)| hir::ClassUnicodeRange::new(s, e)), ))) } fn hir_ascii_bclass(kind: &ast::ClassAsciiKind) -> Hir { Hir::class(hir::Class::Bytes(hir::ClassBytes::new( ascii_class(kind).map(|(s, e)| hir::ClassBytesRange::new(s, e)), ))) } fn hir_uclass(ranges: &[(char, char)]) -> Hir { Hir::class(uclass(ranges)) } fn hir_bclass(ranges: &[(u8, u8)]) -> Hir { Hir::class(bclass(ranges)) } fn hir_case_fold(expr: Hir) -> Hir { match expr.into_kind() { HirKind::Class(mut cls) => { cls.case_fold_simple(); Hir::class(cls) } _ => panic!("cannot case fold non-class Hir expr"), } } fn hir_negate(expr: Hir) -> Hir { match expr.into_kind() { HirKind::Class(mut cls) => { cls.negate(); Hir::class(cls) } _ => panic!("cannot negate non-class Hir expr"), } } fn uclass(ranges: &[(char, char)]) -> hir::Class { let ranges: Vec<hir::ClassUnicodeRange> = ranges .iter() .map(|&(s, e)| hir::ClassUnicodeRange::new(s, e)) .collect(); hir::Class::Unicode(hir::ClassUnicode::new(ranges)) } fn bclass(ranges: &[(u8, u8)]) -> hir::Class { let ranges: Vec<hir::ClassBytesRange> = ranges .iter() .map(|&(s, e)| 
hir::ClassBytesRange::new(s, e)) .collect(); hir::Class::Bytes(hir::ClassBytes::new(ranges)) } #[cfg(feature = "unicode-case")] fn class_case_fold(mut cls: hir::Class) -> Hir { cls.case_fold_simple(); Hir::class(cls) } fn class_negate(mut cls: hir::Class) -> Hir { cls.negate(); Hir::class(cls) } #[allow(dead_code)] fn hir_union(expr1: Hir, expr2: Hir) -> Hir { use crate::hir::Class::{Bytes, Unicode}; match (expr1.into_kind(), expr2.into_kind()) { (HirKind::Class(Unicode(mut c1)), HirKind::Class(Unicode(c2))) => { c1.union(&c2); Hir::class(hir::Class::Unicode(c1)) } (HirKind::Class(Bytes(mut c1)), HirKind::Class(Bytes(c2))) => { c1.union(&c2); Hir::class(hir::Class::Bytes(c1)) } _ => panic!("cannot union non-class Hir exprs"), } } #[allow(dead_code)] fn hir_difference(expr1: Hir, expr2: Hir) -> Hir { use crate::hir::Class::{Bytes, Unicode}; match (expr1.into_kind(), expr2.into_kind()) { (HirKind::Class(Unicode(mut c1)), HirKind::Class(Unicode(c2))) => { c1.difference(&c2); Hir::class(hir::Class::Unicode(c1)) } (HirKind::Class(Bytes(mut c1)), HirKind::Class(Bytes(c2))) => { c1.difference(&c2); Hir::class(hir::Class::Bytes(c1)) } _ => panic!("cannot difference non-class Hir exprs"), } } fn hir_look(look: hir::Look) -> Hir { Hir::look(look) } #[test] fn empty() { assert_eq!(t(""), Hir::empty()); assert_eq!(t("(?i)"), Hir::empty()); assert_eq!(t("()"), hir_capture(1, Hir::empty())); assert_eq!(t("(?:)"), Hir::empty()); assert_eq!(t("(?P<wat>)"), hir_capture_name(1, "wat", Hir::empty())); assert_eq!(t("|"), hir_alt(vec![Hir::empty(), Hir::empty()])); assert_eq!( t("()|()"), hir_alt(vec![ hir_capture(1, Hir::empty()), hir_capture(2, Hir::empty()), ]) ); assert_eq!( t("(|b)"), hir_capture(1, hir_alt(vec![Hir::empty(), hir_lit("b"),])) ); assert_eq!( t("(a|)"), hir_capture(1, hir_alt(vec![hir_lit("a"), Hir::empty(),])) ); assert_eq!( t("(a||c)"), hir_capture( 1, hir_alt(vec![hir_lit("a"), Hir::empty(), hir_lit("c"),]) ) ); assert_eq!( t("(||)"), hir_capture( 1, hir_alt(vec![Hir::empty(), Hir::empty(), Hir::empty(),]) ) ); } #[test] fn literal() { assert_eq!(t("a"), hir_lit("a")); assert_eq!(t("(?-u)a"), hir_lit("a")); assert_eq!(t("☃"), hir_lit("☃")); assert_eq!(t("abcd"), hir_lit("abcd")); assert_eq!(t_bytes("(?-u)a"), hir_lit("a")); assert_eq!(t_bytes("(?-u)\x61"), hir_lit("a")); assert_eq!(t_bytes(r"(?-u)\x61"), hir_lit("a")); assert_eq!(t_bytes(r"(?-u)\xFF"), hir_blit(b"\xFF")); assert_eq!( t_err("(?-u)☃"), TestError { kind: hir::ErrorKind::UnicodeNotAllowed, span: Span::new( Position::new(5, 1, 6), Position::new(8, 1, 7) ), } ); assert_eq!( t_err(r"(?-u)\xFF"), TestError { kind: hir::ErrorKind::InvalidUtf8, span: Span::new( Position::new(5, 1, 6), Position::new(9, 1, 10) ), } ); } #[test] fn literal_case_insensitive() { #[cfg(feature = "unicode-case")] assert_eq!(t("(?i)a"), hir_uclass(&[('A', 'A'), ('a', 'a'),])); #[cfg(feature = "unicode-case")] assert_eq!(t("(?i:a)"), hir_uclass(&[('A', 'A'), ('a', 'a')])); #[cfg(feature = "unicode-case")] assert_eq!( t("a(?i)a(?-i)a"), hir_cat(vec![ hir_lit("a"), hir_uclass(&[('A', 'A'), ('a', 'a')]), hir_lit("a"), ]) ); #[cfg(feature = "unicode-case")] assert_eq!( t("(?i)ab@c"), hir_cat(vec![ hir_uclass(&[('A', 'A'), ('a', 'a')]), hir_uclass(&[('B', 'B'), ('b', 'b')]), hir_lit("@"), hir_uclass(&[('C', 'C'), ('c', 'c')]), ]) ); #[cfg(feature = "unicode-case")] assert_eq!( t("(?i)β"), hir_uclass(&[('Β', 'Β'), ('β', 'β'), ('ϐ', 'ϐ'),]) ); assert_eq!(t("(?i-u)a"), hir_bclass(&[(b'A', b'A'), (b'a', b'a'),])); #[cfg(feature = "unicode-case")] assert_eq!( 
t("(?-u)a(?i)a(?-i)a"), hir_cat(vec![ hir_lit("a"), hir_bclass(&[(b'A', b'A'), (b'a', b'a')]), hir_lit("a"), ]) ); assert_eq!( t("(?i-u)ab@c"), hir_cat(vec![ hir_bclass(&[(b'A', b'A'), (b'a', b'a')]), hir_bclass(&[(b'B', b'B'), (b'b', b'b')]), hir_lit("@"), hir_bclass(&[(b'C', b'C'), (b'c', b'c')]), ]) ); assert_eq!( t_bytes("(?i-u)a"), hir_bclass(&[(b'A', b'A'), (b'a', b'a'),]) ); assert_eq!( t_bytes("(?i-u)\x61"), hir_bclass(&[(b'A', b'A'), (b'a', b'a'),]) ); assert_eq!( t_bytes(r"(?i-u)\x61"), hir_bclass(&[(b'A', b'A'), (b'a', b'a'),]) ); assert_eq!(t_bytes(r"(?i-u)\xFF"), hir_blit(b"\xFF")); assert_eq!( t_err("(?i-u)β"), TestError { kind: hir::ErrorKind::UnicodeNotAllowed, span: Span::new( Position::new(6, 1, 7), Position::new(8, 1, 8), ), } ); } #[test] fn dot() { assert_eq!( t("."), hir_uclass(&[('\0', '\t'), ('\x0B', '\u{10FFFF}')]) ); assert_eq!( t("(?R)."), hir_uclass(&[ ('\0', '\t'), ('\x0B', '\x0C'), ('\x0E', '\u{10FFFF}'), ]) ); assert_eq!(t("(?s)."), hir_uclass(&[('\0', '\u{10FFFF}')])); assert_eq!(t("(?Rs)."), hir_uclass(&[('\0', '\u{10FFFF}')])); assert_eq!( t_bytes("(?-u)."), hir_bclass(&[(b'\0', b'\t'), (b'\x0B', b'\xFF')]) ); assert_eq!( t_bytes("(?R-u)."), hir_bclass(&[ (b'\0', b'\t'), (b'\x0B', b'\x0C'), (b'\x0E', b'\xFF'), ]) ); assert_eq!(t_bytes("(?s-u)."), hir_bclass(&[(b'\0', b'\xFF'),])); assert_eq!(t_bytes("(?Rs-u)."), hir_bclass(&[(b'\0', b'\xFF'),])); // If invalid UTF-8 isn't allowed, then non-Unicode `.` isn't allowed. assert_eq!( t_err("(?-u)."), TestError { kind: hir::ErrorKind::InvalidUtf8, span: Span::new( Position::new(5, 1, 6), Position::new(6, 1, 7) ), } ); assert_eq!( t_err("(?R-u)."), TestError { kind: hir::ErrorKind::InvalidUtf8, span: Span::new( Position::new(6, 1, 7), Position::new(7, 1, 8) ), } ); assert_eq!( t_err("(?s-u)."), TestError { kind: hir::ErrorKind::InvalidUtf8, span: Span::new( Position::new(6, 1, 7), Position::new(7, 1, 8) ), } ); assert_eq!( t_err("(?Rs-u)."), TestError { kind: hir::ErrorKind::InvalidUtf8, span: Span::new( Position::new(7, 1, 8), Position::new(8, 1, 9) ), } ); } #[test] fn assertions() { assert_eq!(t("^"), hir_look(hir::Look::Start)); assert_eq!(t("$"), hir_look(hir::Look::End)); assert_eq!(t(r"\A"), hir_look(hir::Look::Start)); assert_eq!(t(r"\z"), hir_look(hir::Look::End)); assert_eq!(t("(?m)^"), hir_look(hir::Look::StartLF)); assert_eq!(t("(?m)$"), hir_look(hir::Look::EndLF)); assert_eq!(t(r"(?m)\A"), hir_look(hir::Look::Start)); assert_eq!(t(r"(?m)\z"), hir_look(hir::Look::End)); assert_eq!(t(r"\b"), hir_look(hir::Look::WordUnicode)); assert_eq!(t(r"\B"), hir_look(hir::Look::WordUnicodeNegate)); assert_eq!(t(r"(?-u)\b"), hir_look(hir::Look::WordAscii)); assert_eq!(t(r"(?-u)\B"), hir_look(hir::Look::WordAsciiNegate)); } #[test] fn group() { assert_eq!(t("(a)"), hir_capture(1, hir_lit("a"))); assert_eq!( t("(a)(b)"), hir_cat(vec![ hir_capture(1, hir_lit("a")), hir_capture(2, hir_lit("b")), ]) ); assert_eq!( t("(a)|(b)"), hir_alt(vec![ hir_capture(1, hir_lit("a")), hir_capture(2, hir_lit("b")), ]) ); assert_eq!(t("(?P<foo>)"), hir_capture_name(1, "foo", Hir::empty())); assert_eq!(t("(?P<foo>a)"), hir_capture_name(1, "foo", hir_lit("a"))); assert_eq!( t("(?P<foo>a)(?P<bar>b)"), hir_cat(vec![ hir_capture_name(1, "foo", hir_lit("a")), hir_capture_name(2, "bar", hir_lit("b")), ]) ); assert_eq!(t("(?:)"), Hir::empty()); assert_eq!(t("(?:a)"), hir_lit("a")); assert_eq!( t("(?:a)(b)"), hir_cat(vec![hir_lit("a"), hir_capture(1, hir_lit("b")),]) ); assert_eq!( t("(a)(?:b)(c)"), hir_cat(vec![ hir_capture(1, hir_lit("a")), 
hir_lit("b"), hir_capture(2, hir_lit("c")), ]) ); assert_eq!( t("(a)(?P<foo>b)(c)"), hir_cat(vec![ hir_capture(1, hir_lit("a")), hir_capture_name(2, "foo", hir_lit("b")), hir_capture(3, hir_lit("c")), ]) ); assert_eq!(t("()"), hir_capture(1, Hir::empty())); assert_eq!(t("((?i))"), hir_capture(1, Hir::empty())); assert_eq!(t("((?x))"), hir_capture(1, Hir::empty())); assert_eq!( t("(((?x)))"), hir_capture(1, hir_capture(2, Hir::empty())) ); } #[test] fn line_anchors() { assert_eq!(t("^"), hir_look(hir::Look::Start)); assert_eq!(t("$"), hir_look(hir::Look::End)); assert_eq!(t(r"\A"), hir_look(hir::Look::Start)); assert_eq!(t(r"\z"), hir_look(hir::Look::End)); assert_eq!(t(r"(?m)\A"), hir_look(hir::Look::Start)); assert_eq!(t(r"(?m)\z"), hir_look(hir::Look::End)); assert_eq!(t("(?m)^"), hir_look(hir::Look::StartLF)); assert_eq!(t("(?m)$"), hir_look(hir::Look::EndLF)); assert_eq!(t(r"(?R)\A"), hir_look(hir::Look::Start)); assert_eq!(t(r"(?R)\z"), hir_look(hir::Look::End)); assert_eq!(t("(?R)^"), hir_look(hir::Look::Start)); assert_eq!(t("(?R)$"), hir_look(hir::Look::End)); assert_eq!(t(r"(?Rm)\A"), hir_look(hir::Look::Start)); assert_eq!(t(r"(?Rm)\z"), hir_look(hir::Look::End)); assert_eq!(t("(?Rm)^"), hir_look(hir::Look::StartCRLF)); assert_eq!(t("(?Rm)$"), hir_look(hir::Look::EndCRLF)); } #[test] fn flags() { #[cfg(feature = "unicode-case")] assert_eq!( t("(?i:a)a"), hir_cat( vec![hir_uclass(&[('A', 'A'), ('a', 'a')]), hir_lit("a"),] ) ); assert_eq!( t("(?i-u:a)β"), hir_cat(vec![ hir_bclass(&[(b'A', b'A'), (b'a', b'a')]), hir_lit("β"), ]) ); assert_eq!( t("(?:(?i-u)a)b"), hir_cat(vec![ hir_bclass(&[(b'A', b'A'), (b'a', b'a')]), hir_lit("b"), ]) ); assert_eq!( t("((?i-u)a)b"), hir_cat(vec![ hir_capture(1, hir_bclass(&[(b'A', b'A'), (b'a', b'a')])), hir_lit("b"), ]) ); #[cfg(feature = "unicode-case")] assert_eq!( t("(?i)(?-i:a)a"), hir_cat( vec![hir_lit("a"), hir_uclass(&[('A', 'A'), ('a', 'a')]),] ) ); #[cfg(feature = "unicode-case")] assert_eq!( t("(?im)a^"), hir_cat(vec![ hir_uclass(&[('A', 'A'), ('a', 'a')]), hir_look(hir::Look::StartLF), ]) ); #[cfg(feature = "unicode-case")] assert_eq!( t("(?im)a^(?i-m)a^"), hir_cat(vec![ hir_uclass(&[('A', 'A'), ('a', 'a')]), hir_look(hir::Look::StartLF), hir_uclass(&[('A', 'A'), ('a', 'a')]), hir_look(hir::Look::Start), ]) ); assert_eq!( t("(?U)a*a*?(?-U)a*a*?"), hir_cat(vec![ hir_star(false, hir_lit("a")), hir_star(true, hir_lit("a")), hir_star(true, hir_lit("a")), hir_star(false, hir_lit("a")), ]) ); #[cfg(feature = "unicode-case")] assert_eq!( t("(?:a(?i)a)a"), hir_cat(vec![ hir_cat(vec![ hir_lit("a"), hir_uclass(&[('A', 'A'), ('a', 'a')]), ]), hir_lit("a"), ]) ); #[cfg(feature = "unicode-case")] assert_eq!( t("(?i)(?:a(?-i)a)a"), hir_cat(vec![ hir_cat(vec![ hir_uclass(&[('A', 'A'), ('a', 'a')]), hir_lit("a"), ]), hir_uclass(&[('A', 'A'), ('a', 'a')]), ]) ); } #[test] fn escape() { assert_eq!( t(r"\\\.\+\*\?\(\)\|\[\]\{\}\^\$\#"), hir_lit(r"\.+*?()|[]{}^$#") ); } #[test] fn repetition() { assert_eq!(t("a?"), hir_quest(true, hir_lit("a"))); assert_eq!(t("a*"), hir_star(true, hir_lit("a"))); assert_eq!(t("a+"), hir_plus(true, hir_lit("a"))); assert_eq!(t("a??"), hir_quest(false, hir_lit("a"))); assert_eq!(t("a*?"), hir_star(false, hir_lit("a"))); assert_eq!(t("a+?"), hir_plus(false, hir_lit("a"))); assert_eq!(t("a{1}"), hir_range(true, 1, Some(1), hir_lit("a"),)); assert_eq!(t("a{1,}"), hir_range(true, 1, None, hir_lit("a"),)); assert_eq!(t("a{1,2}"), hir_range(true, 1, Some(2), hir_lit("a"),)); assert_eq!(t("a{1}?"), hir_range(false, 1, Some(1), 
hir_lit("a"),)); assert_eq!(t("a{1,}?"), hir_range(false, 1, None, hir_lit("a"),)); assert_eq!(t("a{1,2}?"), hir_range(false, 1, Some(2), hir_lit("a"),)); assert_eq!( t("ab?"), hir_cat(vec![hir_lit("a"), hir_quest(true, hir_lit("b")),]) ); assert_eq!(t("(ab)?"), hir_quest(true, hir_capture(1, hir_lit("ab")))); assert_eq!( t("a|b?"), hir_alt(vec![hir_lit("a"), hir_quest(true, hir_lit("b")),]) ); } #[test] fn cat_alt() { let a = || hir_look(hir::Look::Start); let b = || hir_look(hir::Look::End); let c = || hir_look(hir::Look::WordUnicode); let d = || hir_look(hir::Look::WordUnicodeNegate); assert_eq!(t("(^$)"), hir_capture(1, hir_cat(vec![a(), b()]))); assert_eq!(t("^|$"), hir_alt(vec![a(), b()])); assert_eq!(t(r"^|$|\b"), hir_alt(vec![a(), b(), c()])); assert_eq!( t(r"^$|$\b|\b\B"), hir_alt(vec![ hir_cat(vec![a(), b()]), hir_cat(vec![b(), c()]), hir_cat(vec![c(), d()]), ]) ); assert_eq!(t("(^|$)"), hir_capture(1, hir_alt(vec![a(), b()]))); assert_eq!( t(r"(^|$|\b)"), hir_capture(1, hir_alt(vec![a(), b(), c()])) ); assert_eq!( t(r"(^$|$\b|\b\B)"), hir_capture( 1, hir_alt(vec![ hir_cat(vec![a(), b()]), hir_cat(vec![b(), c()]), hir_cat(vec![c(), d()]), ]) ) ); assert_eq!( t(r"(^$|($\b|(\b\B)))"), hir_capture( 1, hir_alt(vec![ hir_cat(vec![a(), b()]), hir_capture( 2, hir_alt(vec![ hir_cat(vec![b(), c()]), hir_capture(3, hir_cat(vec![c(), d()])), ]) ), ]) ) ); } // Tests the HIR transformation of things like '[a-z]|[A-Z]' into // '[A-Za-z]'. In other words, an alternation of just classes is always // equivalent to a single class corresponding to the union of the branches // in that class. (Unless some branches match invalid UTF-8 and others // match non-ASCII Unicode.) #[test] fn cat_class_flattened() { assert_eq!(t(r"[a-z]|[A-Z]"), hir_uclass(&[('A', 'Z'), ('a', 'z')])); // Combining all of the letter properties should give us the one giant // letter property. #[cfg(feature = "unicode-gencat")] assert_eq!( t(r"(?x) \p{Lowercase_Letter} |\p{Uppercase_Letter} |\p{Titlecase_Letter} |\p{Modifier_Letter} |\p{Other_Letter} "), hir_uclass_query(ClassQuery::Binary("letter")) ); // Byte classes that can truly match invalid UTF-8 cannot be combined // with Unicode classes. assert_eq!( t_bytes(r"[Δδ]|(?-u:[\x90-\xFF])|[Λλ]"), hir_alt(vec![ hir_uclass(&[('Δ', 'Δ'), ('δ', 'δ')]), hir_bclass(&[(b'\x90', b'\xFF')]), hir_uclass(&[('Λ', 'Λ'), ('λ', 'λ')]), ]) ); // Byte classes on their own can be combined, even if some are ASCII // and others are invalid UTF-8. 
assert_eq!( t_bytes(r"[a-z]|(?-u:[\x90-\xFF])|[A-Z]"), hir_bclass(&[(b'A', b'Z'), (b'a', b'z'), (b'\x90', b'\xFF')]), ); } #[test] fn class_ascii() { assert_eq!( t("[[:alnum:]]"), hir_ascii_uclass(&ast::ClassAsciiKind::Alnum) ); assert_eq!( t("[[:alpha:]]"), hir_ascii_uclass(&ast::ClassAsciiKind::Alpha) ); assert_eq!( t("[[:ascii:]]"), hir_ascii_uclass(&ast::ClassAsciiKind::Ascii) ); assert_eq!( t("[[:blank:]]"), hir_ascii_uclass(&ast::ClassAsciiKind::Blank) ); assert_eq!( t("[[:cntrl:]]"), hir_ascii_uclass(&ast::ClassAsciiKind::Cntrl) ); assert_eq!( t("[[:digit:]]"), hir_ascii_uclass(&ast::ClassAsciiKind::Digit) ); assert_eq!( t("[[:graph:]]"), hir_ascii_uclass(&ast::ClassAsciiKind::Graph) ); assert_eq!( t("[[:lower:]]"), hir_ascii_uclass(&ast::ClassAsciiKind::Lower) ); assert_eq!( t("[[:print:]]"), hir_ascii_uclass(&ast::ClassAsciiKind::Print) ); assert_eq!( t("[[:punct:]]"), hir_ascii_uclass(&ast::ClassAsciiKind::Punct) ); assert_eq!( t("[[:space:]]"), hir_ascii_uclass(&ast::ClassAsciiKind::Space) ); assert_eq!( t("[[:upper:]]"), hir_ascii_uclass(&ast::ClassAsciiKind::Upper) ); assert_eq!( t("[[:word:]]"), hir_ascii_uclass(&ast::ClassAsciiKind::Word) ); assert_eq!( t("[[:xdigit:]]"), hir_ascii_uclass(&ast::ClassAsciiKind::Xdigit) ); assert_eq!( t("[[:^lower:]]"), hir_negate(hir_ascii_uclass(&ast::ClassAsciiKind::Lower)) ); #[cfg(feature = "unicode-case")] assert_eq!( t("(?i)[[:lower:]]"), hir_uclass(&[ ('A', 'Z'), ('a', 'z'), ('\u{17F}', '\u{17F}'), ('\u{212A}', '\u{212A}'), ]) ); assert_eq!( t("(?-u)[[:lower:]]"), hir_ascii_bclass(&ast::ClassAsciiKind::Lower) ); assert_eq!( t("(?i-u)[[:lower:]]"), hir_case_fold(hir_ascii_bclass(&ast::ClassAsciiKind::Lower)) ); assert_eq!( t_err("(?-u)[[:^lower:]]"), TestError { kind: hir::ErrorKind::InvalidUtf8, span: Span::new( Position::new(6, 1, 7), Position::new(16, 1, 17) ), } ); assert_eq!( t_err("(?i-u)[[:^lower:]]"), TestError { kind: hir::ErrorKind::InvalidUtf8, span: Span::new( Position::new(7, 1, 8), Position::new(17, 1, 18) ), } ); } #[test] fn class_ascii_multiple() { // See: https://github.com/rust-lang/regex/issues/680 assert_eq!( t("[[:alnum:][:^ascii:]]"), hir_union( hir_ascii_uclass(&ast::ClassAsciiKind::Alnum), hir_uclass(&[('\u{80}', '\u{10FFFF}')]), ), ); assert_eq!( t_bytes("(?-u)[[:alnum:][:^ascii:]]"), hir_union( hir_ascii_bclass(&ast::ClassAsciiKind::Alnum), hir_bclass(&[(0x80, 0xFF)]), ), ); } #[test] #[cfg(feature = "unicode-perl")] fn class_perl_unicode() { // Unicode assert_eq!(t(r"\d"), hir_uclass_query(ClassQuery::Binary("digit"))); assert_eq!(t(r"\s"), hir_uclass_query(ClassQuery::Binary("space"))); assert_eq!(t(r"\w"), hir_uclass_perl_word()); #[cfg(feature = "unicode-case")] assert_eq!( t(r"(?i)\d"), hir_uclass_query(ClassQuery::Binary("digit")) ); #[cfg(feature = "unicode-case")] assert_eq!( t(r"(?i)\s"), hir_uclass_query(ClassQuery::Binary("space")) ); #[cfg(feature = "unicode-case")] assert_eq!(t(r"(?i)\w"), hir_uclass_perl_word()); // Unicode, negated assert_eq!( t(r"\D"), hir_negate(hir_uclass_query(ClassQuery::Binary("digit"))) ); assert_eq!( t(r"\S"), hir_negate(hir_uclass_query(ClassQuery::Binary("space"))) ); assert_eq!(t(r"\W"), hir_negate(hir_uclass_perl_word())); #[cfg(feature = "unicode-case")] assert_eq!( t(r"(?i)\D"), hir_negate(hir_uclass_query(ClassQuery::Binary("digit"))) ); #[cfg(feature = "unicode-case")] assert_eq!( t(r"(?i)\S"), hir_negate(hir_uclass_query(ClassQuery::Binary("space"))) ); #[cfg(feature = "unicode-case")] assert_eq!(t(r"(?i)\W"), hir_negate(hir_uclass_perl_word())); } #[test] fn 
class_perl_ascii() { // ASCII only assert_eq!( t(r"(?-u)\d"), hir_ascii_bclass(&ast::ClassAsciiKind::Digit) ); assert_eq!( t(r"(?-u)\s"), hir_ascii_bclass(&ast::ClassAsciiKind::Space) ); assert_eq!( t(r"(?-u)\w"), hir_ascii_bclass(&ast::ClassAsciiKind::Word) ); assert_eq!( t(r"(?i-u)\d"), hir_ascii_bclass(&ast::ClassAsciiKind::Digit) ); assert_eq!( t(r"(?i-u)\s"), hir_ascii_bclass(&ast::ClassAsciiKind::Space) ); assert_eq!( t(r"(?i-u)\w"), hir_ascii_bclass(&ast::ClassAsciiKind::Word) ); // ASCII only, negated assert_eq!( t_bytes(r"(?-u)\D"), hir_negate(hir_ascii_bclass(&ast::ClassAsciiKind::Digit)) ); assert_eq!( t_bytes(r"(?-u)\S"), hir_negate(hir_ascii_bclass(&ast::ClassAsciiKind::Space)) ); assert_eq!( t_bytes(r"(?-u)\W"), hir_negate(hir_ascii_bclass(&ast::ClassAsciiKind::Word)) ); assert_eq!( t_bytes(r"(?i-u)\D"), hir_negate(hir_ascii_bclass(&ast::ClassAsciiKind::Digit)) ); assert_eq!( t_bytes(r"(?i-u)\S"), hir_negate(hir_ascii_bclass(&ast::ClassAsciiKind::Space)) ); assert_eq!( t_bytes(r"(?i-u)\W"), hir_negate(hir_ascii_bclass(&ast::ClassAsciiKind::Word)) ); // ASCII only, negated, with UTF-8 mode enabled. // In this case, negating any Perl class results in an error because // all such classes can match invalid UTF-8. assert_eq!( t_err(r"(?-u)\D"), TestError { kind: hir::ErrorKind::InvalidUtf8, span: Span::new( Position::new(5, 1, 6), Position::new(7, 1, 8), ), }, ); assert_eq!( t_err(r"(?-u)\S"), TestError { kind: hir::ErrorKind::InvalidUtf8, span: Span::new( Position::new(5, 1, 6), Position::new(7, 1, 8), ), }, ); assert_eq!( t_err(r"(?-u)\W"), TestError { kind: hir::ErrorKind::InvalidUtf8, span: Span::new( Position::new(5, 1, 6), Position::new(7, 1, 8), ), }, ); assert_eq!( t_err(r"(?i-u)\D"), TestError { kind: hir::ErrorKind::InvalidUtf8, span: Span::new( Position::new(6, 1, 7), Position::new(8, 1, 9), ), }, ); assert_eq!( t_err(r"(?i-u)\S"), TestError { kind: hir::ErrorKind::InvalidUtf8, span: Span::new( Position::new(6, 1, 7), Position::new(8, 1, 9), ), }, ); assert_eq!( t_err(r"(?i-u)\W"), TestError { kind: hir::ErrorKind::InvalidUtf8, span: Span::new( Position::new(6, 1, 7), Position::new(8, 1, 9), ), }, ); } #[test] #[cfg(not(feature = "unicode-perl"))] fn class_perl_word_disabled() { assert_eq!( t_err(r"\w"), TestError { kind: hir::ErrorKind::UnicodePerlClassNotFound, span: Span::new( Position::new(0, 1, 1), Position::new(2, 1, 3) ), } ); } #[test] #[cfg(all(not(feature = "unicode-perl"), not(feature = "unicode-bool")))] fn class_perl_space_disabled() { assert_eq!( t_err(r"\s"), TestError { kind: hir::ErrorKind::UnicodePerlClassNotFound, span: Span::new( Position::new(0, 1, 1), Position::new(2, 1, 3) ), } ); } #[test] #[cfg(all( not(feature = "unicode-perl"), not(feature = "unicode-gencat") ))] fn class_perl_digit_disabled() { assert_eq!( t_err(r"\d"), TestError { kind: hir::ErrorKind::UnicodePerlClassNotFound, span: Span::new( Position::new(0, 1, 1), Position::new(2, 1, 3) ), } ); } #[test] #[cfg(feature = "unicode-gencat")] fn class_unicode_gencat() { assert_eq!(t(r"\pZ"), hir_uclass_query(ClassQuery::Binary("Z"))); assert_eq!(t(r"\pz"), hir_uclass_query(ClassQuery::Binary("Z"))); assert_eq!( t(r"\p{Separator}"), hir_uclass_query(ClassQuery::Binary("Z")) ); assert_eq!( t(r"\p{se PaRa ToR}"), hir_uclass_query(ClassQuery::Binary("Z")) ); assert_eq!( t(r"\p{gc:Separator}"), hir_uclass_query(ClassQuery::Binary("Z")) ); assert_eq!( t(r"\p{gc=Separator}"), hir_uclass_query(ClassQuery::Binary("Z")) ); assert_eq!( t(r"\p{Other}"), hir_uclass_query(ClassQuery::Binary("Other")) ); 
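        // `\pC` is the one-letter abbreviation for the `Other` general
        // category, so it should translate to the same class as `\p{Other}`
        // above.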
assert_eq!(t(r"\pC"), hir_uclass_query(ClassQuery::Binary("Other"))); assert_eq!( t(r"\PZ"), hir_negate(hir_uclass_query(ClassQuery::Binary("Z"))) ); assert_eq!( t(r"\P{separator}"), hir_negate(hir_uclass_query(ClassQuery::Binary("Z"))) ); assert_eq!( t(r"\P{gc!=separator}"), hir_negate(hir_uclass_query(ClassQuery::Binary("Z"))) ); assert_eq!(t(r"\p{any}"), hir_uclass_query(ClassQuery::Binary("Any"))); assert_eq!( t(r"\p{assigned}"), hir_uclass_query(ClassQuery::Binary("Assigned")) ); assert_eq!( t(r"\p{ascii}"), hir_uclass_query(ClassQuery::Binary("ASCII")) ); assert_eq!( t(r"\p{gc:any}"), hir_uclass_query(ClassQuery::Binary("Any")) ); assert_eq!( t(r"\p{gc:assigned}"), hir_uclass_query(ClassQuery::Binary("Assigned")) ); assert_eq!( t(r"\p{gc:ascii}"), hir_uclass_query(ClassQuery::Binary("ASCII")) ); assert_eq!( t_err(r"(?-u)\pZ"), TestError { kind: hir::ErrorKind::UnicodeNotAllowed, span: Span::new( Position::new(5, 1, 6), Position::new(8, 1, 9) ), } ); assert_eq!( t_err(r"(?-u)\p{Separator}"), TestError { kind: hir::ErrorKind::UnicodeNotAllowed, span: Span::new( Position::new(5, 1, 6), Position::new(18, 1, 19) ), } ); assert_eq!( t_err(r"\pE"), TestError { kind: hir::ErrorKind::UnicodePropertyNotFound, span: Span::new( Position::new(0, 1, 1), Position::new(3, 1, 4) ), } ); assert_eq!( t_err(r"\p{Foo}"), TestError { kind: hir::ErrorKind::UnicodePropertyNotFound, span: Span::new( Position::new(0, 1, 1), Position::new(7, 1, 8) ), } ); assert_eq!( t_err(r"\p{gc:Foo}"), TestError { kind: hir::ErrorKind::UnicodePropertyValueNotFound, span: Span::new( Position::new(0, 1, 1), Position::new(10, 1, 11) ), } ); } #[test] #[cfg(not(feature = "unicode-gencat"))] fn class_unicode_gencat_disabled() { assert_eq!( t_err(r"\p{Separator}"), TestError { kind: hir::ErrorKind::UnicodePropertyNotFound, span: Span::new( Position::new(0, 1, 1), Position::new(13, 1, 14) ), } ); assert_eq!( t_err(r"\p{Any}"), TestError { kind: hir::ErrorKind::UnicodePropertyNotFound, span: Span::new( Position::new(0, 1, 1), Position::new(7, 1, 8) ), } ); } #[test] #[cfg(feature = "unicode-script")] fn class_unicode_script() { assert_eq!( t(r"\p{Greek}"), hir_uclass_query(ClassQuery::Binary("Greek")) ); #[cfg(feature = "unicode-case")] assert_eq!( t(r"(?i)\p{Greek}"), hir_case_fold(hir_uclass_query(ClassQuery::Binary("Greek"))) ); #[cfg(feature = "unicode-case")] assert_eq!( t(r"(?i)\P{Greek}"), hir_negate(hir_case_fold(hir_uclass_query(ClassQuery::Binary( "Greek" )))) ); assert_eq!( t_err(r"\p{sc:Foo}"), TestError { kind: hir::ErrorKind::UnicodePropertyValueNotFound, span: Span::new( Position::new(0, 1, 1), Position::new(10, 1, 11) ), } ); assert_eq!( t_err(r"\p{scx:Foo}"), TestError { kind: hir::ErrorKind::UnicodePropertyValueNotFound, span: Span::new( Position::new(0, 1, 1), Position::new(11, 1, 12) ), } ); } #[test] #[cfg(not(feature = "unicode-script"))] fn class_unicode_script_disabled() { assert_eq!( t_err(r"\p{Greek}"), TestError { kind: hir::ErrorKind::UnicodePropertyNotFound, span: Span::new( Position::new(0, 1, 1), Position::new(9, 1, 10) ), } ); assert_eq!( t_err(r"\p{scx:Greek}"), TestError { kind: hir::ErrorKind::UnicodePropertyNotFound, span: Span::new( Position::new(0, 1, 1), Position::new(13, 1, 14) ), } ); } #[test] #[cfg(feature = "unicode-age")] fn class_unicode_age() { assert_eq!( t_err(r"\p{age:Foo}"), TestError { kind: hir::ErrorKind::UnicodePropertyValueNotFound, span: Span::new( Position::new(0, 1, 1), Position::new(11, 1, 12) ), } ); } #[test] #[cfg(feature = "unicode-gencat")] fn 
class_unicode_any_empty() { assert_eq!(t(r"\P{any}"), hir_uclass(&[]),); } #[test] #[cfg(not(feature = "unicode-age"))] fn class_unicode_age_disabled() { assert_eq!( t_err(r"\p{age:3.0}"), TestError { kind: hir::ErrorKind::UnicodePropertyNotFound, span: Span::new( Position::new(0, 1, 1), Position::new(11, 1, 12) ), } ); } #[test] fn class_bracketed() { assert_eq!(t("[a]"), hir_lit("a")); assert_eq!(t("[ab]"), hir_uclass(&[('a', 'b')])); assert_eq!(t("[^[a]]"), class_negate(uclass(&[('a', 'a')]))); assert_eq!(t("[a-z]"), hir_uclass(&[('a', 'z')])); assert_eq!(t("[a-fd-h]"), hir_uclass(&[('a', 'h')])); assert_eq!(t("[a-fg-m]"), hir_uclass(&[('a', 'm')])); assert_eq!(t(r"[\x00]"), hir_uclass(&[('\0', '\0')])); assert_eq!(t(r"[\n]"), hir_uclass(&[('\n', '\n')])); assert_eq!(t("[\n]"), hir_uclass(&[('\n', '\n')])); #[cfg(any(feature = "unicode-perl", feature = "unicode-gencat"))] assert_eq!(t(r"[\d]"), hir_uclass_query(ClassQuery::Binary("digit"))); #[cfg(feature = "unicode-gencat")] assert_eq!( t(r"[\pZ]"), hir_uclass_query(ClassQuery::Binary("separator")) ); #[cfg(feature = "unicode-gencat")] assert_eq!( t(r"[\p{separator}]"), hir_uclass_query(ClassQuery::Binary("separator")) ); #[cfg(any(feature = "unicode-perl", feature = "unicode-gencat"))] assert_eq!(t(r"[^\D]"), hir_uclass_query(ClassQuery::Binary("digit"))); #[cfg(feature = "unicode-gencat")] assert_eq!( t(r"[^\PZ]"), hir_uclass_query(ClassQuery::Binary("separator")) ); #[cfg(feature = "unicode-gencat")] assert_eq!( t(r"[^\P{separator}]"), hir_uclass_query(ClassQuery::Binary("separator")) ); #[cfg(all( feature = "unicode-case", any(feature = "unicode-perl", feature = "unicode-gencat") ))] assert_eq!( t(r"(?i)[^\D]"), hir_uclass_query(ClassQuery::Binary("digit")) ); #[cfg(all(feature = "unicode-case", feature = "unicode-script"))] assert_eq!( t(r"(?i)[^\P{greek}]"), hir_case_fold(hir_uclass_query(ClassQuery::Binary("greek"))) ); assert_eq!(t("(?-u)[a]"), hir_bclass(&[(b'a', b'a')])); assert_eq!(t(r"(?-u)[\x00]"), hir_bclass(&[(b'\0', b'\0')])); assert_eq!(t_bytes(r"(?-u)[\xFF]"), hir_bclass(&[(b'\xFF', b'\xFF')])); #[cfg(feature = "unicode-case")] assert_eq!(t("(?i)[a]"), hir_uclass(&[('A', 'A'), ('a', 'a')])); #[cfg(feature = "unicode-case")] assert_eq!( t("(?i)[k]"), hir_uclass(&[('K', 'K'), ('k', 'k'), ('\u{212A}', '\u{212A}'),]) ); #[cfg(feature = "unicode-case")] assert_eq!( t("(?i)[β]"), hir_uclass(&[('Β', 'Β'), ('β', 'β'), ('ϐ', 'ϐ'),]) ); assert_eq!(t("(?i-u)[k]"), hir_bclass(&[(b'K', b'K'), (b'k', b'k'),])); assert_eq!(t("[^a]"), class_negate(uclass(&[('a', 'a')]))); assert_eq!(t(r"[^\x00]"), class_negate(uclass(&[('\0', '\0')]))); assert_eq!( t_bytes("(?-u)[^a]"), class_negate(bclass(&[(b'a', b'a')])) ); #[cfg(any(feature = "unicode-perl", feature = "unicode-gencat"))] assert_eq!( t(r"[^\d]"), hir_negate(hir_uclass_query(ClassQuery::Binary("digit"))) ); #[cfg(feature = "unicode-gencat")] assert_eq!( t(r"[^\pZ]"), hir_negate(hir_uclass_query(ClassQuery::Binary("separator"))) ); #[cfg(feature = "unicode-gencat")] assert_eq!( t(r"[^\p{separator}]"), hir_negate(hir_uclass_query(ClassQuery::Binary("separator"))) ); #[cfg(all(feature = "unicode-case", feature = "unicode-script"))] assert_eq!( t(r"(?i)[^\p{greek}]"), hir_negate(hir_case_fold(hir_uclass_query(ClassQuery::Binary( "greek" )))) ); #[cfg(all(feature = "unicode-case", feature = "unicode-script"))] assert_eq!( t(r"(?i)[\P{greek}]"), hir_negate(hir_case_fold(hir_uclass_query(ClassQuery::Binary( "greek" )))) ); // Test some weird cases. 
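        // Each of these checks that a punctuation character (`[`, `&`, `~`,
        // `-`) is accepted literally inside a class, escaped or (where the
        // syntax allows it) unescaped, including as a range endpoint.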
assert_eq!(t(r"[\[]"), hir_uclass(&[('[', '[')])); assert_eq!(t(r"[&]"), hir_uclass(&[('&', '&')])); assert_eq!(t(r"[\&]"), hir_uclass(&[('&', '&')])); assert_eq!(t(r"[\&\&]"), hir_uclass(&[('&', '&')])); assert_eq!(t(r"[\x00-&]"), hir_uclass(&[('\0', '&')])); assert_eq!(t(r"[&-\xFF]"), hir_uclass(&[('&', '\u{FF}')])); assert_eq!(t(r"[~]"), hir_uclass(&[('~', '~')])); assert_eq!(t(r"[\~]"), hir_uclass(&[('~', '~')])); assert_eq!(t(r"[\~\~]"), hir_uclass(&[('~', '~')])); assert_eq!(t(r"[\x00-~]"), hir_uclass(&[('\0', '~')])); assert_eq!(t(r"[~-\xFF]"), hir_uclass(&[('~', '\u{FF}')])); assert_eq!(t(r"[-]"), hir_uclass(&[('-', '-')])); assert_eq!(t(r"[\-]"), hir_uclass(&[('-', '-')])); assert_eq!(t(r"[\-\-]"), hir_uclass(&[('-', '-')])); assert_eq!(t(r"[\x00-\-]"), hir_uclass(&[('\0', '-')])); assert_eq!(t(r"[\--\xFF]"), hir_uclass(&[('-', '\u{FF}')])); assert_eq!( t_err("(?-u)[^a]"), TestError { kind: hir::ErrorKind::InvalidUtf8, span: Span::new( Position::new(5, 1, 6), Position::new(9, 1, 10) ), } ); #[cfg(any(feature = "unicode-perl", feature = "unicode-bool"))] assert_eq!(t(r"[^\s\S]"), hir_uclass(&[]),); #[cfg(any(feature = "unicode-perl", feature = "unicode-bool"))] assert_eq!(t_bytes(r"(?-u)[^\s\S]"), hir_bclass(&[]),); } #[test] fn class_bracketed_union() { assert_eq!(t("[a-zA-Z]"), hir_uclass(&[('A', 'Z'), ('a', 'z')])); #[cfg(feature = "unicode-gencat")] assert_eq!( t(r"[a\pZb]"), hir_union( hir_uclass(&[('a', 'b')]), hir_uclass_query(ClassQuery::Binary("separator")) ) ); #[cfg(all(feature = "unicode-gencat", feature = "unicode-script"))] assert_eq!( t(r"[\pZ\p{Greek}]"), hir_union( hir_uclass_query(ClassQuery::Binary("greek")), hir_uclass_query(ClassQuery::Binary("separator")) ) ); #[cfg(all( feature = "unicode-age", feature = "unicode-gencat", feature = "unicode-script" ))] assert_eq!( t(r"[\p{age:3.0}\pZ\p{Greek}]"), hir_union( hir_uclass_query(ClassQuery::ByValue { property_name: "age", property_value: "3.0", }), hir_union( hir_uclass_query(ClassQuery::Binary("greek")), hir_uclass_query(ClassQuery::Binary("separator")) ) ) ); #[cfg(all( feature = "unicode-age", feature = "unicode-gencat", feature = "unicode-script" ))] assert_eq!( t(r"[[[\p{age:3.0}\pZ]\p{Greek}][\p{Cyrillic}]]"), hir_union( hir_uclass_query(ClassQuery::ByValue { property_name: "age", property_value: "3.0", }), hir_union( hir_uclass_query(ClassQuery::Binary("cyrillic")), hir_union( hir_uclass_query(ClassQuery::Binary("greek")), hir_uclass_query(ClassQuery::Binary("separator")) ) ) ) ); #[cfg(all( feature = "unicode-age", feature = "unicode-case", feature = "unicode-gencat", feature = "unicode-script" ))] assert_eq!( t(r"(?i)[\p{age:3.0}\pZ\p{Greek}]"), hir_case_fold(hir_union( hir_uclass_query(ClassQuery::ByValue { property_name: "age", property_value: "3.0", }), hir_union( hir_uclass_query(ClassQuery::Binary("greek")), hir_uclass_query(ClassQuery::Binary("separator")) ) )) ); #[cfg(all( feature = "unicode-age", feature = "unicode-gencat", feature = "unicode-script" ))] assert_eq!( t(r"[^\p{age:3.0}\pZ\p{Greek}]"), hir_negate(hir_union( hir_uclass_query(ClassQuery::ByValue { property_name: "age", property_value: "3.0", }), hir_union( hir_uclass_query(ClassQuery::Binary("greek")), hir_uclass_query(ClassQuery::Binary("separator")) ) )) ); #[cfg(all( feature = "unicode-age", feature = "unicode-case", feature = "unicode-gencat", feature = "unicode-script" ))] assert_eq!( t(r"(?i)[^\p{age:3.0}\pZ\p{Greek}]"), hir_negate(hir_case_fold(hir_union( hir_uclass_query(ClassQuery::ByValue { property_name: "age", 
property_value: "3.0", }), hir_union( hir_uclass_query(ClassQuery::Binary("greek")), hir_uclass_query(ClassQuery::Binary("separator")) ) ))) ); } #[test] fn class_bracketed_nested() { assert_eq!(t(r"[a[^c]]"), class_negate(uclass(&[('c', 'c')]))); assert_eq!(t(r"[a-b[^c]]"), class_negate(uclass(&[('c', 'c')]))); assert_eq!(t(r"[a-c[^c]]"), class_negate(uclass(&[]))); assert_eq!(t(r"[^a[^c]]"), hir_uclass(&[('c', 'c')])); assert_eq!(t(r"[^a-b[^c]]"), hir_uclass(&[('c', 'c')])); #[cfg(feature = "unicode-case")] assert_eq!( t(r"(?i)[a[^c]]"), hir_negate(class_case_fold(uclass(&[('c', 'c')]))) ); #[cfg(feature = "unicode-case")] assert_eq!( t(r"(?i)[a-b[^c]]"), hir_negate(class_case_fold(uclass(&[('c', 'c')]))) ); #[cfg(feature = "unicode-case")] assert_eq!(t(r"(?i)[^a[^c]]"), hir_uclass(&[('C', 'C'), ('c', 'c')])); #[cfg(feature = "unicode-case")] assert_eq!( t(r"(?i)[^a-b[^c]]"), hir_uclass(&[('C', 'C'), ('c', 'c')]) ); assert_eq!(t(r"[^a-c[^c]]"), hir_uclass(&[]),); #[cfg(feature = "unicode-case")] assert_eq!(t(r"(?i)[^a-c[^c]]"), hir_uclass(&[]),); } #[test] fn class_bracketed_intersect() { assert_eq!(t("[abc&&b-c]"), hir_uclass(&[('b', 'c')])); assert_eq!(t("[abc&&[b-c]]"), hir_uclass(&[('b', 'c')])); assert_eq!(t("[[abc]&&[b-c]]"), hir_uclass(&[('b', 'c')])); assert_eq!(t("[a-z&&b-y&&c-x]"), hir_uclass(&[('c', 'x')])); assert_eq!(t("[c-da-b&&a-d]"), hir_uclass(&[('a', 'd')])); assert_eq!(t("[a-d&&c-da-b]"), hir_uclass(&[('a', 'd')])); assert_eq!(t(r"[a-z&&a-c]"), hir_uclass(&[('a', 'c')])); assert_eq!(t(r"[[a-z&&a-c]]"), hir_uclass(&[('a', 'c')])); assert_eq!(t(r"[^[a-z&&a-c]]"), hir_negate(hir_uclass(&[('a', 'c')]))); assert_eq!(t("(?-u)[abc&&b-c]"), hir_bclass(&[(b'b', b'c')])); assert_eq!(t("(?-u)[abc&&[b-c]]"), hir_bclass(&[(b'b', b'c')])); assert_eq!(t("(?-u)[[abc]&&[b-c]]"), hir_bclass(&[(b'b', b'c')])); assert_eq!(t("(?-u)[a-z&&b-y&&c-x]"), hir_bclass(&[(b'c', b'x')])); assert_eq!(t("(?-u)[c-da-b&&a-d]"), hir_bclass(&[(b'a', b'd')])); assert_eq!(t("(?-u)[a-d&&c-da-b]"), hir_bclass(&[(b'a', b'd')])); #[cfg(feature = "unicode-case")] assert_eq!( t("(?i)[abc&&b-c]"), hir_case_fold(hir_uclass(&[('b', 'c')])) ); #[cfg(feature = "unicode-case")] assert_eq!( t("(?i)[abc&&[b-c]]"), hir_case_fold(hir_uclass(&[('b', 'c')])) ); #[cfg(feature = "unicode-case")] assert_eq!( t("(?i)[[abc]&&[b-c]]"), hir_case_fold(hir_uclass(&[('b', 'c')])) ); #[cfg(feature = "unicode-case")] assert_eq!( t("(?i)[a-z&&b-y&&c-x]"), hir_case_fold(hir_uclass(&[('c', 'x')])) ); #[cfg(feature = "unicode-case")] assert_eq!( t("(?i)[c-da-b&&a-d]"), hir_case_fold(hir_uclass(&[('a', 'd')])) ); #[cfg(feature = "unicode-case")] assert_eq!( t("(?i)[a-d&&c-da-b]"), hir_case_fold(hir_uclass(&[('a', 'd')])) ); assert_eq!( t("(?i-u)[abc&&b-c]"), hir_case_fold(hir_bclass(&[(b'b', b'c')])) ); assert_eq!( t("(?i-u)[abc&&[b-c]]"), hir_case_fold(hir_bclass(&[(b'b', b'c')])) ); assert_eq!( t("(?i-u)[[abc]&&[b-c]]"), hir_case_fold(hir_bclass(&[(b'b', b'c')])) ); assert_eq!( t("(?i-u)[a-z&&b-y&&c-x]"), hir_case_fold(hir_bclass(&[(b'c', b'x')])) ); assert_eq!( t("(?i-u)[c-da-b&&a-d]"), hir_case_fold(hir_bclass(&[(b'a', b'd')])) ); assert_eq!( t("(?i-u)[a-d&&c-da-b]"), hir_case_fold(hir_bclass(&[(b'a', b'd')])) ); // In `[a^]`, `^` does not need to be escaped, so it makes sense that // `^` is also allowed to be unescaped after `&&`. assert_eq!(t(r"[\^&&^]"), hir_uclass(&[('^', '^')])); // `]` needs to be escaped after `&&` since it's not at start of class. 
assert_eq!(t(r"[]&&\]]"), hir_uclass(&[(']', ']')])); assert_eq!(t(r"[-&&-]"), hir_uclass(&[('-', '-')])); assert_eq!(t(r"[\&&&&]"), hir_uclass(&[('&', '&')])); assert_eq!(t(r"[\&&&\&]"), hir_uclass(&[('&', '&')])); // Test precedence. assert_eq!( t(r"[a-w&&[^c-g]z]"), hir_uclass(&[('a', 'b'), ('h', 'w')]) ); } #[test] fn class_bracketed_intersect_negate() { #[cfg(feature = "unicode-perl")] assert_eq!( t(r"[^\w&&\d]"), hir_negate(hir_uclass_query(ClassQuery::Binary("digit"))) ); assert_eq!(t(r"[^[a-z&&a-c]]"), hir_negate(hir_uclass(&[('a', 'c')]))); #[cfg(feature = "unicode-perl")] assert_eq!( t(r"[^[\w&&\d]]"), hir_negate(hir_uclass_query(ClassQuery::Binary("digit"))) ); #[cfg(feature = "unicode-perl")] assert_eq!( t(r"[^[^\w&&\d]]"), hir_uclass_query(ClassQuery::Binary("digit")) ); #[cfg(feature = "unicode-perl")] assert_eq!(t(r"[[[^\w]&&[^\d]]]"), hir_negate(hir_uclass_perl_word())); #[cfg(feature = "unicode-perl")] assert_eq!( t_bytes(r"(?-u)[^\w&&\d]"), hir_negate(hir_ascii_bclass(&ast::ClassAsciiKind::Digit)) ); assert_eq!( t_bytes(r"(?-u)[^[a-z&&a-c]]"), hir_negate(hir_bclass(&[(b'a', b'c')])) ); assert_eq!( t_bytes(r"(?-u)[^[\w&&\d]]"), hir_negate(hir_ascii_bclass(&ast::ClassAsciiKind::Digit)) ); assert_eq!( t_bytes(r"(?-u)[^[^\w&&\d]]"), hir_ascii_bclass(&ast::ClassAsciiKind::Digit) ); assert_eq!( t_bytes(r"(?-u)[[[^\w]&&[^\d]]]"), hir_negate(hir_ascii_bclass(&ast::ClassAsciiKind::Word)) ); } #[test] fn class_bracketed_difference() { #[cfg(feature = "unicode-gencat")] assert_eq!( t(r"[\pL--[:ascii:]]"), hir_difference( hir_uclass_query(ClassQuery::Binary("letter")), hir_uclass(&[('\0', '\x7F')]) ) ); assert_eq!( t(r"(?-u)[[:alpha:]--[:lower:]]"), hir_bclass(&[(b'A', b'Z')]) ); } #[test] fn class_bracketed_symmetric_difference() { #[cfg(feature = "unicode-script")] assert_eq!( t(r"[\p{sc:Greek}~~\p{scx:Greek}]"), hir_uclass(&[ ('\u{0342}', '\u{0342}'), ('\u{0345}', '\u{0345}'), ('\u{1DC0}', '\u{1DC1}'), ]) ); assert_eq!(t(r"[a-g~~c-j]"), hir_uclass(&[('a', 'b'), ('h', 'j')])); assert_eq!( t(r"(?-u)[a-g~~c-j]"), hir_bclass(&[(b'a', b'b'), (b'h', b'j')]) ); } #[test] fn ignore_whitespace() { assert_eq!(t(r"(?x)\12 3"), hir_lit("\n3")); assert_eq!(t(r"(?x)\x { 53 }"), hir_lit("S")); assert_eq!( t(r"(?x)\x # comment { # comment 53 # comment } #comment"), hir_lit("S") ); assert_eq!(t(r"(?x)\x 53"), hir_lit("S")); assert_eq!( t(r"(?x)\x # comment 53 # comment"), hir_lit("S") ); assert_eq!(t(r"(?x)\x5 3"), hir_lit("S")); #[cfg(feature = "unicode-gencat")] assert_eq!( t(r"(?x)\p # comment { # comment Separator # comment } # comment"), hir_uclass_query(ClassQuery::Binary("separator")) ); assert_eq!( t(r"(?x)a # comment { # comment 5 # comment , # comment 10 # comment } # comment"), hir_range(true, 5, Some(10), hir_lit("a")) ); assert_eq!(t(r"(?x)a\ # hi there"), hir_lit("a ")); } #[test] fn analysis_is_utf8() { // Positive examples. assert!(props_bytes(r"a").is_utf8()); assert!(props_bytes(r"ab").is_utf8()); assert!(props_bytes(r"(?-u)a").is_utf8()); assert!(props_bytes(r"(?-u)ab").is_utf8()); assert!(props_bytes(r"\xFF").is_utf8()); assert!(props_bytes(r"\xFF\xFF").is_utf8()); assert!(props_bytes(r"[^a]").is_utf8()); assert!(props_bytes(r"[^a][^a]").is_utf8()); assert!(props_bytes(r"\b").is_utf8()); assert!(props_bytes(r"\B").is_utf8()); assert!(props_bytes(r"(?-u)\b").is_utf8()); assert!(props_bytes(r"(?-u)\B").is_utf8()); // Negative examples. 
assert!(!props_bytes(r"(?-u)\xFF").is_utf8()); assert!(!props_bytes(r"(?-u)\xFF\xFF").is_utf8()); assert!(!props_bytes(r"(?-u)[^a]").is_utf8()); assert!(!props_bytes(r"(?-u)[^a][^a]").is_utf8()); } #[test] fn analysis_captures_len() { assert_eq!(0, props(r"a").explicit_captures_len()); assert_eq!(0, props(r"(?:a)").explicit_captures_len()); assert_eq!(0, props(r"(?i-u:a)").explicit_captures_len()); assert_eq!(0, props(r"(?i-u)a").explicit_captures_len()); assert_eq!(1, props(r"(a)").explicit_captures_len()); assert_eq!(1, props(r"(?P<foo>a)").explicit_captures_len()); assert_eq!(1, props(r"()").explicit_captures_len()); assert_eq!(1, props(r"()a").explicit_captures_len()); assert_eq!(1, props(r"(a)+").explicit_captures_len()); assert_eq!(2, props(r"(a)(b)").explicit_captures_len()); assert_eq!(2, props(r"(a)|(b)").explicit_captures_len()); assert_eq!(2, props(r"((a))").explicit_captures_len()); assert_eq!(1, props(r"([a&&b])").explicit_captures_len()); } #[test] fn analysis_static_captures_len() { let len = |pattern| props(pattern).static_explicit_captures_len(); assert_eq!(Some(0), len(r"")); assert_eq!(Some(0), len(r"foo|bar")); assert_eq!(None, len(r"(foo)|bar")); assert_eq!(None, len(r"foo|(bar)")); assert_eq!(Some(1), len(r"(foo|bar)")); assert_eq!(Some(1), len(r"(a|b|c|d|e|f)")); assert_eq!(Some(1), len(r"(a)|(b)|(c)|(d)|(e)|(f)")); assert_eq!(Some(2), len(r"(a)(b)|(c)(d)|(e)(f)")); assert_eq!(Some(6), len(r"(a)(b)(c)(d)(e)(f)")); assert_eq!(Some(3), len(r"(a)(b)(extra)|(a)(b)()")); assert_eq!(Some(3), len(r"(a)(b)((?:extra)?)")); assert_eq!(None, len(r"(a)(b)(extra)?")); assert_eq!(Some(1), len(r"(foo)|(bar)")); assert_eq!(Some(2), len(r"(foo)(bar)")); assert_eq!(Some(2), len(r"(foo)+(bar)")); assert_eq!(None, len(r"(foo)*(bar)")); assert_eq!(Some(0), len(r"(foo)?{0}")); assert_eq!(None, len(r"(foo)?{1}")); assert_eq!(Some(1), len(r"(foo){1}")); assert_eq!(Some(1), len(r"(foo){1,}")); assert_eq!(Some(1), len(r"(foo){1,}?")); assert_eq!(None, len(r"(foo){1,}??")); assert_eq!(None, len(r"(foo){0,}")); assert_eq!(Some(1), len(r"(foo)(?:bar)")); assert_eq!(Some(2), len(r"(foo(?:bar)+)(?:baz(boo))")); assert_eq!(Some(2), len(r"(?P<bar>foo)(?:bar)(bal|loon)")); assert_eq!( Some(2), len(r#"<(a)[^>]+href="([^"]+)"|<(img)[^>]+src="([^"]+)""#) ); } #[test] fn analysis_is_all_assertions() { // Positive examples. let p = props(r"\b"); assert!(!p.look_set().is_empty()); assert_eq!(p.minimum_len(), Some(0)); let p = props(r"\B"); assert!(!p.look_set().is_empty()); assert_eq!(p.minimum_len(), Some(0)); let p = props(r"^"); assert!(!p.look_set().is_empty()); assert_eq!(p.minimum_len(), Some(0)); let p = props(r"$"); assert!(!p.look_set().is_empty()); assert_eq!(p.minimum_len(), Some(0)); let p = props(r"\A"); assert!(!p.look_set().is_empty()); assert_eq!(p.minimum_len(), Some(0)); let p = props(r"\z"); assert!(!p.look_set().is_empty()); assert_eq!(p.minimum_len(), Some(0)); let p = props(r"$^\z\A\b\B"); assert!(!p.look_set().is_empty()); assert_eq!(p.minimum_len(), Some(0)); let p = props(r"$|^|\z|\A|\b|\B"); assert!(!p.look_set().is_empty()); assert_eq!(p.minimum_len(), Some(0)); let p = props(r"^$|$^"); assert!(!p.look_set().is_empty()); assert_eq!(p.minimum_len(), Some(0)); let p = props(r"((\b)+())*^"); assert!(!p.look_set().is_empty()); assert_eq!(p.minimum_len(), Some(0)); // Negative examples. 
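        // `^a` still has a non-empty look-set because of `^`, but the
        // literal `a` forces a minimum match length of 1, so the expression
        // is not made up purely of assertions.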
let p = props(r"^a"); assert!(!p.look_set().is_empty()); assert_eq!(p.minimum_len(), Some(1)); } #[test] fn analysis_look_set_prefix_any() { let p = props(r"(?-u)(?i:(?:\b|_)win(?:32|64|dows)?(?:\b|_))"); assert!(p.look_set_prefix_any().contains(Look::WordAscii)); } #[test] fn analysis_is_anchored() { let is_start = |p| props(p).look_set_prefix().contains(Look::Start); let is_end = |p| props(p).look_set_suffix().contains(Look::End); // Positive examples. assert!(is_start(r"^")); assert!(is_end(r"$")); assert!(is_start(r"^^")); assert!(props(r"$$").look_set_suffix().contains(Look::End)); assert!(is_start(r"^$")); assert!(is_end(r"^$")); assert!(is_start(r"^foo")); assert!(is_end(r"foo$")); assert!(is_start(r"^foo|^bar")); assert!(is_end(r"foo$|bar$")); assert!(is_start(r"^(foo|bar)")); assert!(is_end(r"(foo|bar)$")); assert!(is_start(r"^+")); assert!(is_end(r"$+")); assert!(is_start(r"^++")); assert!(is_end(r"$++")); assert!(is_start(r"(^)+")); assert!(is_end(r"($)+")); assert!(is_start(r"$^")); assert!(is_start(r"$^")); assert!(is_start(r"$^|^$")); assert!(is_end(r"$^|^$")); assert!(is_start(r"\b^")); assert!(is_end(r"$\b")); assert!(is_start(r"^(?m:^)")); assert!(is_end(r"(?m:$)$")); assert!(is_start(r"(?m:^)^")); assert!(is_end(r"$(?m:$)")); // Negative examples. assert!(!is_start(r"(?m)^")); assert!(!is_end(r"(?m)$")); assert!(!is_start(r"(?m:^$)|$^")); assert!(!is_end(r"(?m:^$)|$^")); assert!(!is_start(r"$^|(?m:^$)")); assert!(!is_end(r"$^|(?m:^$)")); assert!(!is_start(r"a^")); assert!(!is_start(r"$a")); assert!(!is_end(r"a^")); assert!(!is_end(r"$a")); assert!(!is_start(r"^foo|bar")); assert!(!is_end(r"foo|bar$")); assert!(!is_start(r"^*")); assert!(!is_end(r"$*")); assert!(!is_start(r"^*+")); assert!(!is_end(r"$*+")); assert!(!is_start(r"^+*")); assert!(!is_end(r"$+*")); assert!(!is_start(r"(^)*")); assert!(!is_end(r"($)*")); } #[test] fn analysis_is_any_anchored() { let is_start = |p| props(p).look_set().contains(Look::Start); let is_end = |p| props(p).look_set().contains(Look::End); // Positive examples. assert!(is_start(r"^")); assert!(is_end(r"$")); assert!(is_start(r"\A")); assert!(is_end(r"\z")); // Negative examples. assert!(!is_start(r"(?m)^")); assert!(!is_end(r"(?m)$")); assert!(!is_start(r"$")); assert!(!is_end(r"^")); } #[test] fn analysis_can_empty() { // Positive examples. let assert_empty = |p| assert_eq!(Some(0), props_bytes(p).minimum_len()); assert_empty(r""); assert_empty(r"()"); assert_empty(r"()*"); assert_empty(r"()+"); assert_empty(r"()?"); assert_empty(r"a*"); assert_empty(r"a?"); assert_empty(r"a{0}"); assert_empty(r"a{0,}"); assert_empty(r"a{0,1}"); assert_empty(r"a{0,10}"); #[cfg(feature = "unicode-gencat")] assert_empty(r"\pL*"); assert_empty(r"a*|b"); assert_empty(r"b|a*"); assert_empty(r"a|"); assert_empty(r"|a"); assert_empty(r"a||b"); assert_empty(r"a*a?(abcd)*"); assert_empty(r"^"); assert_empty(r"$"); assert_empty(r"(?m)^"); assert_empty(r"(?m)$"); assert_empty(r"\A"); assert_empty(r"\z"); assert_empty(r"\B"); assert_empty(r"(?-u)\B"); assert_empty(r"\b"); assert_empty(r"(?-u)\b"); // Negative examples. 
let assert_non_empty = |p| assert_ne!(Some(0), props_bytes(p).minimum_len()); assert_non_empty(r"a+"); assert_non_empty(r"a{1}"); assert_non_empty(r"a{1,}"); assert_non_empty(r"a{1,2}"); assert_non_empty(r"a{1,10}"); assert_non_empty(r"b|a"); assert_non_empty(r"a*a+(abcd)*"); #[cfg(feature = "unicode-gencat")] assert_non_empty(r"\P{any}"); assert_non_empty(r"[a--a]"); assert_non_empty(r"[a&&b]"); } #[test] fn analysis_is_literal() { // Positive examples. assert!(props(r"a").is_literal()); assert!(props(r"ab").is_literal()); assert!(props(r"abc").is_literal()); assert!(props(r"(?m)abc").is_literal()); assert!(props(r"(?:a)").is_literal()); assert!(props(r"foo(?:a)").is_literal()); assert!(props(r"(?:a)foo").is_literal()); assert!(props(r"[a]").is_literal()); // Negative examples. assert!(!props(r"").is_literal()); assert!(!props(r"^").is_literal()); assert!(!props(r"a|b").is_literal()); assert!(!props(r"(a)").is_literal()); assert!(!props(r"a+").is_literal()); assert!(!props(r"foo(a)").is_literal()); assert!(!props(r"(a)foo").is_literal()); assert!(!props(r"[ab]").is_literal()); } #[test] fn analysis_is_alternation_literal() { // Positive examples. assert!(props(r"a").is_alternation_literal()); assert!(props(r"ab").is_alternation_literal()); assert!(props(r"abc").is_alternation_literal()); assert!(props(r"(?m)abc").is_alternation_literal()); assert!(props(r"foo|bar").is_alternation_literal()); assert!(props(r"foo|bar|baz").is_alternation_literal()); assert!(props(r"[a]").is_alternation_literal()); assert!(props(r"(?:ab)|cd").is_alternation_literal()); assert!(props(r"ab|(?:cd)").is_alternation_literal()); // Negative examples. assert!(!props(r"").is_alternation_literal()); assert!(!props(r"^").is_alternation_literal()); assert!(!props(r"(a)").is_alternation_literal()); assert!(!props(r"a+").is_alternation_literal()); assert!(!props(r"foo(a)").is_alternation_literal()); assert!(!props(r"(a)foo").is_alternation_literal()); assert!(!props(r"[ab]").is_alternation_literal()); assert!(!props(r"[ab]|b").is_alternation_literal()); assert!(!props(r"a|[ab]").is_alternation_literal()); assert!(!props(r"(a)|b").is_alternation_literal()); assert!(!props(r"a|(b)").is_alternation_literal()); assert!(!props(r"a|b").is_alternation_literal()); assert!(!props(r"a|b|c").is_alternation_literal()); assert!(!props(r"[a]|b").is_alternation_literal()); assert!(!props(r"a|[b]").is_alternation_literal()); assert!(!props(r"(?:a)|b").is_alternation_literal()); assert!(!props(r"a|(?:b)").is_alternation_literal()); assert!(!props(r"(?:z|xx)@|xx").is_alternation_literal()); } // This tests that the smart Hir::repetition constructors does some basic // simplifications. #[test] fn smart_repetition() { assert_eq!(t(r"a{0}"), Hir::empty()); assert_eq!(t(r"a{1}"), hir_lit("a")); assert_eq!(t(r"\B{32111}"), hir_look(hir::Look::WordUnicodeNegate)); } // This tests that the smart Hir::concat constructor simplifies the given // exprs in a way we expect. 
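    // In particular, adjacent literals are flattened into a single literal,
    // while non-literal sub-expressions (like the `^` assertions below) split
    // the surrounding literal runs.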
#[test] fn smart_concat() { assert_eq!(t(""), Hir::empty()); assert_eq!(t("(?:)"), Hir::empty()); assert_eq!(t("abc"), hir_lit("abc")); assert_eq!(t("(?:foo)(?:bar)"), hir_lit("foobar")); assert_eq!(t("quux(?:foo)(?:bar)baz"), hir_lit("quuxfoobarbaz")); assert_eq!( t("foo(?:bar^baz)quux"), hir_cat(vec![ hir_lit("foobar"), hir_look(hir::Look::Start), hir_lit("bazquux"), ]) ); assert_eq!( t("foo(?:ba(?:r^b)az)quux"), hir_cat(vec![ hir_lit("foobar"), hir_look(hir::Look::Start), hir_lit("bazquux"), ]) ); } // This tests that the smart Hir::alternation constructor simplifies the // given exprs in a way we expect. #[test] fn smart_alternation() { assert_eq!( t("(?:foo)|(?:bar)"), hir_alt(vec![hir_lit("foo"), hir_lit("bar")]) ); assert_eq!( t("quux|(?:abc|def|xyz)|baz"), hir_alt(vec![ hir_lit("quux"), hir_lit("abc"), hir_lit("def"), hir_lit("xyz"), hir_lit("baz"), ]) ); assert_eq!( t("quux|(?:abc|(?:def|mno)|xyz)|baz"), hir_alt(vec![ hir_lit("quux"), hir_lit("abc"), hir_lit("def"), hir_lit("mno"), hir_lit("xyz"), hir_lit("baz"), ]) ); assert_eq!( t("a|b|c|d|e|f|x|y|z"), hir_uclass(&[('a', 'f'), ('x', 'z')]), ); // Tests that we lift common prefixes out of an alternation. assert_eq!( t("[A-Z]foo|[A-Z]quux"), hir_cat(vec![ hir_uclass(&[('A', 'Z')]), hir_alt(vec![hir_lit("foo"), hir_lit("quux")]), ]), ); assert_eq!( t("[A-Z][A-Z]|[A-Z]quux"), hir_cat(vec![ hir_uclass(&[('A', 'Z')]), hir_alt(vec![hir_uclass(&[('A', 'Z')]), hir_lit("quux")]), ]), ); assert_eq!( t("[A-Z][A-Z]|[A-Z][A-Z]quux"), hir_cat(vec![ hir_uclass(&[('A', 'Z')]), hir_uclass(&[('A', 'Z')]), hir_alt(vec![Hir::empty(), hir_lit("quux")]), ]), ); assert_eq!( t("[A-Z]foo|[A-Z]foobar"), hir_cat(vec![ hir_uclass(&[('A', 'Z')]), hir_alt(vec![hir_lit("foo"), hir_lit("foobar")]), ]), ); } } <file_sep>/regex-automata/tests/dfa/mod.rs #[cfg(all(feature = "dfa-build", feature = "dfa-search"))] mod api; #[cfg(feature = "dfa-onepass")] mod onepass; #[cfg(all(feature = "dfa-build", feature = "dfa-search"))] mod regression; #[cfg(all(not(miri), feature = "dfa-build", feature = "dfa-search"))] mod suite; <file_sep>/regex-cli/args/patterns.rs use std::path::PathBuf; use { anyhow::Context, lexopt::{Arg, Parser, ValueExt}, }; use crate::args::{Configurable, Usage}; /// A configuration object for reading patterns from the command line. /// /// It supports two different modes. One mode limits it to reading patterns /// from optional flags, i.e., `-p/--pattern` and `-f/--pattern-file`. The /// other mode permits reading from optional flags, but also reads all /// positional arguments as patterns too. The latter is convenient in cases /// where patterns are the only input to a command (like `regex-cli debug`). #[derive(Debug, Default)] pub struct Config { patterns: Vec<String>, fixed_strings: bool, combine: bool, mode: Mode, } impl Config { /// Creates a new configuration that will greedily treat every positional /// argument as a pattern. This also supports all other ways of providing /// patterns, i.e., the `-p/--pattern` and `-f/--file` flags. /// /// This is useful for commands that don't accept any other kind of /// positional arguments. pub fn positional() -> Config { Config { mode: Mode::Positional, ..Config::default() } } /// Creates a new configuration that will never treat a positional argument /// as a pattern. Instead, it only reads patterns from the `-p/--pattern` /// and `-f/--file` flags. /// /// This is useful for commands that accept other kinds of positional /// arguments. 
Forcing the use of a flag helps avoid resolving more /// complicated ambiguities regarding how to treat each positional /// argument. /// /// This is equivalent to `Config::default()`. pub fn only_flags() -> Config { Config::default() } /// Returns all of the pattern strings from this configuration, escaping /// and joining them if requested. When joining is requested, then at most /// one pattern is returned. /// /// Note that it is legal for this to return zero patterns! pub fn get(&self) -> anyhow::Result<Vec<String>> { let mut pats = self.patterns.clone(); if self.fixed_strings { pats = pats.iter().map(|p| regex_syntax::escape(p)).collect(); } if self.combine { // FIXME: This is... not technically correct, since someone could // provide a pattern `ab(cd` and then `ef)gh`. Neither are valid // patterns, but by joining them with a |, we get `ab(cd|ef)gh` // which is valid. The solution to this is I think to try and // parse the regex to make sure it's valid, but we should be // careful to only use the AST parser. The problem here is that // we don't technically have the configuration of the parser at // this point. We could *ask* for it. We could also just assume a // default configuration since the AST parser doesn't have many // configuration knobs. But probably we should just ask for the // parser configuration here. pats = vec![pats.join("|")]; } Ok(pats) } } impl Configurable for Config { fn configure( &mut self, p: &mut Parser, arg: &mut Arg, ) -> anyhow::Result<bool> { match *arg { Arg::Short('p') | Arg::Long("pattern") => { let pat = p.value().context("-p/--pattern needs a value")?; let pat = pat .string() .context("-p/--pattern must be valid UTF-8")?; self.patterns.push(pat); } Arg::Short('F') | Arg::Long("fixed-strings") => { self.fixed_strings = true; } Arg::Short('f') | Arg::Long("pattern-file") => { let path = PathBuf::from(p.value().context("-f/--pattern-file")?); let contents = std::fs::read_to_string(&path).with_context(|| { anyhow::anyhow!("failed to read {}", path.display()) })?; self.patterns.extend(contents.lines().map(|x| x.to_string())); } Arg::Long("combine-patterns") => { self.combine = true; } Arg::Value(ref mut v) => { if !matches!(self.mode, Mode::Positional) { return Ok(false); } let v = std::mem::take(v); self.patterns .push(v.string().context("patterns must be valid UTF-8")?); } _ => return Ok(false), } Ok(true) } fn usage(&self) -> &[Usage] { const USAGES: &'static [Usage] = &[ Usage::new( "-p, --pattern <pattern>", "Add a pattern to this command.", r#" This adds a new pattern to the command. All of the patterns provided, whether by this flag, as a positional argument (if supported) or via the -f/--pattern-file flag are combined into one regex matcher. All patterns given must be valid UTF-8. "#, ), Usage::new( "-f, --pattern-file", "Read patterns from the file given.", r#" Reads patterns, one per line, from the file given. All of the patterns provided, whether by this flag, as a positional argument (if supported) or via the -p/--pattern flag are combined into one regex matcher. All patterns given must be valid UTF-8. "#, ), Usage::new( "-F, --fixed-strings", "Interpret all patterns literally.", r#" When set, all patterns are interpreted as literal strings. So for example, special regex meta characters like '+' are matched literally instead of being given special significance. 
"#, ), Usage::new( "--combine-patterns", "Combine all patterns into one via an alternation.", r#" This flag combines all patterns given in this command into one by joining them together via a '|'. This is useful in cases where you want to provide many different things to search for, but explicitly only want one pattern to be constructed. In terms of debugging the regex engine, this can be used to inspect the practical differences between multi-pattern regexes and single pattern regexes, even when they generally match the same thing. "#, ), ]; USAGES } } /// The parsing behavior of a pattern configuration. That is, either treat /// positional arguments as patterns or not. /// /// The default is to only parse patterns from flags. #[derive(Debug)] enum Mode { Positional, OnlyFlags, } impl Default for Mode { fn default() -> Mode { Mode::OnlyFlags } } <file_sep>/regex-test/lib.rs /*! A crate for defining tests in a TOML format and applying them to regex engine implementations. Generally speaking, if you aren't writing your own regex engine and looking to test it, then this crate is probably not for you. Moreover, this crate does not come with any actual tests. It merely defines the test format and provides some convenient routines for executing tests within the context of Rust unit tests. # Format The entire test corpus is derived from zero or more TOML files. Each TOML file contains zero or more tests, where each test is defined as a table via `[[test]]`. Each test has the following fields: * `name` - A name for the test. It must be unique within its file. A test's [`RegexTest::full_name`] is derived either via `{group_name}/{name}` or `{group_name}/{name}/{additional_name}`, with the latter being used only when [`TestRunner::expand`] is used. The `group_name` is derived from the file stem (the file name without the `.toml suffix). * `regex` - The regex to test. This is either a string or a (possibly empty) list of regex patterns. When using a list, the underlying regex engine is expected to support multiple patterns where each are identified starting from `0` and incrementing by 1 for each subsequent pattern. * `haystack` - The text to search. * `bounds` - An optional field whose value is a table with `start` and `end` fields, whose values must be valid for the given `haystack`. When set, the search will only execute within these bounds. When absent, the bounds correspond to `start = 0` and `end = haystack.len()`. * `matches` - Zero or more match values. Each match value can be in one of four formats: * A simple span, i.e., `[5, 12]`, corresponding to the start and end of the match, in byte offsets. The start is inclusive and the end is exclusive. The pattern ID for the match is assumed to be `0`. * A table corresponding to the matching pattern ID and the span of the match. For example, `{ id = 5, span = [20, 21] }`. * A list of capture group spans, with the first corresponding to the overall match and the pattern ID assumed to be `0`. For example, `[[5, 10], [6, 8], [], [9, 10]]`, where `[]` corresponds to a group present in the regex but one that did not participate in a match. * A table corresponding to the matching pattern ID and a list of spans corresponding to the capture groups. For example, `{ id = 5, spans = [[5, 10], [6, 8], [], [9, 10]] }`. This is the most general, but also most verbose, syntax. * `match-limit` - An optional field that specifies a limit on the number of matches. When absent, no limit is enforced and all matches should be reported by the regex engine. 
This can be useful, for example, when one only cares about the first match. * `compiles` - An optional field indicating whether the regex is expected to compile. It defaults to `true` when absent. When `true`, if the regex does not compile, then the test fails. Conversely, when `false`, if the regex _does_ compile, then the test fails. * `anchored` - Whether to execute an anchored search or not. Note that this is not the same as adding a `^` to the beginning of your regex pattern. `^` always requires the regex to match at position `0`, but an anchored search simply requires that the regex match at the starting position of the search. (The starting position of the search can be configured via the optional `bounds` field.) * `case-insensitive` - Whether to match the regex case insensitively. This is disabled by default. There is no real difference between using this field and adding a `(?i)` to the beginning of your regex. (Some regex engines may not support `(?i)`.) * `unescape` - When enabled, the haystack is unescaped. Sequences like `\x00` are turned into their corresponding byte values. This permits one to write haystacks that contain invalid UTF-8 without embedding actual invalid UTF-8 into a TOML file (which is not allowed). There is generally no other reason to enable `unescape`. * `unicode` - When enabled, the regex pattern should be compiled with its corresponding Unicode mode enabled. For example, `[^a]` matches any UTF-8 encoding of any codepoint other than `a`. Case insensitivty should be Unicode aware. Unicode classes like `\pL` are available. The Perl classes `\w`, `\s` and `\d` should be Unicode aware. And so on. This is an optional field and is enabled by default. * `utf8` - When this is enabled, all regex match substrings should be entirely valid UTF-8. While parts of the haystack the regex searches through may not be valid UTF-8, only the portions that are valid UTF-8 may be reported in match spans. Importantly, this includes zero-width matches. Zero-width matches must never split the UTF-8 encoding of a single codepoint when this is enabled. This is an optional field and is enabled by default. * `line-terminator` - This sets the line terminator used by the multi-line assertions `(?m:^)` and `(?m:$)`. It defaults to `\n`. It must be exactly one byte. This field is automatically unescaped in order to permit a non-ASCII byte. * `match-kind` - May be one of `all`, `leftmost-first` or `leftmost-longest`. See [`MatchKind`] for more details. This is an optional field and defaults to `leftmost-first`. * `search-kind` - May be one of `earliest`, `leftmost` or `overlapping`. See [`SearchKind`] for more details. This is an optional field and defaults to `leftmost`. */ #![deny(missing_docs)] /// For convenience, `anyhow::Error` is used to represents errors in this /// crate. /// /// For this reason, `anyhow` is a public dependency and is re-exported here. pub extern crate anyhow; use std::{ borrow::Borrow, collections::HashSet, convert::TryFrom, fs, path::Path, }; use { anyhow::{bail, Context, Result}, bstr::{BString, ByteSlice, ByteVec}, serde::Deserialize, }; const ENV_REGEX_TEST: &str = "REGEX_TEST"; const ENV_REGEX_TEST_VERBOSE: &str = "REGEX_TEST_VERBOSE"; /// A collection of regex tests. #[derive(Clone, Debug, Deserialize)] pub struct RegexTests { /// 'default' permits an empty TOML file. #[serde(default, rename = "test")] tests: Vec<RegexTest>, #[serde(skip)] seen: HashSet<String>, } impl RegexTests { /// Create a new empty collection of glob tests. 
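    ///
    /// # Example
    ///
    /// A minimal sketch of typical usage; the TOML file path below is
    /// illustrative and not part of this crate:
    ///
    /// ```no_run
    /// // Load a TOML test file (path is illustrative) and list its tests.
    /// let mut tests = regex_test::RegexTests::new();
    /// tests.load("tests/data/example.toml").unwrap();
    /// for test in tests.iter() {
    ///     println!("loaded: {}", test.full_name());
    /// }
    /// ```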
pub fn new() -> RegexTests { RegexTests { tests: vec![], seen: HashSet::new() } } /// Loads all of the tests in the given TOML file. The group name assigned /// to each test is the stem of the file name. For example, if one loads /// `foo/bar.toml`, then the group name for each test will be `bar`. pub fn load<P: AsRef<Path>>(&mut self, path: P) -> Result<()> { let path = path.as_ref(); let data = fs::read(path) .with_context(|| format!("failed to read {}", path.display()))?; let group_name = path .file_stem() .with_context(|| { format!("failed to get file name of {}", path.display()) })? .to_str() .with_context(|| { format!("invalid UTF-8 found in {}", path.display()) })?; self.load_slice(&group_name, &data) .with_context(|| format!("error loading {}", path.display()))?; Ok(()) } /// Load all of the TOML encoded tests in `data` into this collection. /// The given group name is assigned to all loaded tests. pub fn load_slice(&mut self, group_name: &str, data: &[u8]) -> Result<()> { let data = std::str::from_utf8(&data).with_context(|| { format!("data in {} is not valid UTF-8", group_name) })?; let mut index = 1; let mut tests: RegexTests = toml::from_str(&data).with_context(|| { format!("error decoding TOML for '{}'", group_name) })?; for t in &mut tests.tests { t.group = group_name.to_string(); if t.name.is_empty() { t.name = format!("{}", index); index += 1; } t.full_name = format!("{}/{}", t.group, t.name); if t.unescape { t.haystack = BString::from(Vec::unescape_bytes( // OK because TOML requires valid UTF-8. t.haystack.to_str().unwrap(), )); } if t.line_terminator.is_empty() { t.line_terminator = BString::from("\n"); } else { t.line_terminator = BString::from(Vec::unescape_bytes( // OK because TOML requires valid UTF-8. t.line_terminator.to_str().unwrap(), )); anyhow::ensure!( t.line_terminator.len() == 1, "line terminator '{:?}' has length not equal to 1", t.line_terminator, ); } if self.seen.contains(t.full_name()) { bail!("found duplicate tests for name '{}'", t.full_name()); } self.seen.insert(t.full_name().to_string()); } self.tests.extend(tests.tests); Ok(()) } /// Return an iterator over all regex tests that have been loaded. The /// order of the iterator corresponds to the order in which the tests were /// loaded. /// /// This is useful to pass to [`TestRunner::test_iter`]. pub fn iter(&self) -> RegexTestsIter { RegexTestsIter(self.tests.iter()) } } /// A regex test describes the inputs and expected outputs of a regex match. /// /// Each `RegexTest` represents a single `[[test]]` table in a TOML test file. #[derive(Clone, Debug, Deserialize)] #[serde(deny_unknown_fields)] pub struct RegexTest { #[serde(skip)] group: String, #[serde(default)] name: String, #[serde(skip)] additional_name: String, #[serde(skip)] full_name: String, regex: RegexesFormat, haystack: BString, bounds: Option<Span>, matches: Vec<Captures>, #[serde(rename = "match-limit")] match_limit: Option<usize>, #[serde(default = "default_true")] compiles: bool, #[serde(default)] anchored: bool, #[serde(default, rename = "case-insensitive")] case_insensitive: bool, #[serde(default)] unescape: bool, #[serde(default = "default_true")] unicode: bool, #[serde(default = "default_true")] utf8: bool, #[serde(default, rename = "line-terminator")] line_terminator: BString, #[serde(default, rename = "match-kind")] match_kind: MatchKind, #[serde(default, rename = "search-kind")] search_kind: SearchKind, } impl RegexTest { /// Return the group name of this test. 
/// /// Usually the group name corresponds to a collection of related /// tests. More specifically, when using [`RegexTests::load`], the /// group name corresponds to the file stem (the file name without the /// `.toml` suffix). Otherwise, the group name is whatever is given to /// [`RegexTests::load_slice`]. pub fn group(&self) -> &str { &self.group } /// The name of this test. /// /// Note that this is only the name as given in the `[[test]]` block. The /// actual full name used for filtering and reporting can be retrieved with /// [`RegexTest::full_name`]. pub fn name(&self) -> &str { &self.name } /// The additional name for this test. /// /// This is only non-empty when the test runner was expanded with /// [`TestRunner::expand`]. pub fn additional_name(&self) -> &str { &self.additional_name } /// The full name of this test, which is formed by joining the group /// name, the test name and the additional name with a `/`. pub fn full_name(&self) -> &str { &self.full_name } /// Return all of the regexes that should be matched for this test. This /// slice may be empty! pub fn regexes(&self) -> &[String] { self.regex.patterns() } /// Return the bytes on which the regex should be matched. pub fn haystack(&self) -> &[u8] { &self.haystack } /// Returns the bounds of a search. /// /// If the test didn't specify any bounds, then the bounds returned are /// equivalent to the entire haystack. pub fn bounds(&self) -> Span { self.bounds.unwrap_or(Span { start: 0, end: self.haystack().len() }) } /// Returns the limit on the number of matches that should be reported, /// if specified in the test. /// /// This is useful for tests that only want to check for the first /// match. In which case, the match limit is set to 1. /// /// If there is no match limit, then regex engines are expected to report /// all matches. pub fn match_limit(&self) -> Option<usize> { self.match_limit } /// Returns true if the regex(es) in this test are expected to compile. pub fn compiles(&self) -> bool { self.compiles } /// Whether the regex should perform an anchored search. /// /// This is distinct from putting a `^` in the regex in that `bounds` may /// be specified that permit the regex search to start at a position /// `i > 0`. In which case, enabling anchored mode here requires that any /// matches produced must have a start offset at `i`. pub fn anchored(&self) -> bool { self.anchored } /// Returns true if regex matching should be performed without regard to /// case. pub fn case_insensitive(&self) -> bool { self.case_insensitive } /// Returns true if regex matching should have Unicode mode enabled. /// /// For example, `[^a]` matches any UTF-8 encoding of any codepoint other /// than `a`. Case insensitivty should be Unicode aware. Unicode classes /// like `\pL` are available. The Perl classes `\w`, `\s` and `\d` should /// be Unicode aware. And so on. /// /// This is enabled by default. pub fn unicode(&self) -> bool { self.unicode } /// Returns true if regex matching should exclusively match valid UTF-8. /// When this is disabled, matching on arbitrary bytes is permitted. /// /// When this is enabled, all regex match substrings should be entirely /// valid UTF-8. While parts of the haystack the regex searches through /// may not be valid UTF-8, only the portions that are valid UTF-8 may be /// reported in match spans. /// /// Importantly, this includes zero-width matches. Zero-width matches must /// never split the UTF-8 encoding of a single codepoint when this is /// enabled. /// /// This is enabled by default. 
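    ///
    /// For example, with this enabled, a zero-width match may be reported at
    /// offset 0 or 2 of the haystack `β` (which is two bytes long), but not
    /// at offset 1, since that would split the codepoint's UTF-8 encoding.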
pub fn utf8(&self) -> bool { self.utf8 } /// Returns the line terminator that should be used for the multi-line /// assertions `(?m:^)` and `(?m:$)`. /// /// If it isn't set, then this defaults to `\n`. pub fn line_terminator(&self) -> u8 { self.line_terminator[0] } /// Return the match semantics required by this test. pub fn match_kind(&self) -> MatchKind { self.match_kind } /// Return the search semantics required by this test. pub fn search_kind(&self) -> SearchKind { self.search_kind } /// Run the test and return the result produced by the given compiled /// regex. fn test(&self, regex: &mut CompiledRegex) -> TestResult { match regex.matcher { None => TestResult::skip(), Some(ref mut match_regex) => match_regex(self), } } /// Append `/name` to the `full_name` of this test. /// /// This is used to support [`TestRunner::expand`]. fn with_additional_name(&self, name: &str) -> RegexTest { let additional_name = name.to_string(); let full_name = format!("{}/{}", self.full_name, additional_name); RegexTest { additional_name, full_name, ..self.clone() } } /// Returns true if and only if this test expects at least one of the /// regexes to match the haystack. fn is_match(&self) -> bool { !self.matches.is_empty() } /// Returns a slice of pattern IDs that are expected to match the haystack. /// The slice is empty if no match is expected to occur. The IDs returned /// are deduplicated and sorted in ascending order. fn which_matches(&self) -> Vec<usize> { let mut seen = HashSet::new(); let mut ids = vec![]; for cap in self.matches.iter() { if !seen.contains(&cap.id) { seen.insert(cap.id); ids.push(cap.id); } } ids.sort(); ids } /// Extracts the overall match from each `Captures` match in this test /// and returns it. fn matches(&self) -> Vec<Match> { let mut matches = vec![]; for cap in self.matches.iter() { matches.push(cap.to_match()); } matches } /// Returns the matches expected by this test, including the spans of any /// matching capture groups. fn captures(&self) -> Vec<Captures> { self.matches.clone() } } /// The result of compiling a regex. /// /// In many implementations, the act of matching a regex can be separated from /// the act of compiling a regex. A `CompiledRegex` represents a regex that has /// been compiled and is ready to be used for matching. /// /// The matching implementation is represented by a closure that accepts a /// [`&RegexTest`](RegexTest) and returns a [`TestResult`]. pub struct CompiledRegex { matcher: Option<Box<dyn FnMut(&RegexTest) -> TestResult + 'static>>, } impl CompiledRegex { /// Provide a closure that represents the compiled regex and executes a /// regex match on any `RegexTest`. The `RegexTest` given to the closure /// provided is the exact same `RegexTest` that is used to compile this /// regex. pub fn compiled( matcher: impl FnMut(&RegexTest) -> TestResult + 'static, ) -> CompiledRegex { CompiledRegex { matcher: Some(Box::new(matcher)) } } /// Indicate that tests on this regex should be skipped. This typically /// occurs if the `RegexTest` requires something that an implementation /// does not support. pub fn skip() -> CompiledRegex { CompiledRegex { matcher: None } } /// Returns true if the test runner decided to skip the test when /// attempting to compile the regex. 
pub fn is_skip(&self) -> bool { self.matcher.is_none() } } impl std::fmt::Debug for CompiledRegex { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { let status = match self.matcher { None => "Skip", Some(_) => "Run(...)", }; f.debug_struct("CompiledRegex").field("matcher", &status).finish() } } /// The result of executing a regex search. /// /// When using the test runner, callers must provide a closure that takes /// a `RegexTest` and returns a `TestResult`. The `TestResult` is meant to /// capture the results of matching the haystack against the regex specified by /// the `RegexTest`. /// /// Usually this consists of one or more matches, which can be constructed via /// `TestResult::matches` (for just overall matches) or `TestResult::captures` /// (for matches with capture group spans). But the regex engine may also /// report whether a match exists, or just whether a pattern matched or not. /// That can be done via `TestResult::matched` and `TestResult::which`, /// respectively. #[derive(Debug, Clone)] pub struct TestResult { kind: TestResultKind, } #[derive(Debug, Clone)] enum TestResultKind { Match(bool), Which(Vec<usize>), StartEnd(Vec<Match>), Captures(Vec<Captures>), Skip, Fail { why: String }, } impl TestResult { /// Create a test result that indicates just whether any match was found /// or not. pub fn matched(yes: bool) -> TestResult { TestResult { kind: TestResultKind::Match(yes) } } /// Create a test result that indicates which out of possibly many regexes /// matched the haystack. If `which` is empty, then this is equivalent to /// `TestResult::matched(false)`. /// /// Note that the iterator should consist of pattern IDs, where each /// ID corresponds to a pattern that matches anywhere in the haystack. /// Multiple patterns may match the same region of the haystack. That is, /// this supports overlapping matches. pub fn which<I: IntoIterator<Item = usize>>(it: I) -> TestResult { let mut which: Vec<usize> = it.into_iter().collect(); which.sort(); TestResult { kind: TestResultKind::Which(which) } } /// Create a test result containing a sequence of all matches in the test's /// haystack. This is useful when the regex engine only reports overall /// matches and not the spans of each matching capture group. /// /// If the sequence is empty, then this is equivalent to /// `TestResult::matched(false)`. pub fn matches<I: IntoIterator<Item = Match>>(it: I) -> TestResult { TestResult { kind: TestResultKind::StartEnd(it.into_iter().collect()) } } /// Create a test result containing a sequence of all capturing matches in /// the test's haystack. Each match is a `Captures`, and each `Captures` /// should include the spans of all matching capturing groups. /// /// If the sequence is empty, then this is equivalent to /// `TestResult::matched(false)`. pub fn captures<I: IntoIterator<Item = Captures>>(it: I) -> TestResult { TestResult { kind: TestResultKind::Captures(it.into_iter().collect()) } } /// Indicate that this test should be skipped. It will not be counted as /// a failure. pub fn skip() -> TestResult { TestResult { kind: TestResultKind::Skip } } /// Indicate that this test should be failed for the reason given. /// /// This is useful when a test needs to be failed for reasons that the /// test runner itself cannot check. That is, the test is failed by the /// implementation being tested. pub fn fail(why: &str) -> TestResult { TestResult { kind: TestResultKind::Fail { why: why.to_string() } } } } /// A runner for executing regex tests. 
///
/// This runner is intended to be used within a Rust unit test, marked with the
/// `#[test]` attribute.
///
/// A test runner is responsible for running tests against a regex
/// implementation. It contains logic for skipping tests and collects test
/// results. Typical usage corresponds to calling [`TestRunner::test_iter`] on
/// an iterator of `RegexTest`s, and then calling `assert` once done. If any
/// tests failed, then `assert` will panic with an error message containing all
/// test failures. `assert` must be called before the test completes.
///
/// # Skipping tests
///
/// If the `REGEX_TEST` environment variable is set, then it may contain
/// a comma separated list of substrings. Each substring corresponds to a
/// whitelisted item, unless it starts with a `-`, in which case it corresponds
/// to a blacklisted item.
///
/// If there are any whitelist items, then a test's full name must contain at
/// least one of the whitelist substrings in order to be run, and must not
/// contain any blacklist substrings. If there are no whitelist substrings,
/// then a test is run only when it does not match any blacklist substrings.
///
/// The last substring that a test name matches takes precedence.
///
/// Callers may also specify explicit whitelist or blacklist substrings using
/// the corresponding methods on this type, which has the effect of always
/// having those rules in place for that specific test. For example, if you're
/// testing a search by building a DFA and then minimizing it, you may want to
/// skip tests with bigger regexes, since they could take quite some time to
/// run.
///
/// Whitelist and blacklist substrings are matched on the full name of each
/// test, which typically looks like `group_name/test_name`.
///
/// Currently there is no way to escape either a `-` or a `,` in `REGEX_TEST`.
/// This usually isn't required because test names usually don't include either
/// character.
#[derive(Debug)]
pub struct TestRunner {
    include: Vec<IncludePattern>,
    results: RegexTestResults,
    expanders: Vec<Expander>,
}

impl TestRunner {
    /// Create a new runner for executing tests.
    ///
    /// The test runner maintains a full list of tests that have succeeded,
    /// failed or been skipped. Moreover, the test runner may control which
    /// tests get run via its whitelist and blacklist.
    ///
    /// This returns an error if there was a problem reading the `REGEX_TEST`
    /// environment variable, which may be set to include or exclude tests.
    /// See the docs on `TestRunner` for its format.
    pub fn new() -> Result<TestRunner> {
        let mut runner = TestRunner {
            include: vec![],
            results: RegexTestResults::new(),
            expanders: vec![],
        };
        for mut substring in read_env(ENV_REGEX_TEST)?.split(",") {
            substring = substring.trim();
            if substring.is_empty() {
                continue;
            }
            if substring.starts_with("-") {
                runner.blacklist(&substring[1..]);
            } else {
                runner.whitelist(substring);
            }
        }
        Ok(runner)
    }

    /// Assert that all tests run have either passed or have been skipped.
    ///
    /// If any tests have failed, then a panic occurs with a report of all
    /// failures.
    ///
    /// If `REGEX_TEST_VERBOSE` is set to `1`, then a longer report of tests
    /// that passed, failed or skipped is printed.
    pub fn assert(&mut self) {
        self.results.assert();
    }

    /// Whitelist the given substring.
    ///
    /// Whitelist and blacklist rules are only applied when
    /// [`TestRunner::test_iter`] is called.
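    ///
    /// For example, a hypothetical runner that only runs tests whose full
    /// names contain the substring `"anchored"` might be set up like this
    /// (a sketch only; the substring is an arbitrary example and error
    /// handling is elided):
    ///
    /// ```ignore
    /// let mut runner = TestRunner::new()?;
    /// runner.whitelist("anchored");
    /// ```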
    pub fn whitelist(&mut self, substring: &str) -> &mut TestRunner {
        self.include.push(IncludePattern {
            blacklist: false,
            substring: BString::from(substring),
        });
        self
    }

    /// Whitelist the given iterator substrings.
    ///
    /// This is a convenience routine for calling `whitelist` on each of the
    /// substrings in the iterator provided.
    ///
    /// Whitelist and blacklist rules are only applied when
    /// [`TestRunner::test_iter`] is called.
    pub fn whitelist_iter<I, S>(&mut self, substrings: I) -> &mut TestRunner
    where
        I: IntoIterator<Item = S>,
        S: AsRef<str>,
    {
        for substring in substrings {
            self.whitelist(substring.as_ref());
        }
        self
    }

    /// Blacklist the given substring.
    ///
    /// A blacklisted test is never run, unless a whitelisted substring added
    /// after the blacklisted substring matches it.
    ///
    /// Whitelist and blacklist rules are only applied when
    /// [`TestRunner::test_iter`] is called.
    pub fn blacklist(&mut self, substring: &str) -> &mut TestRunner {
        self.include.push(IncludePattern {
            blacklist: true,
            substring: BString::from(substring),
        });
        self
    }

    /// Blacklist the given iterator substrings.
    ///
    /// A blacklisted test is never run, unless a whitelisted substring added
    /// after the blacklisted substring matches it.
    ///
    /// This is a convenience routine for calling `blacklist` on each of the
    /// substrings in the iterator provided.
    ///
    /// Whitelist and blacklist rules are only applied when
    /// [`TestRunner::test_iter`] is called.
    pub fn blacklist_iter<I, S>(&mut self, substrings: I) -> &mut TestRunner
    where
        I: IntoIterator<Item = S>,
        S: AsRef<str>,
    {
        for substring in substrings {
            self.blacklist(substring.as_ref());
        }
        self
    }

    /// Set an expansion predicate that appends each entry in
    /// `additional_names` to the end of the name of every test for which
    /// `predicate` returns true. Moreover, the corresponding additional name
    /// is made available via [`RegexTest::additional_name`].
    ///
    /// This permits implementors to create multiple copies of each test, and
    /// then do specifically different tasks with each, while making it so each
    /// test is distinct.
    ///
    /// For example, you might write something like this:
    ///
    /// ```ignore
    /// TestRunner::new()?
    ///     .expand(&["is_match", "find"], |t| t.compiles())
    ///     .test_iter(tests, compiler)
    ///     .assert()
    /// ```
    ///
    /// where each test that is expected to have a regex compile gets copied
    /// with `/is_match` and `/find` appended to the end of its name. Then, in
    /// your own test runner, you can inspect [`RegexTest::additional_name`] to
    /// decide what to do. In the case of `is_match`, you might test your regex
    /// engine's "has a match" API, which might exercise different logic than
    /// your "find where the matches are" API.
    pub fn expand<S: AsRef<str>>(
        &mut self,
        additional_names: &[S],
        predicate: impl FnMut(&RegexTest) -> bool + 'static,
    ) -> &mut TestRunner {
        self.expanders.push(Expander {
            predicate: Box::new(predicate),
            additional_names: additional_names
                .iter()
                .map(|s| s.as_ref().to_string())
                .collect(),
        });
        self
    }

    /// Run all of the given tests using the given regex compiler.
    ///
    /// The compiler given is a closure that accepts a
    /// [`&RegexTest`](RegexTest) and a sequence of patterns, and returns (if
    /// successful) a [`CompiledRegex`] which can execute a search.
    ///
    /// Note that if there are test failures, this merely _collects_ them. Use
    /// [`TestRunner::assert`] to fail the current test by panicking if there
    /// are any failures.
    ///
    /// Typically, one provides [`RegexTests::iter`] as the iterator of
    /// `RegexTest` values.
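    ///
    /// For example, here is a rough sketch of wiring the runner up to a
    /// hypothetical `my_engine` regex library (`my_engine` and `TOML_BYTES`
    /// are placeholders for illustration, not part of this crate):
    ///
    /// ```ignore
    /// let mut tests = RegexTests::new();
    /// tests.load_slice("example", TOML_BYTES)?;
    ///
    /// let mut runner = TestRunner::new()?;
    /// runner.test_iter(tests.iter(), |test, regexes| {
    ///     // Compile the pattern(s) with the engine under test.
    ///     let re = my_engine::compile(&regexes[0])?;
    ///     Ok(CompiledRegex::compiled(move |test| {
    ///         TestResult::matched(re.is_match(test.haystack()))
    ///     }))
    /// });
    /// runner.assert();
    /// ```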
pub fn test_iter<I, T>( &mut self, it: I, mut compile: impl FnMut(&RegexTest, &[String]) -> Result<CompiledRegex>, ) -> &mut TestRunner where I: IntoIterator<Item = T>, T: Borrow<RegexTest>, { for test in it { let test = test.borrow(); let mut additional = vec![]; for expander in &mut self.expanders { if (expander.predicate)(test) { for name in expander.additional_names.iter() { additional.push(test.with_additional_name(name)); } break; } } if additional.is_empty() { additional.push(test.to_owned()); } for test in &additional { if self.should_skip(test) { self.results.skip(test); continue; } self.test(test, |regexes| compile(test, regexes)); } } self } /// Run a single test. /// /// This records the result of running the test in this runner. This does /// not fail the test immediately if the given regex test fails. Instead, /// this is only done when the `assert` method is called. /// /// Note that using this method bypasses any whitelist or blacklist applied /// to this runner. Whitelisted (and blacklisted) substrings are only /// applied when using `test_iter`. pub fn test( &mut self, test: &RegexTest, mut compile: impl FnMut(&[String]) -> Result<CompiledRegex>, ) -> &mut TestRunner { let mut compiled = match safe(|| compile(test.regexes())) { Err(msg) => { // Regex tests should never panic. It's auto-fail if they do. self.results.fail( test, RegexTestFailureKind::UnexpectedPanicCompile(msg), ); return self; } Ok(Ok(compiled)) => compiled, Ok(Err(err)) => { if !test.compiles() { self.results.pass(test); } else { self.results.fail( test, RegexTestFailureKind::CompileError { err }, ); } return self; } }; // We fail the test if we didn't expect the regex to compile. However, // it's possible the caller decided to skip the test when attempting // to compile the regex, so we check for that. If the compiled regex // is marked as skipped, then 'test.test(..)' below handles it // correctly. if !test.compiles() && !compiled.is_skip() { self.results.fail(test, RegexTestFailureKind::NoCompileError); return self; } let result = match safe(|| test.test(&mut compiled)) { Ok(result) => result, Err(msg) => { self.results.fail( test, RegexTestFailureKind::UnexpectedPanicSearch(msg), ); return self; } }; match result.kind { TestResultKind::Match(yes) => { if yes == test.is_match() { self.results.pass(test); } else { self.results.fail(test, RegexTestFailureKind::IsMatch); } } TestResultKind::Which(which) => { if which != test.which_matches() { self.results .fail(test, RegexTestFailureKind::Many { got: which }); } else { self.results.pass(test); } } TestResultKind::StartEnd(matches) => { let expected = test.matches(); if expected != matches { self.results.fail( test, RegexTestFailureKind::StartEnd { got: matches }, ); } else { self.results.pass(test); } } TestResultKind::Captures(caps) => { let expected = test.captures(); if expected != caps { self.results.fail( test, RegexTestFailureKind::Captures { got: caps }, ); } else { self.results.pass(test); } } TestResultKind::Skip => { self.results.skip(test); } TestResultKind::Fail { why } => { self.results .fail(test, RegexTestFailureKind::UserFailure { why }); } } self } /// Return true if and only if the given test should be skipped. fn should_skip(&self, test: &RegexTest) -> bool { if self.include.is_empty() { return false; } // If we don't have any whitelist patterns, then the test will be run // unless it is blacklisted. Otherwise, if there are whitelist // patterns, then the test must match at least one of them. 
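        // Note that the *last* matching substring wins below: a whitelist
        // entry added after a blacklist entry (or vice versa) overrides it,
        // which matches the precedence documented on `TestRunner`.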
let mut skip = self.include.iter().any(|pat| !pat.blacklist); for pat in &self.include { if test.full_name().as_bytes().contains_str(&pat.substring) { skip = pat.blacklist; } } skip } } #[derive(Debug)] struct IncludePattern { blacklist: bool, substring: BString, } struct Expander { predicate: Box<dyn FnMut(&RegexTest) -> bool>, additional_names: Vec<String>, } impl std::fmt::Debug for Expander { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { f.debug_struct("Expander") .field("predicate", &"<FnMut(..)>") .field("additional_names", &self.additional_names) .finish() } } /// A collection of test results, corresponding to passed, skipped and failed /// tests. #[derive(Debug)] struct RegexTestResults { pass: Vec<RegexTestResult>, fail: Vec<RegexTestFailure>, skip: Vec<RegexTestResult>, } /// A test that passed or skipped, along with its specific result. #[derive(Debug)] struct RegexTestResult { test: RegexTest, } /// A test that failed along with the reason why. #[derive(Debug)] struct RegexTestFailure { test: RegexTest, kind: RegexTestFailureKind, } /// Describes the nature of the failed test. #[derive(Debug)] enum RegexTestFailureKind { /// UserFailure indicates that the test failed because the test function /// explicitly failed it for the reason in the message given. UserFailure { why: String }, /// This occurs when the test expected a match (or didn't expect a match), /// but the actual regex implementation didn't match (or did match). IsMatch, /// This occurs when a set of regexes is tested, and the matching regexes /// returned by the regex implementation don't match the expected matching /// regexes. This error contains the indices of the regexes that matched. Many { got: Vec<usize> }, /// This occurs when a single regex is used to find all non-overlapping /// matches in a haystack, where the result did not match what was /// expected. This reports the incorrect matches returned by the regex /// implementation under test. StartEnd { got: Vec<Match> }, /// Like StartEnd, but for capturing groups. Captures { got: Vec<Captures> }, /// This occurs when the test expected the regex to fail to compile, but it /// compiled successfully. NoCompileError, /// This occurs when the test expected the regex to compile successfully, /// but it failed to compile. CompileError { err: anyhow::Error }, /// While compiling, a panic occurred. If possible, the panic message /// is captured. UnexpectedPanicCompile(String), /// While searching, a panic occurred. If possible, the panic message /// is captured. 
UnexpectedPanicSearch(String), } impl RegexTestResults { fn new() -> RegexTestResults { RegexTestResults { pass: vec![], fail: vec![], skip: vec![] } } fn pass(&mut self, test: &RegexTest) { self.pass.push(RegexTestResult { test: test.clone() }); } fn fail(&mut self, test: &RegexTest, kind: RegexTestFailureKind) { self.fail.push(RegexTestFailure { test: test.clone(), kind }); } fn skip(&mut self, test: &RegexTest) { self.skip.push(RegexTestResult { test: test.clone() }); } fn assert(&self) { if read_env(ENV_REGEX_TEST_VERBOSE).map_or(false, |s| s == "1") { self.verbose(); } if self.fail.is_empty() { return; } let failures = self .fail .iter() .map(|f| f.to_string()) .collect::<Vec<String>>() .join("\n\n"); panic!( "found {} failures:\n{}\n{}\n{}\n\n\ Set the REGEX_TEST environment variable to filter tests, \n\ e.g., REGEX_TEST=foo,-foo2 runs every test whose name contains \n\ foo but not foo2\n\n", self.fail.len(), "~".repeat(79), failures.trim(), "~".repeat(79), ) } fn verbose(&self) { println!("{}", "~".repeat(79)); for t in &self.skip { println!("skip: {}", t.full_name()); } for t in &self.pass { println!("pass: {}", t.full_name()); } for t in &self.fail { println!("FAIL: {}", t.test.full_name()); } println!( "\npassed: {}, skipped: {}, failed: {}", self.pass.len(), self.skip.len(), self.fail.len() ); println!("{}", "~".repeat(79)); } } impl RegexTestResult { fn full_name(&self) -> String { self.test.full_name().to_string() } } impl RegexTestFailure { fn full_name(&self) -> String { self.test.full_name().to_string() } } impl std::fmt::Display for RegexTestFailure { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!( f, "{}: {}\n\ pattern: {:?}\n\ haystack: {:?}", self.full_name(), self.kind.fmt(&self.test)?, self.test.regexes(), self.test.haystack().as_bstr(), )?; Ok(()) } } impl RegexTestFailureKind { fn fmt(&self, test: &RegexTest) -> Result<String, std::fmt::Error> { use std::fmt::Write; let mut buf = String::new(); match *self { RegexTestFailureKind::UserFailure { ref why } => { write!(buf, "failed by implementor because: {}", why)?; } RegexTestFailureKind::IsMatch => { if test.is_match() { write!(buf, "expected match, but none found")?; } else { write!(buf, "expected no match, but found a match")?; } } RegexTestFailureKind::Many { ref got } => { write!( buf, "expected regexes {:?} to match, but found {:?}", test.which_matches(), got )?; } RegexTestFailureKind::StartEnd { ref got } => { write!( buf, "did not find expected matches\n\ expected: {:?}\n \ got: {:?}", test.matches(), got, )?; } RegexTestFailureKind::Captures { ref got } => { write!( buf, "expected to find {:?} captures, but got {:?}", test.captures(), got, )?; } RegexTestFailureKind::NoCompileError => { write!(buf, "expected regex to NOT compile, but it did")?; } RegexTestFailureKind::CompileError { ref err } => { write!(buf, "expected regex to compile, failed: {}", err)?; } RegexTestFailureKind::UnexpectedPanicCompile(ref msg) => { write!(buf, "got unexpected panic while compiling:\n{}", msg)?; } RegexTestFailureKind::UnexpectedPanicSearch(ref msg) => { write!(buf, "got unexpected panic while searching:\n{}", msg)?; } } Ok(buf) } } /// An iterator over regex tests. /// /// This iterator is created by the [`RegexTests::iter`] method. 
#[derive(Debug)]
pub struct RegexTestsIter<'a>(std::slice::Iter<'a, RegexTest>);

impl<'a> Iterator for RegexTestsIter<'a> {
    type Item = &'a RegexTest;

    fn next(&mut self) -> Option<&'a RegexTest> {
        self.0.next()
    }
}

/// Represents either a single regex or a list of regexes in a TOML.
#[derive(Clone, Debug, Deserialize, Eq, PartialEq)]
#[serde(untagged)]
enum RegexesFormat {
    Single(String),
    Many(Vec<String>),
}

impl RegexesFormat {
    fn patterns(&self) -> &[String] {
        match *self {
            RegexesFormat::Single(ref pat) => std::slice::from_ref(pat),
            RegexesFormat::Many(ref pats) => pats,
        }
    }
}

/// Captures represents a single group of captured matches from a regex search.
///
/// There is always at least 1 group, and the first group is always present and
/// corresponds to the overall match.
#[derive(Clone, Debug, Deserialize, Eq, PartialEq)]
#[serde(try_from = "CapturesFormat")]
pub struct Captures {
    /// The ID of the regex that matched.
    ///
    /// The ID is the index of the regex provided to the regex compiler,
    /// starting from `0`. In the case of a single regex search, the only
    /// possible ID is `0`.
    id: usize,
    /// The capturing groups that matched, along with the match offsets for
    /// each. The first group should always be non-None, as it corresponds to
    /// the overall match.
    ///
    /// This should either have length 1 (when no capturing group offsets are
    /// included in the test result) or it should have length equal to the
    /// number of capturing groups in the regex pattern.
    groups: Vec<Option<Span>>,
}

impl Captures {
    /// Create a new set of captures for a single match of a regex.
    ///
    /// If available, the iterator should provide items for every capturing
    /// group in the regex, including the 0th capturing group corresponding to
    /// the entire match. At minimum, the 0th capturing group should be
    /// provided.
    ///
    /// If a capturing group did not participate in the match, then a `None`
    /// value should be used. (The 0th capturing group should never be `None`.)
    ///
    /// If the iterator yields no elements or the first group is `None`, then
    /// this returns an error.
    ///
    /// The `id` should be the ID of the pattern that matched. This is always
    /// `0` for single-pattern regexes. Otherwise, the ID of a pattern starts
    /// at `0` and is incremented by 1 for each subsequent pattern.
    ///
    /// Note that there are possibly more convenient and infallible `From`
    /// impls for converting a `Match` or a `Span` into a `Captures`.
    pub fn new<I: IntoIterator<Item = Option<Span>>>(
        id: usize,
        it: I,
    ) -> Result<Captures> {
        let groups: Vec<Option<Span>> = it.into_iter().collect();
        if groups.is_empty() {
            bail!("captures must contain at least one group");
        } else if groups[0].is_none() {
            bail!("first group (index 0) of captures must be non-None");
        }
        Ok(Captures { id, groups })
    }

    /// Returns the ID of the pattern that matched.
    ///
    /// For any single pattern regexes, this should always be zero.
    pub fn id(&self) -> usize {
        self.id
    }

    /// Returns a slice of the underlying spans, each group corresponding to
    /// the (possibly) matched span. The first group in the slice returned
    /// is guaranteed to correspond to the overall match span and is thus
    /// non-`None`. All other groups may be `None`. Similarly, the slice is
    /// guaranteed to have length at least 1.
    pub fn groups(&self) -> &[Option<Span>] {
        &self.groups
    }

    /// Returns the number of groups (including the first) in these captures.
    ///
    /// The length returned is guaranteed to be greater than zero.
pub fn len(&self) -> usize { self.groups.len() } /// Returns the overall match, including the pattern ID, for this group /// of captures. pub fn to_match(&self) -> Match { Match { id: self.id(), span: self.to_span() } } /// Returns the overall match span for this group of captures. pub fn to_span(&self) -> Span { // This is OK because a Captures value must always have at least one // group where the first group always corresponds to match offsets. self.groups[0].unwrap() } } /// Converts a plain `Match` to a `Captures` value, where the match corresponds /// to the first and only group in `Captures`. impl From<Match> for Captures { fn from(m: Match) -> Captures { Captures { id: m.id, groups: vec![Some(m.span)] } } } /// Converts a plain `Span` to a `Captures` value, where the span corresponds to /// the first and only group in `Captures`. Since a `Span` does not contain a /// pattern ID, the pattern ID used in this conversion is always `0`. impl From<Span> for Captures { fn from(sp: Span) -> Captures { Captures { id: 0, groups: vec![Some(sp)] } } } /// Represents the actual 'captures' key format more faithfully such that /// Serde can deserialize it. /// /// Namely, we need a way to represent a 'None' value inside a TOML array, and /// TOML has no 'null' value. So we make '[]' be 'None', and we use 'MaybeSpan' /// to recognize it. #[derive(Deserialize)] #[serde(untagged)] enum CapturesFormat { Span([usize; 2]), Match { id: usize, span: [usize; 2] }, Spans(Vec<MaybeSpan>), Captures { id: usize, spans: Vec<MaybeSpan> }, } impl TryFrom<CapturesFormat> for Captures { type Error = anyhow::Error; fn try_from(data: CapturesFormat) -> Result<Captures> { match data { CapturesFormat::Span([start, end]) => { Ok(Captures { id: 0, groups: vec![Some(Span { start, end })] }) } CapturesFormat::Match { id, span: [start, end] } => { Ok(Captures { id, groups: vec![Some(Span { start, end })] }) } CapturesFormat::Spans(spans) => { Captures::new(0, spans.into_iter().map(|s| s.into_option())) } CapturesFormat::Captures { id, spans } => { Captures::new(id, spans.into_iter().map(|s| s.into_option())) } } } } /// A single match, consisting of the pattern that matched and its span. #[derive(Clone, Copy, Eq, PartialEq)] pub struct Match { /// The ID of the pattern that matched. /// /// This is always `0` for single-pattern regexes. Otherwise, patterns /// start at `0` and count upwards in increments of `1`. pub id: usize, /// The span of the overall match. pub span: Span, } impl std::fmt::Debug for Match { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!(f, "Match({:?}: {:?})", self.id, self.span) } } /// A span of contiguous bytes, from start to end, represented via byte /// offsets. /// /// The range is inclusive at the beginning and exclusive at the end. #[derive(Clone, Copy, Deserialize, Eq, PartialEq)] pub struct Span { /// The starting byte offset of the match. pub start: usize, /// The ending byte offset of the match. pub end: usize, } impl std::fmt::Debug for Span { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!(f, "{:?}..{:?}", self.start, self.end) } } /// Represents a single span, either present or empty. /// /// An empty span is spelled `[]` in TOML, and a present span is spelled `[m, /// n]`. #[derive(Clone, Debug, Deserialize, Eq, PartialEq)] #[serde(untagged)] enum MaybeSpan { None([usize; 0]), Some([usize; 2]), } impl MaybeSpan { /// Converts this TOML representation of a possibly absent span to a proper /// `Option<Span>`. 
fn into_option(self) -> Option<Span> { match self { MaybeSpan::None(_) => None, MaybeSpan::Some([start, end]) => Some(Span { start, end }), } } } /// The match semantics to use for a search. /// /// When not specified in a test, the default is `MatchKind::LeftmostFirst`. #[derive(Clone, Copy, Debug, Deserialize, Eq, PartialEq)] #[serde(rename_all = "kebab-case")] pub enum MatchKind { /// All possible matches should be reported. /// /// Usually this makes it impossible for non-greedy repetition operators /// to exist. That is, they behave as greedy repetition operators. All, /// Report only the leftmost match. When there are multiple leftmost /// matches that start at the same position, prefer the one that comes /// "first" in the pattern. For example, `sam|samwise` matches `sam` in /// `samwise`. /// /// This typically corresponds to the semantics implemented by backtracking /// engines. LeftmostFirst, /// Report only the leftmost match. When there are multiple leftmost /// matches that start at the same position, prefer the one the longest /// match. For example, `sam|samwise` matches `samwise` in `samwise`. /// /// This typically corresponds to the semantics implemented by POSIX /// engines. LeftmostLongest, } impl Default for MatchKind { fn default() -> MatchKind { MatchKind::LeftmostFirst } } /// Represents the type of search to perform. /// /// When not specified in a test, the default is `SearchKind::Leftmost`. #[derive(Clone, Copy, Debug, Deserialize, Eq, PartialEq)] #[serde(rename_all = "kebab-case")] pub enum SearchKind { /// Report matches as soon as they are found. /// /// This is somewhat tricky to test, as this semantic is specified in terms /// of whatever the regex engine can do. For example, an automata oriented /// engine might be able to report a match earlier than a backtracking /// engine. Earliest, /// A standard leftmost search, returning either the leftmost-first or /// leftmost-longest match. Generally speaking, it doesn't make sense to /// use this type of search in combination with [`MatchKind::All`]. Leftmost, /// Return all possible matches, including ones that overlap. Typically /// this search kind is used in combination with [`MatchKind::All`]. Overlapping, } impl Default for SearchKind { fn default() -> SearchKind { SearchKind::Leftmost } } /// Read the environment variable given. If it doesn't exist, then return an /// empty string. Otherwise, check that it is valid UTF-8. If it isn't, return /// a useful error message. fn read_env(var: &str) -> Result<String> { let val = match std::env::var_os(var) { None => return Ok("".to_string()), Some(val) => val, }; let val = val.into_string().map_err(|os| { anyhow::anyhow!( "invalid UTF-8 in env var {}={:?}", var, Vec::from_os_str_lossy(&os) ) })?; Ok(val) } /// Runs the given closure such that any panics are caught and converted into /// errors. If the panic'd value could not be converted to a known error type, /// then a generic string error message is used. /// /// This is useful for use inside the test runner such that bugs for certain /// tests don't prevent other tests from running. 
fn safe<T, F>(fun: F) -> Result<T, String> where F: FnOnce() -> T, { use std::panic; panic::catch_unwind(panic::AssertUnwindSafe(fun)).map_err(|any_err| { // Extract common types of panic payload: // panic and assert produce &str or String if let Some(&s) = any_err.downcast_ref::<&str>() { s.to_owned() } else if let Some(s) = any_err.downcast_ref::<String>() { s.to_owned() } else { "UNABLE TO SHOW RESULT OF PANIC.".to_owned() } }) } /// A function to set some boolean fields to a default of 'true'. We use a /// function so that we can hand a path to it to Serde. fn default_true() -> bool { true } #[cfg(test)] mod tests { use super::*; #[test] fn err_no_regexes() { let data = r#" [[test]] name = "foo" haystack = "lib.rs" matches = true case-insensitive = true "#; let mut tests = RegexTests::new(); assert!(tests.load_slice("test", data.as_bytes()).is_err()); } #[test] fn err_unknown_field() { let data = r#" [[test]] name = "foo" regex = ".*.rs" haystack = "lib.rs" matches = true something = 0 "#; let mut tests = RegexTests::new(); assert!(tests.load_slice("test", data.as_bytes()).is_err()); } #[test] fn err_no_matches() { let data = r#" [[test]] name = "foo" regex = ".*.rs" haystack = "lib.rs" "#; let mut tests = RegexTests::new(); assert!(tests.load_slice("test", data.as_bytes()).is_err()); } #[test] fn load_match() { let data = r#" [[test]] name = "foo" regex = ".*.rs" haystack = "lib.rs" matches = [[0, 6]] compiles = false anchored = true case-insensitive = true unicode = false utf8 = false "#; let mut tests = RegexTests::new(); tests.load_slice("test", data.as_bytes()).unwrap(); let t0 = &tests.tests[0]; assert_eq!("test", t0.group()); assert_eq!("foo", t0.name()); assert_eq!("test/foo", t0.full_name()); assert_eq!(&[".*.rs"], t0.regexes()); assert_eq!(true, t0.is_match()); assert_eq!(vec![0], t0.which_matches()); assert!(!t0.compiles()); assert!(t0.anchored()); assert!(t0.case_insensitive()); assert!(!t0.unicode()); assert!(!t0.utf8()); } #[test] fn load_which_matches() { let data = r#" [[test]] name = "foo" regex = [".*.rs", ".*.toml"] haystack = "lib.rs" matches = [ { id = 0, spans = [[0, 0]] }, { id = 2, spans = [[0, 0]] }, { id = 5, spans = [[0, 0]] }, ] "#; let mut tests = RegexTests::new(); tests.load_slice("test", data.as_bytes()).unwrap(); let t0 = &tests.tests[0]; assert_eq!(&[".*.rs", ".*.toml"], t0.regexes()); assert_eq!(true, t0.is_match()); assert_eq!(vec![0, 2, 5], t0.which_matches()); assert!(t0.compiles()); assert!(!t0.anchored()); assert!(!t0.case_insensitive()); assert!(t0.unicode()); assert!(t0.utf8()); } #[test] fn load_spans() { let data = r#" [[test]] name = "foo" regex = ".*.rs" haystack = "lib.rs" matches = [[0, 2], [5, 10]] "#; let mut tests = RegexTests::new(); tests.load_slice("test", data.as_bytes()).unwrap(); let spans = vec![Span { start: 0, end: 2 }, Span { start: 5, end: 10 }]; let t0 = &tests.tests[0]; assert_eq!(t0.regexes(), &[".*.rs"]); assert_eq!(t0.is_match(), true); assert_eq!(t0.which_matches(), &[0]); assert_eq!( t0.matches(), vec![ Match { id: 0, span: spans[0] }, Match { id: 0, span: spans[1] }, ] ); assert_eq!( t0.captures(), vec![ Captures::new(0, vec![Some(spans[0])]).unwrap(), Captures::new(0, vec![Some(spans[1])]).unwrap(), ] ); } #[test] fn load_capture_spans() { let data = r#" [[test]] name = "foo" regex = ".*.rs" haystack = "lib.rs" matches = [ [[0, 15], [5, 10], [], [13, 14]], [[20, 30], [22, 24], [25, 27], []], ] "#; let mut tests = RegexTests::new(); tests.load_slice("test", data.as_bytes()).unwrap(); let t0 = &tests.tests[0]; 
assert_eq!(t0.regexes(), &[".*.rs"]); assert_eq!(t0.is_match(), true); assert_eq!(t0.which_matches(), &[0]); assert_eq!( t0.matches(), vec![ Match { id: 0, span: Span { start: 0, end: 15 } }, Match { id: 0, span: Span { start: 20, end: 30 } }, ] ); assert_eq!( t0.captures(), vec![ Captures::new( 0, vec![ Some(Span { start: 0, end: 15 }), Some(Span { start: 5, end: 10 }), None, Some(Span { start: 13, end: 14 }), ] ) .unwrap(), Captures::new( 0, vec![ Some(Span { start: 20, end: 30 }), Some(Span { start: 22, end: 24 }), Some(Span { start: 25, end: 27 }), None, ] ) .unwrap(), ] ); } #[test] fn fail_spans_empty1() { let data = r#" [[test]] name = "foo" regex = ".*.rs" haystack = "lib.rs" matches = [ [], ] "#; let mut tests = RegexTests::new(); assert!(tests.load_slice("test", data.as_bytes()).is_err()); } #[test] fn fail_spans_empty2() { let data = r#" [[test]] name = "foo" regex = ".*.rs" haystack = "lib.rs" matches = [ [[]], ] "#; let mut tests = RegexTests::new(); assert!(tests.load_slice("test", data.as_bytes()).is_err()); } #[test] fn fail_spans_empty3() { let data = r#" [[test]] name = "foo" regex = ".*.rs" haystack = "lib.rs" matches = [ [[], [0, 2]], ] "#; let mut tests = RegexTests::new(); assert!(tests.load_slice("test", data.as_bytes()).is_err()); } #[test] fn fail_captures_empty1() { let data = r#" [[test]] name = "foo" regex = ".*.rs" haystack = "lib.rs" matches = [ { id = 0, spans = [] }, ] "#; let mut tests = RegexTests::new(); assert!(tests.load_slice("test", data.as_bytes()).is_err()); } #[test] fn fail_captures_empty2() { let data = r#" [[test]] name = "foo" regex = ".*.rs" haystack = "lib.rs" matches = [ { id = 0, spans = [[]] }, ] "#; let mut tests = RegexTests::new(); assert!(tests.load_slice("test", data.as_bytes()).is_err()); } #[test] fn fail_captures_empty3() { let data = r#" [[test]] name = "foo" regex = ".*.rs" haystack = "lib.rs" matches = [ { id = 0, spans = [[], [0, 2]] }, ] "#; let mut tests = RegexTests::new(); assert!(tests.load_slice("test", data.as_bytes()).is_err()); } } <file_sep>/regex-lite/src/lib.rs /*! This crate provides a **lightweight** regex engine for searching strings. The regex syntax supported by this crate is nearly identical to what is found in the [`regex`](https://docs.rs/regex) crate. Like the `regex` crate, all regex searches in this crate have worst case `O(m * n)` time complexity, where `m` is proportional to the size of the regex and `n` is proportional to the size of the string being searched. The principal difference between the `regex` and `regex-lite` crates is that the latter prioritizes smaller binary sizes and shorter Rust compile times over performance and functionality. As a result, regex searches in this crate are typically substantially slower than what is provided by the `regex` crate. Moreover, this crate only has the most basic level of Unicode support: it matches codepoint by codepoint but otherwise doesn't support Unicode case insensivity or things like `\p{Letter}`. In exchange, this crate contributes far less to binary size and compiles much more quickly. If you just want API documentation, then skip to the [`Regex`] type. Otherwise, here's a quick example showing one way of parsing the output of a grep-like program: ```rust use regex_lite::Regex; let re = Regex::new(r"(?m)^([^:]+):([0-9]+):(.+)$").unwrap(); let hay = "\ path/to/foo:54:Blue Harvest path/to/bar:90:Something, Something, Something, Dark Side path/to/baz:3:It's a Trap! 
"; let mut results = vec![]; for (_, [path, lineno, line]) in re.captures_iter(hay).map(|c| c.extract()) { results.push((path, lineno.parse::<u64>()?, line)); } assert_eq!(results, vec![ ("path/to/foo", 54, "Blue Harvest"), ("path/to/bar", 90, "Something, Something, Something, Dark Side"), ("path/to/baz", 3, "It's a Trap!"), ]); # Ok::<(), Box<dyn std::error::Error>>(()) ``` # Overview The primary type in this crate is a [`Regex`]. Its most important methods are as follows: * [`Regex::new`] compiles a regex using the default configuration. A [`RegexBuilder`] permits setting a non-default configuration. (For example, case insensitive matching, verbose mode and others.) * [`Regex::is_match`] reports whether a match exists in a particular haystack. * [`Regex::find`] reports the byte offsets of a match in a haystack, if one exists. [`Regex::find_iter`] returns an iterator over all such matches. * [`Regex::captures`] returns a [`Captures`], which reports both the byte offsets of a match in a haystack and the byte offsets of each matching capture group from the regex in the haystack. [`Regex::captures_iter`] returns an iterator over all such matches. Otherwise, this top-level crate documentation is organized as follows: * [Usage](#usage) shows how to add the `regex` crate to your Rust project. * [Examples](#examples) provides a limited selection of regex search examples. * [Differences with the regex crate](#differences-with-the-regex-crate) provides a precise description of how `regex-lite` differs from `regex`. * [Syntax](#syntax) enumerates the specific regex syntax supported by this crate. * [Untrusted input](#untrusted-input) discusses how this crate deals with regex patterns or haystacks that are untrusted. # Usage The `regex-lite` crate is [on crates.io](https://crates.io/crates/regex-lite) and can be used by adding `regex-lite` to your dependencies in your project's `Cargo.toml`. Or more simply, just run `cargo add regex-lite`. Here is a complete example that creates a new Rust project, adds a dependency on `regex-lite`, creates the source code for a regex search and then runs the program. First, create the project in a new directory: ```text $ mkdir regex-example $ cd regex-example $ cargo init ``` Second, add a dependency on `regex`: ```text $ cargo add regex-lite ``` Third, edit `src/main.rs`. Delete what's there and replace it with this: ``` use regex_lite::Regex; fn main() { let re = Regex::new(r"Hello (?<name>\w+)!").unwrap(); let Some(caps) = re.captures("Hello Murphy!") else { println!("no match!"); return; }; println!("The name is: {}", &caps["name"]); } ``` Fourth, run it with `cargo run`: ```text $ cargo run Compiling regex-lite v0.1.0 Compiling regex-example v0.1.0 (/tmp/regex-example) Finished dev [unoptimized + debuginfo] target(s) in 4.22s Running `target/debug/regex-example` The name is: Murphy ``` The first time you run the program will show more output like above. But subsequent runs shouldn't have to re-compile the dependencies. # Examples This section provides a few examples, in tutorial style, showing how to search a haystack with a regex. There are more examples throughout the API documentation. Before starting though, it's worth defining a few terms: * A **regex** is a Rust value whose type is `Regex`. We use `re` as a variable name for a regex. * A **pattern** is the string that is used to build a regex. We use `pat` as a variable name for a pattern. * A **haystack** is the string that is searched by a regex. We use `hay` as a variable name for a haystack. 
Sometimes the words "regex" and "pattern" are used interchangeably. General
use of regular expressions in this crate proceeds by compiling a **pattern**
into a **regex**, and then using that regex to search, split or replace parts
of a **haystack**.

### Example: find a middle initial

We'll start off with a very simple example: a regex that looks for a specific
name but uses a wildcard to match a middle initial. Our pattern serves as
something like a template that will match a particular name with *any* middle
initial.

```rust
use regex_lite::Regex;

// We use 'unwrap()' here because it would be a bug in our program if the
// pattern failed to compile to a regex. Panicking in the presence of a bug
// is okay.
let re = Regex::new(r"Homer (.)\. Simpson").unwrap();
let hay = "Homer J. Simpson";
let Some(caps) = re.captures(hay) else { return };
assert_eq!("J", &caps[1]);
```

There are a few things worth noticing here in our first example:

* The `.` is a special pattern meta character that means "match any single
character except for new lines." (More precisely, in this crate, it means
"match any UTF-8 encoding of any Unicode scalar value other than `\n`.")
* We can match an actual `.` literally by escaping it, i.e., `\.`.
* We use Rust's [raw strings] to avoid needing to deal with escape sequences in
both the regex pattern syntax and in Rust's string literal syntax. If we didn't
use raw strings here, we would have had to use `\\.` to match a literal `.`
character. That is, `r"\."` and `"\\."` are equivalent patterns.
* We put our wildcard `.` instruction in parentheses. These parentheses have a
special meaning that says, "make whatever part of the haystack matches within
these parentheses available as a capturing group." After finding a match, we
access this capture group with `&caps[1]`.

[raw strings]: https://doc.rust-lang.org/stable/reference/tokens.html#raw-string-literals

Otherwise, we execute a search using `re.captures(hay)` and return from our
function if no match occurred. We then reference the middle initial by asking
for the part of the haystack that matched the capture group indexed at `1`.
(The capture group at index 0 is implicit and always corresponds to the entire
match. In this case, that's `Homer J. Simpson`.)

### Example: named capture groups

Continuing from our middle initial example above, we can tweak the pattern
slightly to give a name to the group that matches the middle initial:

```rust
use regex_lite::Regex;

// Note that (?P<middle>.) is a different way to spell the same thing.
let re = Regex::new(r"Homer (?<middle>.)\. Simpson").unwrap();
let hay = "Homer J. Simpson";
let Some(caps) = re.captures(hay) else { return };
assert_eq!("J", &caps["middle"]);
```

Giving a name to a group can be useful when there are multiple groups in a
pattern. It makes the code referring to those groups a bit easier to
understand.

### Example: validating a particular date format

This example shows how to confirm whether a haystack, in its entirety, matches
a particular date format:

```rust
use regex_lite::Regex;

let re = Regex::new(r"^\d{4}-\d{2}-\d{2}$").unwrap();
assert!(re.is_match("2010-03-14"));
```

Notice the use of the `^` and `$` anchors. In this crate, every regex search is
run with an implicit `(?s:.)*?` at the beginning of its pattern, which allows
the regex to match anywhere in a haystack. Anchors, as above, can be used to
ensure that the full haystack matches a pattern.
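To make the effect of the anchors concrete, here is a small added comparison
(an illustrative sketch, not one of the crate's original examples) of the same
pattern with and without anchors:

```rust
use regex_lite::Regex;

// Without anchors, the pattern also matches a date embedded in a longer
// string, which is probably not what "validation" intends.
let re = Regex::new(r"\d{4}-\d{2}-\d{2}").unwrap();
assert!(re.is_match("launch date: 2010-03-14 (approx)"));

// With anchors, the entire haystack must be a date in this format.
let re = Regex::new(r"^\d{4}-\d{2}-\d{2}$").unwrap();
assert!(!re.is_match("launch date: 2010-03-14 (approx)"));
```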
### Example: finding dates in a haystack In the previous example, we showed how one might validate that a haystack, in its entirety, corresponded to a particular date format. But what if we wanted to extract all things that look like dates in a specific format from a haystack? To do this, we can use an iterator API to find all matches (notice that we've removed the anchors): ```rust use regex_lite::Regex; let re = Regex::new(r"\d{4}-\d{2}-\d{2}").unwrap(); let hay = "What do 1865-04-14, 1881-07-02, 1901-09-06 and 1963-11-22 have in common?"; // 'm' is a 'Match', and 'as_str()' returns the matching part of the haystack. let dates: Vec<&str> = re.find_iter(hay).map(|m| m.as_str()).collect(); assert_eq!(dates, vec![ "1865-04-14", "1881-07-02", "1901-09-06", "1963-11-22", ]); ``` We can also iterate over [`Captures`] values instead of [`Match`] values, and that in turn permits accessing each component of the date via capturing groups: ```rust use regex_lite::Regex; let re = Regex::new(r"(?<y>\d{4})-(?<m>\d{2})-(?<d>\d{2})").unwrap(); let hay = "What do 1865-04-14, 1881-07-02, 1901-09-06 and 1963-11-22 have in common?"; // 'm' is a 'Match', and 'as_str()' returns the matching part of the haystack. let dates: Vec<(&str, &str, &str)> = re.captures_iter(hay).map(|caps| { // The unwraps are okay because every capture group must match if the whole // regex matches, and in this context, we know we have a match. // // Note that we use `caps.name("y").unwrap().as_str()` instead of // `&caps["y"]` because the the lifetime of the former is the same as the // lifetime of `hay` above, but the lifetime of the latter is tied to the // lifetime of `caps` due to how the `Index` trait is defined. let year = caps.name("y").unwrap().as_str(); let month = caps.name("m").unwrap().as_str(); let day = caps.name("d").unwrap().as_str(); (year, month, day) }).collect(); assert_eq!(dates, vec![ ("1865", "04", "14"), ("1881", "07", "02"), ("1901", "09", "06"), ("1963", "11", "22"), ]); ``` ### Example: simpler capture group extraction One can use [`Captures::extract`] to make the code from the previous example a bit simpler in this case: ```rust use regex_lite::Regex; let re = Regex::new(r"(\d{4})-(\d{2})-(\d{2})").unwrap(); let hay = "What do 1865-04-14, 1881-07-02, 1901-09-06 and 1963-11-22 have in common?"; let dates: Vec<(&str, &str, &str)> = re.captures_iter(hay).map(|caps| { let (_, [year, month, day]) = caps.extract(); (year, month, day) }).collect(); assert_eq!(dates, vec![ ("1865", "04", "14"), ("1881", "07", "02"), ("1901", "09", "06"), ("1963", "11", "22"), ]); ``` `Captures::extract` works by ensuring that the number of matching groups match the number of groups requested via the `[year, month, day]` syntax. If they do, then the substrings for each corresponding capture group are automatically returned in an appropriately sized array. Rust's syntax for pattern matching arrays does the rest. ### Example: replacement with named capture groups Building on the previous example, perhaps we'd like to rearrange the date formats. This can be done by finding each match and replacing it with something different. 
The [`Regex::replace_all`] routine provides a convenient way to do this, including by supporting references to named groups in the replacement string: ```rust use regex_lite::Regex; let re = Regex::new(r"(?<y>\d{4})-(?<m>\d{2})-(?<d>\d{2})").unwrap(); let before = "1973-01-05, 1975-08-25 and 1980-10-18"; let after = re.replace_all(before, "$m/$d/$y"); assert_eq!(after, "01/05/1973, 08/25/1975 and 10/18/1980"); ``` The replace methods are actually polymorphic in the replacement, which provides more flexibility than is seen here. (See the documentation for [`Regex::replace`] for more details.) ### Example: verbose mode When your regex gets complicated, you might consider using something other than regex. But if you stick with regex, you can use the `x` flag to enable insignificant whitespace mode or "verbose mode." In this mode, whitespace is treated as insignificant and one may write comments. This may make your patterns easier to comprehend. ```rust use regex_lite::Regex; let re = Regex::new(r"(?x) (?P<y>\d{4}) # the year - (?P<m>\d{2}) # the month - (?P<d>\d{2}) # the day ").unwrap(); let before = "1973-01-05, 1975-08-25 and 1980-10-18"; let after = re.replace_all(before, "$m/$d/$y"); assert_eq!(after, "01/05/1973, 08/25/1975 and 10/18/1980"); ``` If you wish to match against whitespace in this mode, you can still use `\s`, `\n`, `\t`, etc. For escaping a single space character, you can escape it directly with `\ `, use its hex character code `\x20` or temporarily disable the `x` flag, e.g., `(?-x: )`. # Differences with the `regex` crate As mentioned in the introduction above, the purpose of this crate is to prioritize small binary sizes and shorter Rust compilation times as much as possible. Namely, while the `regex` crate tends to eschew both binary size and compilation time in favor of faster searches and features, the `regex-lite` crate gives up faster searches and some functionality in exchange for smaller binary sizes and faster compilation times. The precise set of differences at the syntax level: * The Perl character classes are limited to ASCII codepoints. That is, `\d` is `[0-9]`, `\s` is `[\t\n\v\f\r ]` and `\w` is `[0-9A-Za-z_]`. * Unicode character classes of the form `\p{...}` and `\P{...}` are not supported at all. Note though that things like `[^β]` are still supported and will match any Unicode scalar value except for `β`. * Case insensitive searching is limited to ASCII case insensitivity. * Character class set operations other than union are not supported. That is, difference (`--`), intersection (`&&`) and symmetric difference (`~~`) are not available. These tend to be most useful with Unicode classes, which also aren't available. * Opt-in octal support is not available in this crate. And now at the API level: * Currently, this crate only supports searching `&str`. It does not have APIs for searching `&[u8]` haystacks, although it is planned to add these in the future if there's demand. * There is no `RegexSet` in this crate and there are no plans to add it. * The `Error` type in this crate is completely opaque. Other than these things, the `regex-lite` crate is intended to be a drop-in replacement for the `regex` crate. In most cases, you can just replace `use regex::Regex;` with `use regex_lite::Regex;` and everything will work. (Unless you're depending on Unicode support in your regexes.) # Syntax The syntax supported in this crate is documented below. ### Matching one character <pre class="rust"> . 
any character except new line (includes new line with s flag) [0-9] any ASCII digit \d digit ([0-9]) \D not digit </pre> ### Character classes <pre class="rust"> [xyz] A character class matching either x, y or z (union). [^xyz] A character class matching any character except x, y and z. [a-z] A character class matching any character in range a-z. [[:alpha:]] ASCII character class ([A-Za-z]) [[:^alpha:]] Negated ASCII character class ([^A-Za-z]) [\[\]] Escaping in character classes (matching [ or ]) </pre> Any ASCII or Perl character class may appear inside a bracketed `[...]` character class. For example, `[\s[:digit:]]` matches any digit or space character. Precedence in character classes, from most binding to least: 1. Ranges: `[a-cd]` == `[[a-c]d]` 2. Union: `[ab&&bc]` == `[[ab]&&[bc]]` 3. Negation: `[^a-z&&b]` == `[^[a-z&&b]]`. ### Composites <pre class="rust"> xy concatenation (x followed by y) x|y alternation (x or y, prefer x) </pre> This example shows how an alternation works, and what it means to prefer a branch in the alternation over subsequent branches. ``` use regex_lite::Regex; let haystack = "samwise"; // If 'samwise' comes first in our alternation, then it is // preferred as a match, even if the regex engine could // technically detect that 'sam' led to a match earlier. let re = Regex::new(r"samwise|sam").unwrap(); assert_eq!("samwise", re.find(haystack).unwrap().as_str()); // But if 'sam' comes first, then it will match instead. // In this case, it is impossible for 'samwise' to match // because 'sam' is a prefix of it. let re = Regex::new(r"sam|samwise").unwrap(); assert_eq!("sam", re.find(haystack).unwrap().as_str()); ``` ### Repetitions <pre class="rust"> x* zero or more of x (greedy) x+ one or more of x (greedy) x? zero or one of x (greedy) x*? zero or more of x (ungreedy/lazy) x+? one or more of x (ungreedy/lazy) x?? zero or one of x (ungreedy/lazy) x{n,m} at least n x and at most m x (greedy) x{n,} at least n x (greedy) x{n} exactly n x x{n,m}? at least n x and at most m x (ungreedy/lazy) x{n,}? at least n x (ungreedy/lazy) x{n}? exactly n x </pre> ### Empty matches <pre class="rust"> ^ the beginning of a haystack (or start-of-line with multi-line mode) $ the end of a haystack (or end-of-line with multi-line mode) \A only the beginning of a haystack (even with multi-line mode enabled) \z only the end of a haystack (even with multi-line mode enabled) \b an ASCII word boundary (\w on one side and \W, \A, or \z on other) \B not an ASCII word boundary </pre> The empty regex is valid and matches the empty string. For example, the empty regex matches `abc` at positions `0`, `1`, `2` and `3`. When using the top-level [`Regex`] on `&str` haystacks, an empty match that splits a codepoint is guaranteed to never be returned. For example: ```rust let re = regex_lite::Regex::new(r"").unwrap(); let ranges: Vec<_> = re.find_iter("💩").map(|m| m.range()).collect(); assert_eq!(ranges, vec![0..0, 4..4]); ``` Note that an empty regex is distinct from a regex that can never match. For example, the regex `[^\s\S]` is a character class that represents the negation of `[\s\S]`, where the union of `\s` and `\S` corresponds to all Unicode scalar values. The negation of everything is nothing, which means the character class is empty. Since nothing is in the empty set, `[^\s\S]` matches nothing, not even the empty string. 
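Here is a small added illustration of that distinction (it assumes, per the
description above, that `[^\s\S]` compiles as a regex that can never match):

```rust
use regex_lite::Regex;

// The empty regex matches the empty string, and so matches at every
// position in any haystack.
let re = Regex::new("").unwrap();
assert!(re.is_match("abc"));

// A regex that can never match reports no match at all, not even the
// empty string.
let re = Regex::new(r"[^\s\S]").unwrap();
assert!(!re.is_match(""));
assert!(re.find("abc").is_none());
```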
### Grouping and flags <pre class="rust"> (exp) numbered capture group (indexed by opening parenthesis) (?P&lt;name&gt;exp) named (also numbered) capture group (names must be alpha-numeric) (?&lt;name&gt;exp) named (also numbered) capture group (names must be alpha-numeric) (?:exp) non-capturing group (?flags) set flags within current group (?flags:exp) set flags for exp (non-capturing) </pre> Capture group names must be any sequence of alpha-numeric Unicode codepoints, in addition to `.`, `_`, `[` and `]`. Names must start with either an `_` or an alphabetic codepoint. Alphabetic codepoints correspond to the `Alphabetic` Unicode property, while numeric codepoints correspond to the union of the `Decimal_Number`, `Letter_Number` and `Other_Number` general categories. Flags are each a single character. For example, `(?x)` sets the flag `x` and `(?-x)` clears the flag `x`. Multiple flags can be set or cleared at the same time: `(?xy)` sets both the `x` and `y` flags and `(?x-y)` sets the `x` flag and clears the `y` flag. All flags are by default disabled unless stated otherwise. They are: <pre class="rust"> i case-insensitive: letters match both upper and lower case m multi-line mode: ^ and $ match begin/end of line s allow . to match \n R enables CRLF mode: when multi-line mode is enabled, \r\n is used U swap the meaning of x* and x*? x verbose mode, ignores whitespace and allow line comments (starting with `#`) </pre> Note that in verbose mode, whitespace is ignored everywhere, including within character classes. To insert whitespace, use its escaped form or a hex literal. For example, `\ ` or `\x20` for an ASCII space. Flags can be toggled within a pattern. Here's an example that matches case-insensitively for the first part but case-sensitively for the second part: ```rust use regex_lite::Regex; let re = Regex::new(r"(?i)a+(?-i)b+").unwrap(); let m = re.find("AaAaAbbBBBb").unwrap(); assert_eq!(m.as_str(), "AaAaAbb"); ``` Notice that the `a+` matches either `a` or `A`, but the `b+` only matches `b`. Multi-line mode means `^` and `$` no longer match just at the beginning/end of the input, but also at the beginning/end of lines: ``` use regex_lite::Regex; let re = Regex::new(r"(?m)^line \d+").unwrap(); let m = re.find("line one\nline 2\n").unwrap(); assert_eq!(m.as_str(), "line 2"); ``` Note that `^` matches after new lines, even at the end of input: ``` use regex_lite::Regex; let re = Regex::new(r"(?m)^").unwrap(); let m = re.find_iter("test\n").last().unwrap(); assert_eq!((m.start(), m.end()), (5, 5)); ``` When both CRLF mode and multi-line mode are enabled, then `^` and `$` will match either `\r` and `\n`, but never in the middle of a `\r\n`: ``` use regex_lite::Regex; let re = Regex::new(r"(?mR)^foo$").unwrap(); let m = re.find("\r\nfoo\r\n").unwrap(); assert_eq!(m.as_str(), "foo"); ``` ### Escape sequences Note that this includes all possible escape sequences, even ones that are documented elsewhere. 
<pre class="rust"> \* literal *, applies to all ASCII except [0-9A-Za-z<>] \a bell (\x07) \f form feed (\x0C) \t horizontal tab \n new line \r carriage return \v vertical tab (\x0B) \A matches at the beginning of a haystack \z matches at the end of a haystack \b word boundary assertion \B negated word boundary assertion \x7F hex character code (exactly two digits) \x{10FFFF} any hex character code corresponding to a Unicode code point \u007F hex character code (exactly four digits) \u{7F} any hex character code corresponding to a Unicode code point \U0000007F hex character code (exactly eight digits) \U{7F} any hex character code corresponding to a Unicode code point \d, \s, \w Perl character class \D, \S, \W negated Perl character class </pre> ### Perl character classes (ASCII only) These character classes are short-hands for common groups of characters. In this crate, `\d`, `\s` and `\w` only consist of ASCII codepoints. <pre class="rust"> \d digit ([0-9]) \D not digit \s whitespace ([\t\n\v\f\r ]) \S not whitespace \w word character ([0-9A-Za-z_]) \W not word character </pre> ### ASCII character classes These reflect additional groups of characters taken from POSIX regex syntax that are sometimes useful to have. In this crate, all of these classes only consist of ASCII codepoints. <pre class="rust"> [[:alnum:]] alphanumeric ([0-9A-Za-z]) [[:alpha:]] alphabetic ([A-Za-z]) [[:ascii:]] ASCII ([\x00-\x7F]) [[:blank:]] blank ([\t ]) [[:cntrl:]] control ([\x00-\x1F\x7F]) [[:digit:]] digits ([0-9]) [[:graph:]] graphical ([!-~]) [[:lower:]] lower case ([a-z]) [[:print:]] printable ([ -~]) [[:punct:]] punctuation ([!-/:-@\[-`{-~]) [[:space:]] whitespace ([\t\n\v\f\r ]) [[:upper:]] upper case ([A-Z]) [[:word:]] word characters ([0-9A-Za-z_]) [[:xdigit:]] hex digit ([0-9A-Fa-f]) </pre> # Untrusted input This crate is meant to be able to run regex searches on untrusted haystacks without fear of [ReDoS]. This crate also, to a certain extent, supports untrusted patterns. [ReDoS]: https://en.wikipedia.org/wiki/ReDoS This crate differs from most (but not all) other regex engines in that it doesn't use unbounded backtracking to run a regex search. In those cases, one generally cannot use untrusted patterns *or* untrusted haystacks because it can be very difficult to know whether a particular pattern will result in catastrophic backtracking or not. We'll first discuss how this crate deals with untrusted inputs and then wrap it up with a realistic discussion about what practice really looks like. ### Panics Outside of clearly documented cases, most APIs in this crate are intended to never panic regardless of the inputs given to them. For example, `Regex::new`, `Regex::is_match`, `Regex::find` and `Regex::captures` should never panic. That is, it is an API promise that those APIs will never panic no matter what inputs are given to them. With that said, regex engines are complicated beasts, and providing a rock solid guarantee that these APIs literally never panic is essentially equivalent to saying, "there are no bugs in this library." That is a bold claim, and not really one that can be feasibly made with a straight face. Don't get the wrong impression here. This crate is extensively tested, not just with unit and integration tests, but also via fuzz testing. For example, this crate is part of the [OSS-fuzz project]. Panics should be incredibly rare, but it is possible for bugs to exist, and thus possible for a panic to occur. 
If you need a rock solid guarantee against panics, then you should wrap calls into this library with [`std::panic::catch_unwind`]. It's also worth pointing out that this library will generally panic when other regex engines would commit undefined behavior. When undefined behavior occurs, your program might continue as if nothing bad has happened, but it also might mean your program is open to the worst kinds of exploits. In contrast, the worst thing a panic can do is a denial of service. [OSS-fuzz project]: https://android.googlesource.com/platform/external/oss-fuzz/+/refs/tags/android-t-preview-1/projects/rust-regex/ [`std::panic::catch_unwind`]: https://doc.rust-lang.org/std/panic/fn.catch_unwind.html ### Untrusted patterns The principal way this crate deals with them is by limiting their size by default. The size limit can be configured via [`RegexBuilder::size_limit`]. The idea of a size limit is that compiling a pattern into a `Regex` will fail if it becomes "too big." Namely, while *most* resources consumed by compiling a regex are approximately proportional to the length of the pattern itself, there is one particular exception to this: counted repetitions. Namely, this pattern: ```text a{5}{5}{5}{5}{5}{5} ``` Is equivalent to this pattern: ```text a{15625} ``` In both of these cases, the actual pattern string is quite small, but the resulting `Regex` value is quite large. Indeed, as the first pattern shows, it isn't enough to locally limit the size of each repetition because they can be stacked in a way that results in exponential growth. To provide a bit more context, a simplified view of regex compilation looks like this: * The pattern string is parsed into a structured representation called an HIR (high-level intermediate representation). Counted repetitions are not expanded in this stage. That is, the size of the HIR is proportional to the size of the pattern with "reasonable" constant factors. In other words, one can reasonably limit the memory used by an HIR by limiting the length of the pattern string. * The HIR is compiled into a [Thompson NFA]. This is the stage at which something like `\w{5}` is rewritten to `\w\w\w\w\w`. Thus, this is the stage at which [`RegexBuilder::size_limit`] is enforced. If the NFA exceeds the configured size, then this stage will fail. [Thompson NFA]: https://en.wikipedia.org/wiki/Thompson%27s_construction The size limit helps avoid two different kinds of exorbitant resource usage: * It avoids permitting exponential memory usage based on the size of the pattern string. * It avoids long search times. This will be discussed in more detail in the next section, but worst case search time *is* dependent on the size of the regex. So keeping regexes limited to a reasonable size is also a way of keeping search times reasonable. Finally, it's worth pointing out that regex compilation is guaranteed to take worst case `O(m)` time, where `m` is proportional to the size of regex. The size of the regex here is *after* the counted repetitions have been expanded. **Advice for those using untrusted regexes**: limit the pattern length to something small and expand it as needed. Configure [`RegexBuilder::size_limit`] to something small and then expand it as needed. ### Untrusted haystacks The main way this crate guards against searches from taking a long time is by using algorithms that guarantee a `O(m * n)` worst case time and space bound. 
Namely: * `m` is proportional to the size of the regex, where the size of the regex includes the expansion of all counted repetitions. (See the previous section on untrusted patterns.) * `n` is proportional to the length, in bytes, of the haystack. In other words, if you consider `m` to be a constant (for example, the regex pattern is a literal in the source code), then the search can be said to run in "linear time." Or equivalently, "linear time with respect to the size of the haystack." But the `m` factor here is important not to ignore. If a regex is particularly big, the search times can get quite slow. This is why, in part, [`RegexBuilder::size_limit`] exists. **Advice for those searching untrusted haystacks**: As long as your regexes are not enormous, you should expect to be able to search untrusted haystacks without fear. If you aren't sure, you should benchmark it. Unlike backtracking engines, if your regex is so big that it's likely to result in slow searches, this is probably something you'll be able to observe regardless of what the haystack is made up of. ### Iterating over matches One thing that is perhaps easy to miss is that the worst case time complexity bound of `O(m * n)` applies to methods like [`Regex::is_match`], [`Regex::find`] and [`Regex::captures`]. It does **not** apply to [`Regex::find_iter`] or [`Regex::captures_iter`]. Namely, since iterating over all matches can execute many searches, and each search can scan the entire haystack, the worst case time complexity for iterators is `O(m * n^2)`. One example of where this occurs is when a pattern consists of an alternation, where an earlier branch of the alternation requires scanning the entire haystack only to discover that there is no match. It also requires a later branch of the alternation to have matched at the beginning of the search. For example, consider the pattern `.*[^A-Z]|[A-Z]` and the haystack `AAAAA`. The first search will scan to the end looking for matches of `.*[^A-Z]` even though a finite automata engine (as in this crate) knows that `[A-Z]` has already matched the first character of the haystack. This is due to the greedy nature of regex searching. That first search will report a match at the first `A` only after scanning to the end to discover that no other match exists. The next search then begins at the second `A` and the behavior repeats. There is no way to avoid this. This means that if both patterns and haystacks are untrusted and you're iterating over all matches, you're susceptible to worst case quadratic time complexity. One possible way to mitigate this is to switch to the lower level `regex-automata` crate and use its `meta::Regex` iterator APIs. There, you can configure the search to operate in "earliest" mode by passing a `Input::new(haystack).earliest(true)` to `meta::Regex::find_iter` (for example). By enabling this mode, you give up the normal greedy match semantics of regex searches and instead ask the regex engine to immediately stop as soon as a match has been found. Enabling this mode will thus restore the worst case `O(m * n)` time complexity bound, but at the cost of different semantics. ### Untrusted inputs in practice While providing a `O(m * n)` worst case time bound on all searches goes a long way toward preventing [ReDoS], that doesn't mean every search you can possibly run will complete without burning CPU time. In general, there are a few ways for the `m * n` time bound to still bite you: * You are searching an exceptionally long haystack. 
No matter how you slice it, a longer haystack will take more time to search.
* Very large regexes can cause searches to be quite slow due to increasing the
size `m` in the worst case `O(m * n)` bound. This is especially true when they
are combined with counted repetitions. While the regex size limit above will
protect you from the most egregious cases, the default size limit still permits
pretty big regexes that can execute more slowly than one might expect.
* While routines like [`Regex::find`] and [`Regex::captures`] guarantee
worst case `O(m * n)` search time, routines like [`Regex::find_iter`] and
[`Regex::captures_iter`] actually have worst case `O(m * n^2)` search time.
This is because `find_iter` runs many searches, and each search takes worst
case `O(m * n)` time. Thus, iteration of all matches in a haystack has worst
case `O(m * n^2)`. A good example of a pattern that exhibits this is
`(?:A+){1000}|` or even `.*[^A-Z]|[A-Z]`.

In general, untrusted haystacks are easier to stomach than untrusted patterns.
Untrusted patterns give a lot more control to the caller to impact the
performance of a search. Therefore, permitting untrusted patterns means that
your only line of defense is to put a limit on how big `m` (and perhaps also
`n`) can be in `O(m * n)`. `n` is limited by simply inspecting the length of
the haystack while `m` is limited by *both* applying a limit to the length of
the pattern *and* a limit on the compiled size of the regex via
[`RegexBuilder::size_limit`].

It bears repeating: if you're accepting untrusted patterns, it would be a good
idea to start with conservative limits on `m` and `n`, and then carefully
increase them as needed.
*/
#![no_std]
// I'm not ideologically opposed to allowing non-safe code in this crate, but
// IMO it needs really excellent justification. One likely place where this
// could show up is if and when we support a non-std alloc mode. In that case,
// we need some way to synchronize access to a PikeVM cache. That in turn will
// likely require rolling our own primitive spin-lock or similar structure.
#![forbid(unsafe_code)]
#![deny(missing_docs, rustdoc::broken_intra_doc_links)]
#![warn(missing_debug_implementations)]
// When the main features are disabled, squash dead code warnings. The
// alternative is to litter conditional compilation directives everywhere,
// which is super annoying.
#![cfg_attr(not(feature = "string"), allow(dead_code))]

#[cfg(not(feature = "std"))]
compile_error!("'std' is currently a required feature, please file an issue");

#[cfg(not(any(target_pointer_width = "32", target_pointer_width = "64")))]
compile_error!("not supported on non-{32,64}, please file an issue");

extern crate alloc;
#[cfg(any(test, feature = "std"))]
extern crate std;

#[cfg(feature = "string")]
pub use self::string::*;
pub use self::{error::Error, hir::escape};

mod error;
mod hir;
mod int;
mod interpolate;
mod nfa;
mod pikevm;
mod pool;
#[cfg(feature = "string")]
mod string;
mod utf8;
<file_sep>/regex-cli/args/meta.rs
use std::borrow::Borrow;

use {
    anyhow::Context,
    lexopt::{Arg, Parser},
    regex_automata::meta,
    regex_syntax::hir::Hir,
};

use crate::args::{self, flags, Configurable, Usage};

/// Exposes the configuration of a meta regex.
#[derive(Debug, Default)]
pub struct Config {
    meta: meta::Config,
    build_from_patterns: bool,
}

impl Config {
    /// Return a `meta::Config` object from this configuration.
pub fn meta(&self) -> anyhow::Result<meta::Config> { Ok(self.meta.clone()) } /// Whether to build a meta regex directly from the pattern strings, or to /// require the caller to build their own HIR first. /// /// i.e., Whether the caller should use `from_patterns` or `from_hirs`. pub fn build_from_patterns(&self) -> bool { self.build_from_patterns } /// Build a meta regex from the pattern strings given. pub fn from_patterns<P: AsRef<str>>( &self, syntax: &crate::args::syntax::Config, patterns: &[P], ) -> anyhow::Result<meta::Regex> { meta::Builder::new() .configure(self.meta()?) .syntax(syntax.syntax()?) .build_many(patterns) .context("failed to compile meta regex") } /// Build a meta regex from the HIRs given. pub fn from_hirs<H: Borrow<Hir>>( &self, hirs: &[H], ) -> anyhow::Result<meta::Regex> { meta::Builder::new() .configure(self.meta()?) .build_many_from_hir(hirs) .context("failed to compile meta regex") } } impl Configurable for Config { fn configure( &mut self, p: &mut Parser, arg: &mut Arg, ) -> anyhow::Result<bool> { match *arg { Arg::Long("build-from-patterns") => { self.build_from_patterns = true; } Arg::Short('k') | Arg::Long("match-kind") => { let kind: flags::MatchKind = args::parse(p, "-k/--match-kind")?; self.meta = self.meta.clone().match_kind(kind.kind); } Arg::Short('B') | Arg::Long("no-utf8-nfa") => { self.meta = self.meta.clone().utf8_empty(false); } Arg::Long("no-auto-prefilter") => { self.meta = self.meta.clone().auto_prefilter(false); } Arg::Long("nfa-size-limit") => { let limit = args::parse_maybe(p, "--nfa-size-limit")?; self.meta = self.meta.clone().nfa_size_limit(limit); } Arg::Long("onepass-size-limit") => { let limit = args::parse_maybe(p, "--onepass-size-limit")?; self.meta = self.meta.clone().onepass_size_limit(limit); } Arg::Long("cache-capacity") => { let capacity = args::parse(p, "--cache-capacity")?; self.meta = self.meta.clone().hybrid_cache_capacity(capacity); } Arg::Long("dfa-size-limit") => { let limit = args::parse_maybe(p, "--dfa-size-limit")?; self.meta = self.meta.clone().dfa_size_limit(limit); } Arg::Long("dfa-state-limit") => { let limit = args::parse_maybe(p, "--dfa-state-limit")?; self.meta = self.meta.clone().dfa_state_limit(limit); } Arg::Short('C') | Arg::Long("no-byte-classes") => { self.meta = self.meta.clone().byte_classes(false); } Arg::Long("line-terminator") => { let byte: flags::OneByte = args::parse(p, "--line-terminator")?; self.meta = self.meta.clone().line_terminator(byte.0); } Arg::Long("no-hybrid") => { self.meta = self.meta.clone().hybrid(false); } Arg::Long("no-dfa") => { self.meta = self.meta.clone().dfa(false); } Arg::Long("no-onepass") => { self.meta = self.meta.clone().onepass(false); } Arg::Long("no-backtrack") => { self.meta = self.meta.clone().backtrack(false); } _ => return Ok(false), } Ok(true) } fn usage(&self) -> &[Usage] { const USAGES: &'static [Usage] = &[ Usage::new( "--build-from-patterns", "Build a meta regex directly from pattern strings.", r#" Build a meta regex directly from pattern strings. By default, a meta regex is built in this tool by first explicitly parsing the patterns into ASTs, then translating them into HIRs and finally providing the HIRs to the meta regex builder. This flag changes the behavior to pass the pattern strings directly to the meta regex builder such that the builder is responsible for parsing and translating. 
The main reason to use this is if you specifically want to test the meta regex builder from patterns directly, as it may contain optimizations for skipping aspects of parsing. The default behavior splits these steps out in order to time them so that one gets a good idea of where most time is being spent during meta regex construction. "#, ), flags::MatchKind::USAGE, Usage::new( "-B, --no-utf8-nfa", "Disable UTF-8 mode for empty matches.", r#" Disables UTF-8 mode for empty matches. When this flag is given, empty matches that split a codepoint are permitted. Otherwise, they are banned. "#, ), Usage::new( "--no-auto-prefilter", "Disable automatic prefilters.", r#" By default, a meta regex is accelerated via prefilters if one can be extracted from the literals in the pattern. By passing this flag, the automatic prefilter optimization is disabled. "#, ), Usage::new( "--nfa-size-limit", "Sets a limit on the memory used by an NFA.", r#" Sets a limit on the memory used by the NFA built by the meta regex engine, in terms of bytes of heap usage. This limit is applied during NFA construction. If the limit is exceeded, then construction will fail. A special 'none' value disables the limit entirely. "#, ), Usage::new( "--onepass-size-limit", "Set a limit on heap used by a one-pass DFA in bytes.", r#" This sets a limit on the number of heap memory a one-pass DFA built by the meta regex engine can use. The limit is enforced at one-pass DFA construction time. If the limit is exceeded, then construction will fail. A special value of 'none' may be given, which disables the limit. "#, ), Usage::new( "--cache-capacity", "Set the total cache capacity for the lazy DFA.", r#" This sets an approximate limit on the total amount of heap memory used by the lazy DFA. This only applies when the meta regex engine uses a lazy DFA. Once the cache reaches capacity and there's no more room for additional states, the cache is cleared and the lazy DFA keeps rebuilding itself. "#, ), Usage::new( "--dfa-size-limit", "Set a limit on heap used by a DFA in bytes.", r#" This sets a limit on the number of heap memory a DFA built by the meta regex engine can use. The limit is enforced at DFA construction time. If the limit is exceeded, then construction will fail. A special value of 'none' may be given, which disables the limit. "#, ), Usage::new( "--dfa-state-limit", "Only build a DFA when the NFA is below this limit.", r#" When an NFA built by the meta regex engine has a number of states below this limit, the meta regex engine may choose to build a fully compiled DFA. A special value of 'none' may be given, which disables the limit. When not set, the default is a very small number. Unless you know what you're doing, the limit should be kept small since DFA construction is exponential in the number of DFA states. Even 2^N where N is a small number can be quite large, and this is why there is also the --dfa-size-limit to ensure the DFA cannot get too big. "#, ), Usage::new( "-C, --no-byte-classes", "Disable byte classes.", r#" This causes all bytes to be an equivalence class unto themselves. By default, bytes are grouped into equivalence classes to reduce the size of the alphabet for a DFA, and therefore decreases overall space usage. It can be quite convenient to disable byte classes when looking at the debug representation of a DFA. Otherwise, the transitions are much harder for a human to read. 
"#, ), Usage::new( "--line-terminator", "Set the line terminator used by line anchors.", r#" Set the line terminator used by line anchors. The line anchors are '(?m:^)' and '(?m:$)'. By default, they both use '\n' as line terminators for matching purposes. This option changes the line terminator to any arbitrary byte. Note that CRLF aware line anchors, that is, '(?Rm:^)' and '(?Rm:$)', are unaffected by this option. CRLF aware line anchors always use '\r' and '\n' as line terminators and do not match between a '\r' and '\n'. "#, ), Usage::new( "--no-hybrid", "Disable the use of a lazy DFA.", r#" This prevents the meta regex engine from building and using a lazy DFA. "#, ), Usage::new( "--no-dfa", "Disable the use of a fully compiled DFA.", r#" This prevents the meta regex engine from building and using a fully compiled DFA. "#, ), Usage::new( "--no-onepass", "Disable the use of a one-pass DFA.", r#" This prevents the meta regex engine from building and using a one-pass DFA. "#, ), Usage::new( "--no-backtrack", "Disable the use of a bounded backtracker.", r#" This prevents the meta regex engine from building and using a bounded backtracker. "#, ), ]; USAGES } } <file_sep>/regex-cli/cmd/debug/literal.rs use std::io::{stdout, Write}; use { anyhow::Context, bstr::BString, lexopt::{Arg, Parser, ValueExt}, regex_syntax::hir::literal::{ExtractKind, Extractor}, }; use crate::{ args::{self, Configurable, Usage}, util::{self, Table}, }; pub fn run(p: &mut Parser) -> anyhow::Result<()> { const USAGE: &'static str = "\ Prints the debug representation of extract literals from a regex pattern. Note that the literals this command prints by default should roughly reflect what regex-automata's meta regex engine does by default. In particular, this will optimize the extracted literals and will do so under the presumption of leftmost-first match semantics. The --no-optimize flag can be given to skip this optimization step and instead get the literals precisely as they were extracted. 
USAGE: regex-cli debug literal <pattern> TIP: use -h for short docs and --help for long docs OPTIONS: %options% "; let mut common = args::common::Config::default(); let mut patterns = args::patterns::Config::positional(); let mut syntax = args::syntax::Config::default(); let mut literal = Literal::default(); args::configure( p, USAGE, &mut [&mut common, &mut patterns, &mut syntax, &mut literal], )?; let pats = patterns.get()?; anyhow::ensure!( pats.len() == 1, "only one pattern is allowed, but {} were given", pats.len(), ); let mut table = Table::empty(); let (asts, time) = util::timeitr(|| syntax.asts(&pats))?; table.add("parse time", time); let (hirs, time) = util::timeitr(|| syntax.hirs(&pats, &asts))?; table.add("translate time", time); let (mut seq, time) = util::timeit(|| literal.extractor.extract(&hirs[0])); table.add("extraction time", time); if literal.optimize.unwrap_or(true) { let ((), time) = util::timeitr(|| { match literal.kind { ExtractKind::Prefix => seq.optimize_for_prefix_by_preference(), ExtractKind::Suffix => seq.optimize_for_suffix_by_preference(), unk => { anyhow::bail!( "unsupported literal extraction kind: {:?}", unk ) } } Ok(()) })?; table.add("optimization time", time); } table.add("len", seq.len()); table.add("is finite?", seq.is_finite()); table.add("is exact?", seq.is_exact()); table.add("min literal len", seq.min_literal_len()); table.add("max literal len", seq.max_literal_len()); table.add( "longest common prefix", seq.longest_common_prefix().map(BString::from), ); table.add( "longest common suffix", seq.longest_common_suffix().map(BString::from), ); if common.table() { table.print(stdout())?; } if !common.quiet { let mut out = stdout(); if common.table() { writeln!(out, "")?; } match seq.literals() { None => writeln!(out, "{:?}", seq)?, Some(literals) => { for lit in literals.iter() { writeln!(stdout(), "{:?}", lit)?; } } } } Ok(()) } #[derive(Debug, Default)] struct Literal { extractor: Extractor, kind: ExtractKind, optimize: Option<bool>, } impl Configurable for Literal { fn configure( &mut self, p: &mut Parser, arg: &mut Arg, ) -> anyhow::Result<bool> { match *arg { Arg::Long("extract-kind") => { let value = p.value().context("--extract-kind")?; let value = value.string().context("--extract-kind")?; let kind = match &*value { "prefix" => ExtractKind::Prefix, "suffix" => ExtractKind::Suffix, unk => anyhow::bail!( "unknown value for --extract-kind: {}", unk ), }; self.kind = kind.clone(); self.extractor.kind(kind); } Arg::Long("limit-class") => { let limit = args::parse(p, "--limit-class")?; self.extractor.limit_class(limit); } Arg::Long("limit-repeat") => { let limit = args::parse(p, "--limit-repeat")?; self.extractor.limit_repeat(limit); } Arg::Long("limit-literal-len") => { let limit = args::parse(p, "--limit-literal-len")?; self.extractor.limit_literal_len(limit); } Arg::Long("limit-total") => { let limit = args::parse(p, "--limit-total")?; self.extractor.limit_total(limit); } Arg::Long("no-optimize") => { self.optimize = Some(false); } _ => return Ok(false), } Ok(true) } fn usage(&self) -> &[Usage] { const USAGES: &'static [Usage] = &[ Usage::new( "--extract-kind <kind>", "The literals to extract, either 'prefix' or 'suffix'.", r#" This sets the kind of literals to extract. Either 'prefix' literals can be extracted (the default) or 'suffix' literals. "#, ), Usage::new( "--limit-class <limit>", "The maximum size of a character class to support.", r#" This limit controls how big a character class needs to be for literal extraction to ignore it. 
In practice, large classes aren't good for literal extraction because it becomes easy to create very large sets of literals that aren't practical to search quickly for. "#, ), Usage::new( "--limit-repeat <limit>", "The maximum repeat size to expand.", r#" Literal extraction will attempt to expand bounded repetitions like '(abcd){50}'. But repetitions can become large easily, and so it makes sense to stop at a certain point. This limit specifies that point. "#, ), Usage::new( "--limit-literal-len <limit>", "The maximum length of a literal to extract.", r#" This limit caps the maximum length of a literal that is extract. If a literal would otherwise get longer than this limit, then it is cut and prevented from being expanded upon. "#, ), Usage::new( "--limit-total <limit>", "Limits the total number of literals to extract.", r#" This limit applies to the total number of literals to extract. If the number of literals would exceed this number, then literal extraction may use heuristics to cut the set before proceeding. In some cases though, this may cause extraction to give up entirely and return no literals. This limit tends to act as a backstop catch-all for when other limits fail. For example, '[ab]{3}{3}' uses small bounded repetitions and a small character class. The actual literals it generates are also pretty small. But the number of total literals it creates is quite large (512) despite each of its constituent pieces being quite small. Thus, this limit protects against cases like these by preventing the total size of the extracted literal set from getting too big. "#, ), Usage::new( "--no-optimize", "Don't attempt to optimize the extracted literals.", r#" This flag disables "optimization" of the extracted literals. Optimization is performed by default as it reflects what the meta regex engine does by default. Optimization is the "black magic" part of literal extraction that uses heuristics to guess at what kinds of literal sets are better to search for. Generally speaking, you want a small number of a literals to make multiple substring vector algorithms faster, but you want your literals to be longer so that they're more discriminatory and overall reduce their false positive rate. "#, ), ]; USAGES } } <file_sep>/regex-cli/cmd/generate/mod.rs use crate::args; mod fowler; mod serialize; mod unicode; const USAGE: &'static str = r#" A tool for doing various types of generation tasks. This includes things like serializing DFAs to be compiled into other programs, and generating the Unicode tables used by the regex project. USAGE: regex-cli generate <command> COMMANDS: fowler Convert Glenn Fowler's test suite to TOML files. serialize Serialize DFAs and generate Rust code to load them. unicode Generate all Unicode tables required for the regex project. "#; pub fn run(p: &mut lexopt::Parser) -> anyhow::Result<()> { match &*args::next_as_command(USAGE, p)? 
{ "fowler" => fowler::run(p), "serialize" => serialize::run(p), "unicode" => unicode::run(p), unk => anyhow::bail!("unrecognized command '{}'", unk), } } <file_sep>/fuzz/fuzz_targets/fuzz_regex_automata_deserialize_sparse_dfa.rs #![no_main] use libfuzzer_sys::{fuzz_target, Corpus}; fuzz_target!(|data: &[u8]| -> Corpus { run(data).map_or(Corpus::Reject, |_| Corpus::Keep) }); fn run(given_data: &[u8]) -> Option<()> { use regex_automata::dfa::Automaton; let _ = env_logger::try_init(); if given_data.len() < 2 { return None; } let haystack_len = usize::from(given_data[0]); let haystack = given_data.get(1..1 + haystack_len)?; let given_dfa_bytes = given_data.get(1 + haystack_len..)?; // We help the fuzzer along by adding a preamble to the bytes that should // at least make these first parts valid. The preamble expects a very // specific sequence of bytes, so it makes sense to just force this. let label = "rust-regex-automata-dfa-sparse\x00\x00"; assert_eq!(0, label.len() % 4); let endianness_check = 0xFEFFu32.to_ne_bytes().to_vec(); let version_check = 2u32.to_ne_bytes().to_vec(); let mut dfa_bytes: Vec<u8> = vec![]; dfa_bytes.extend(label.as_bytes()); dfa_bytes.extend(&endianness_check); dfa_bytes.extend(&version_check); dfa_bytes.extend(given_dfa_bytes); // This is the real test: checking that any input we give to // DFA::from_bytes will never result in a panic. let (dfa, _) = regex_automata::dfa::sparse::DFA::from_bytes(&dfa_bytes).ok()?; let _ = dfa.try_search_fwd(&regex_automata::Input::new(haystack)); Some(()) } <file_sep>/regex-automata/src/util/prefilter/memmem.rs use crate::util::{ prefilter::PrefilterI, search::{MatchKind, Span}, }; #[derive(Clone, Debug)] pub(crate) struct Memmem { #[cfg(not(all(feature = "std", feature = "perf-literal-substring")))] _unused: (), #[cfg(all(feature = "std", feature = "perf-literal-substring"))] finder: memchr::memmem::Finder<'static>, } impl Memmem { pub(crate) fn new<B: AsRef<[u8]>>( _kind: MatchKind, needles: &[B], ) -> Option<Memmem> { #[cfg(not(all(feature = "std", feature = "perf-literal-substring")))] { None } #[cfg(all(feature = "std", feature = "perf-literal-substring"))] { if needles.len() != 1 { return None; } let needle = needles[0].as_ref(); let finder = memchr::memmem::Finder::new(needle).into_owned(); Some(Memmem { finder }) } } } impl PrefilterI for Memmem { fn find(&self, haystack: &[u8], span: Span) -> Option<Span> { #[cfg(not(all(feature = "std", feature = "perf-literal-substring")))] { unreachable!() } #[cfg(all(feature = "std", feature = "perf-literal-substring"))] { self.finder.find(&haystack[span]).map(|i| { let start = span.start + i; let end = start + self.finder.needle().len(); Span { start, end } }) } } fn prefix(&self, haystack: &[u8], span: Span) -> Option<Span> { #[cfg(not(all(feature = "std", feature = "perf-literal-substring")))] { unreachable!() } #[cfg(all(feature = "std", feature = "perf-literal-substring"))] { let needle = self.finder.needle(); if haystack[span].starts_with(needle) { Some(Span { end: span.start + needle.len(), ..span }) } else { None } } } fn memory_usage(&self) -> usize { #[cfg(not(all(feature = "std", feature = "perf-literal-substring")))] { unreachable!() } #[cfg(all(feature = "std", feature = "perf-literal-substring"))] { self.finder.needle().len() } } fn is_fast(&self) -> bool { #[cfg(not(all(feature = "std", feature = "perf-literal-substring")))] { unreachable!() } #[cfg(all(feature = "std", feature = "perf-literal-substring"))] { true } } } <file_sep>/regex-lite/src/int.rs use 
core::num::NonZeroUsize;

/// An extension trait that adds routines to the `u32` primitive type.
pub(crate) trait U32 {
    fn as_usize(self) -> usize;
}

impl U32 for u32 {
    fn as_usize(self) -> usize {
        // OK because we require 32 or 64 bit targets. Therefore, every u32
        // necessarily fits into a usize.
        self as usize
    }
}

/// A `usize` that can never be `usize::MAX`.
///
/// This is similar to `core::num::NonZeroUsize`, but instead of not permitting
/// a zero value, this does not permit a max value.
///
/// This is useful in certain contexts where one wants to optimize the memory
/// usage of things that contain match offsets. Namely, since Rust slices
/// are guaranteed to never have a length exceeding `isize::MAX`, we can use
/// `usize::MAX` as a sentinel to indicate that no match was found. Indeed,
/// types like `Option<NonMaxUsize>` have exactly the same size in memory as a
/// `usize`.
///
/// This type is defined to be `repr(transparent)` for
/// `core::num::NonZeroUsize`, which is in turn defined to be
/// `repr(transparent)` for `usize`.
#[derive(Clone, Copy, Eq, Hash, PartialEq, PartialOrd, Ord)]
#[repr(transparent)]
pub(crate) struct NonMaxUsize(NonZeroUsize);

impl NonMaxUsize {
    /// Create a new `NonMaxUsize` from the given value.
    ///
    /// This returns `None` only when the given value is equal to `usize::MAX`.
    pub(crate) fn new(value: usize) -> Option<NonMaxUsize> {
        NonZeroUsize::new(value.wrapping_add(1)).map(NonMaxUsize)
    }

    /// Return the underlying `usize` value. The returned value is guaranteed
    /// to not equal `usize::MAX`.
    pub(crate) fn get(self) -> usize {
        self.0.get().wrapping_sub(1)
    }
}

// We provide our own Debug impl because seeing the internal repr can be quite
// surprising if you aren't expecting it. e.g., 'NonMaxUsize(5)' vs just '5'.
impl core::fmt::Debug for NonMaxUsize {
    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
        write!(f, "{:?}", self.get())
    }
}
<file_sep>/src/regex/mod.rs
pub(crate) mod bytes;
pub(crate) mod string;
<file_sep>/regex-lite/README.md
regex-lite
==========
This crate provides a **lightweight** regex engine for searching strings. The
regex syntax supported by this crate is nearly identical to what is found in
the `regex` crate. Like the `regex` crate, all regex searches in this crate
have worst case `O(m * n)` time complexity, where `m` is proportional to the
size of the regex and `n` is proportional to the size of the string being
searched.

[![Build status](https://github.com/rust-lang/regex/workflows/ci/badge.svg)](https://github.com/rust-lang/regex/actions)
[![Crates.io](https://img.shields.io/crates/v/regex-lite.svg)](https://crates.io/crates/regex-lite)

### Documentation

https://docs.rs/regex-lite

### Usage

To bring this crate into your repository, either add `regex-lite` to your
`Cargo.toml`, or run `cargo add regex-lite`.

Here's a simple example that matches a date in YYYY-MM-DD format and prints the
year, month and day:

```rust
use regex_lite::Regex;

fn main() {
    let re = Regex::new(r"(?x)
(?P<year>\d{4})  # the year
-
(?P<month>\d{2}) # the month
-
(?P<day>\d{2})   # the day
").unwrap();

    let caps = re.captures("2010-03-14").unwrap();
    assert_eq!("2010", &caps["year"]);
    assert_eq!("03", &caps["month"]);
    assert_eq!("14", &caps["day"]);
}
```

If you have lots of dates in text that you'd like to iterate over, then it's
easy to adapt the above example with an iterator:

```rust
use regex_lite::Regex;

const TO_SEARCH: &'static str = "
On 2010-03-14, foo happened. On 2014-10-14, bar happened.
"; fn main() { let re = Regex::new(r"(\d{4})-(\d{2})-(\d{2})").unwrap(); for caps in re.captures_iter(TO_SEARCH) { // Note that all of the unwraps are actually OK for this regex // because the only way for the regex to match is if all of the // capture groups match. This is not true in general though! println!("year: {}, month: {}, day: {}", caps.get(1).unwrap().as_str(), caps.get(2).unwrap().as_str(), caps.get(3).unwrap().as_str()); } } ``` This example outputs: ```text year: 2010, month: 03, day: 14 year: 2014, month: 10, day: 14 ``` ### Minimum Rust version policy This crate's minimum supported `rustc` version is `1.60.0`. The policy is that the minimum Rust version required to use this crate can be increased in semver compatible updates. ### Motivation The primary purpose of this crate is to provide an alternative regex engine for folks that are unhappy with the binary size and compilation time of the primary `regex` crate. The `regex-lite` crate does the absolute minimum possible to act as a drop-in replacement to the `regex` crate's `Regex` type. It avoids a lot of complexity by choosing not to optimize searches and to opt out of functionality such as robust Unicode support. By keeping the code simpler and smaller, we get binary sizes and compile times that are substantially better than even the `regex` crate with all of its features disabled. To make the benefits a bit more concrete, here are the results of one experiment I did. For `regex`, I disabled all features except for `std`: * `regex 1.7.3`: 1.41s compile time, 373KB relative size increase * `regex 1.8.1`: 1.46s compile time, 410KB relative size increase * `regex 1.9.0`: 1.93s compile time, 565KB relative size increase * `regex-lite 0.1.0`: 0.73s compile time, 94KB relative size increase The main reason why `regex-lite` does so much better than `regex` when all of `regex`'s features are disabled is because of irreducible complexity. There are certain parts of the code in `regex` that can't be arbitrarily divided based on binary size and compile time goals. It's instead more sustainable to just maintain an entirely separate crate. Ideas for improving the binary size and compile times of this crate even more are most welcome. ### License This project is licensed under either of * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or https://www.apache.org/licenses/LICENSE-2.0) * MIT license ([LICENSE-MIT](LICENSE-MIT) or https://opensource.org/licenses/MIT) at your option. The data in `regex-syntax/src/unicode_tables/` is licensed under the Unicode License Agreement ([LICENSE-UNICODE](https://www.unicode.org/copyright.html#License)). <file_sep>/regex-automata/tests/dfa/regression.rs // A regression test for checking that minimization correctly translates // whether a state is a match state or not. Previously, it was possible for // minimization to mark a non-matching state as matching. #[test] #[cfg(not(miri))] fn minimize_sets_correct_match_states() { use regex_automata::{ dfa::{dense::DFA, Automaton, StartKind}, Anchored, Input, }; let pattern = // This is a subset of the grapheme matching regex. I couldn't seem // to get a repro any smaller than this unfortunately. 
r"(?x) (?: \p{gcb=Prepend}* (?: (?: (?: \p{gcb=L}* (?:\p{gcb=V}+|\p{gcb=LV}\p{gcb=V}*|\p{gcb=LVT}) \p{gcb=T}* ) | \p{gcb=L}+ | \p{gcb=T}+ ) | \p{Extended_Pictographic} (?:\p{gcb=Extend}*\p{gcb=ZWJ}\p{Extended_Pictographic})* | [^\p{gcb=Control}\p{gcb=CR}\p{gcb=LF}] ) [\p{gcb=Extend}\p{gcb=ZWJ}\p{gcb=SpacingMark}]* ) "; let dfa = DFA::builder() .configure( DFA::config().start_kind(StartKind::Anchored).minimize(true), ) .build(pattern) .unwrap(); let input = Input::new(b"\xE2").anchored(Anchored::Yes); assert_eq!(Ok(None), dfa.try_search_fwd(&input)); } <file_sep>/regex-automata/src/util/primitives.rs /*! Lower level primitive types that are useful in a variety of circumstances. # Overview This list represents the principle types in this module and briefly describes when you might want to use them. * [`PatternID`] - A type that represents the identifier of a regex pattern. This is probably the most widely used type in this module (which is why it's also re-exported in the crate root). * [`StateID`] - A type the represents the identifier of a finite automaton state. This is used for both NFAs and DFAs, with the notable exception of the hybrid NFA/DFA. (The hybrid NFA/DFA uses a special purpose "lazy" state identifier.) * [`SmallIndex`] - The internal representation of both a `PatternID` and a `StateID`. Its purpose is to serve as a type that can index memory without being as big as a `usize` on 64-bit targets. The main idea behind this type is that there are many things in regex engines that will, in practice, never overflow a 32-bit integer. (For example, like the number of patterns in a regex or the number of states in an NFA.) Thus, a `SmallIndex` can be used to index memory without peppering `as` casts everywhere. Moreover, it forces callers to handle errors in the case where, somehow, the value would otherwise overflow either a 32-bit integer or a `usize` (e.g., on 16-bit targets). * [`NonMaxUsize`] - Represents a `usize` that cannot be `usize::MAX`. As a result, `Option<NonMaxUsize>` has the same size in memory as a `usize`. This useful, for example, when representing the offsets of submatches since it reduces memory usage by a factor of 2. It is a legal optimization since Rust guarantees that slices never have a length that exceeds `isize::MAX`. */ use core::num::NonZeroUsize; #[cfg(feature = "alloc")] use alloc::vec::Vec; use crate::util::int::{Usize, U16, U32, U64}; /// A `usize` that can never be `usize::MAX`. /// /// This is similar to `core::num::NonZeroUsize`, but instead of not permitting /// a zero value, this does not permit a max value. /// /// This is useful in certain contexts where one wants to optimize the memory /// usage of things that contain match offsets. Namely, since Rust slices /// are guaranteed to never have a length exceeding `isize::MAX`, we can use /// `usize::MAX` as a sentinel to indicate that no match was found. Indeed, /// types like `Option<NonMaxUsize>` have exactly the same size in memory as a /// `usize`. /// /// This type is defined to be `repr(transparent)` for /// `core::num::NonZeroUsize`, which is in turn defined to be /// `repr(transparent)` for `usize`. #[derive(Clone, Copy, Eq, Hash, PartialEq, PartialOrd, Ord)] #[repr(transparent)] pub struct NonMaxUsize(NonZeroUsize); impl NonMaxUsize { /// Create a new `NonMaxUsize` from the given value. /// /// This returns `None` only when the given value is equal to `usize::MAX`. 
#[inline] pub fn new(value: usize) -> Option<NonMaxUsize> { NonZeroUsize::new(value.wrapping_add(1)).map(NonMaxUsize) } /// Return the underlying `usize` value. The returned value is guaranteed /// to not equal `usize::MAX`. #[inline] pub fn get(self) -> usize { self.0.get().wrapping_sub(1) } } // We provide our own Debug impl because seeing the internal repr can be quite // surprising if you aren't expecting it. e.g., 'NonMaxUsize(5)' vs just '5'. impl core::fmt::Debug for NonMaxUsize { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { write!(f, "{:?}", self.get()) } } /// A type that represents a "small" index. /// /// The main idea of this type is to provide something that can index memory, /// but uses less memory than `usize` on 64-bit systems. Specifically, its /// representation is always a `u32` and has `repr(transparent)` enabled. (So /// it is safe to transmute between a `u32` and a `SmallIndex`.) /// /// A small index is typically useful in cases where there is no practical way /// that the index will overflow a 32-bit integer. A good example of this is /// an NFA state. If you could somehow build an NFA with `2^30` states, its /// memory usage would be exorbitant and its runtime execution would be so /// slow as to be completely worthless. Therefore, this crate generally deems /// it acceptable to return an error if it would otherwise build an NFA that /// requires a slice longer than what a 32-bit integer can index. In exchange, /// we can use 32-bit indices instead of 64-bit indices in various places. /// /// This type ensures this by providing a constructor that will return an error /// if its argument cannot fit into the type. This makes it much easier to /// handle these sorts of boundary cases that are otherwise extremely subtle. /// /// On all targets, this type guarantees that its value will fit in a `u32`, /// `i32`, `usize` and an `isize`. This means that on 16-bit targets, for /// example, this type's maximum value will never overflow an `isize`, /// which means it will never overflow a `i16` even though its internal /// representation is still a `u32`. /// /// The purpose for making the type fit into even signed integer types like /// `isize` is to guarantee that the difference between any two small indices /// is itself also a small index. This is useful in certain contexts, e.g., /// for delta encoding. /// /// # Other types /// /// The following types wrap `SmallIndex` to provide a more focused use case: /// /// * [`PatternID`] is for representing the identifiers of patterns. /// * [`StateID`] is for representing the identifiers of states in finite /// automata. It is used for both NFAs and DFAs. /// /// # Representation /// /// This type is always represented internally by a `u32` and is marked as /// `repr(transparent)`. Thus, this type always has the same representation as /// a `u32`. It is thus safe to transmute between a `u32` and a `SmallIndex`. /// /// # Indexing /// /// For convenience, callers may use a `SmallIndex` to index slices. /// /// # Safety /// /// While a `SmallIndex` is meant to guarantee that its value fits into `usize` /// without using as much space as a `usize` on all targets, callers must /// not rely on this property for safety. Callers may choose to rely on this /// property for correctness however. For example, creating a `SmallIndex` with /// an invalid value can be done in entirely safe code. This may in turn result /// in panics or silent logical errors. 
#[derive( Clone, Copy, Debug, Default, Eq, Hash, PartialEq, PartialOrd, Ord, )] #[repr(transparent)] pub struct SmallIndex(u32); impl SmallIndex { /// The maximum index value. #[cfg(any(target_pointer_width = "32", target_pointer_width = "64"))] pub const MAX: SmallIndex = // FIXME: Use as_usize() once const functions in traits are stable. SmallIndex::new_unchecked(core::i32::MAX as usize - 1); /// The maximum index value. #[cfg(target_pointer_width = "16")] pub const MAX: SmallIndex = SmallIndex::new_unchecked(core::isize::MAX - 1); /// The total number of values that can be represented as a small index. pub const LIMIT: usize = SmallIndex::MAX.as_usize() + 1; /// The zero index value. pub const ZERO: SmallIndex = SmallIndex::new_unchecked(0); /// The number of bytes that a single small index uses in memory. pub const SIZE: usize = core::mem::size_of::<SmallIndex>(); /// Create a new small index. /// /// If the given index exceeds [`SmallIndex::MAX`], then this returns /// an error. #[inline] pub fn new(index: usize) -> Result<SmallIndex, SmallIndexError> { SmallIndex::try_from(index) } /// Create a new small index without checking whether the given value /// exceeds [`SmallIndex::MAX`]. /// /// Using this routine with an invalid index value will result in /// unspecified behavior, but *not* undefined behavior. In particular, an /// invalid index value is likely to cause panics or possibly even silent /// logical errors. /// /// Callers must never rely on a `SmallIndex` to be within a certain range /// for memory safety. #[inline] pub const fn new_unchecked(index: usize) -> SmallIndex { // FIXME: Use as_u32() once const functions in traits are stable. SmallIndex(index as u32) } /// Like [`SmallIndex::new`], but panics if the given index is not valid. #[inline] pub fn must(index: usize) -> SmallIndex { SmallIndex::new(index).expect("invalid small index") } /// Return this small index as a `usize`. This is guaranteed to never /// overflow `usize`. #[inline] pub const fn as_usize(&self) -> usize { // FIXME: Use as_usize() once const functions in traits are stable. self.0 as usize } /// Return this small index as a `u64`. This is guaranteed to never /// overflow. #[inline] pub const fn as_u64(&self) -> u64 { // FIXME: Use u64::from() once const functions in traits are stable. self.0 as u64 } /// Return the internal `u32` of this small index. This is guaranteed to /// never overflow `u32`. #[inline] pub const fn as_u32(&self) -> u32 { self.0 } /// Return the internal `u32` of this small index represented as an `i32`. /// This is guaranteed to never overflow an `i32`. #[inline] pub const fn as_i32(&self) -> i32 { // This is OK because we guarantee that our max value is <= i32::MAX. self.0 as i32 } /// Returns one more than this small index as a usize. /// /// Since a small index has constraints on its maximum value, adding `1` to /// it will always fit in a `usize`, `u32` and a `i32`. #[inline] pub fn one_more(&self) -> usize { self.as_usize() + 1 } /// Decode this small index from the bytes given using the native endian /// byte order for the current target. /// /// If the decoded integer is not representable as a small index for the /// current target, then this returns an error. 
#[inline] pub fn from_ne_bytes( bytes: [u8; 4], ) -> Result<SmallIndex, SmallIndexError> { let id = u32::from_ne_bytes(bytes); if id > SmallIndex::MAX.as_u32() { return Err(SmallIndexError { attempted: u64::from(id) }); } Ok(SmallIndex::new_unchecked(id.as_usize())) } /// Decode this small index from the bytes given using the native endian /// byte order for the current target. /// /// This is analogous to [`SmallIndex::new_unchecked`] in that is does not /// check whether the decoded integer is representable as a small index. #[inline] pub fn from_ne_bytes_unchecked(bytes: [u8; 4]) -> SmallIndex { SmallIndex::new_unchecked(u32::from_ne_bytes(bytes).as_usize()) } /// Return the underlying small index integer as raw bytes in native endian /// format. #[inline] pub fn to_ne_bytes(&self) -> [u8; 4] { self.0.to_ne_bytes() } } impl<T> core::ops::Index<SmallIndex> for [T] { type Output = T; #[inline] fn index(&self, index: SmallIndex) -> &T { &self[index.as_usize()] } } impl<T> core::ops::IndexMut<SmallIndex> for [T] { #[inline] fn index_mut(&mut self, index: SmallIndex) -> &mut T { &mut self[index.as_usize()] } } #[cfg(feature = "alloc")] impl<T> core::ops::Index<SmallIndex> for Vec<T> { type Output = T; #[inline] fn index(&self, index: SmallIndex) -> &T { &self[index.as_usize()] } } #[cfg(feature = "alloc")] impl<T> core::ops::IndexMut<SmallIndex> for Vec<T> { #[inline] fn index_mut(&mut self, index: SmallIndex) -> &mut T { &mut self[index.as_usize()] } } impl From<u8> for SmallIndex { fn from(index: u8) -> SmallIndex { SmallIndex::new_unchecked(usize::from(index)) } } impl TryFrom<u16> for SmallIndex { type Error = SmallIndexError; fn try_from(index: u16) -> Result<SmallIndex, SmallIndexError> { if u32::from(index) > SmallIndex::MAX.as_u32() { return Err(SmallIndexError { attempted: u64::from(index) }); } Ok(SmallIndex::new_unchecked(index.as_usize())) } } impl TryFrom<u32> for SmallIndex { type Error = SmallIndexError; fn try_from(index: u32) -> Result<SmallIndex, SmallIndexError> { if index > SmallIndex::MAX.as_u32() { return Err(SmallIndexError { attempted: u64::from(index) }); } Ok(SmallIndex::new_unchecked(index.as_usize())) } } impl TryFrom<u64> for SmallIndex { type Error = SmallIndexError; fn try_from(index: u64) -> Result<SmallIndex, SmallIndexError> { if index > SmallIndex::MAX.as_u64() { return Err(SmallIndexError { attempted: index }); } Ok(SmallIndex::new_unchecked(index.as_usize())) } } impl TryFrom<usize> for SmallIndex { type Error = SmallIndexError; fn try_from(index: usize) -> Result<SmallIndex, SmallIndexError> { if index > SmallIndex::MAX.as_usize() { return Err(SmallIndexError { attempted: index.as_u64() }); } Ok(SmallIndex::new_unchecked(index)) } } #[cfg(test)] impl quickcheck::Arbitrary for SmallIndex { fn arbitrary(gen: &mut quickcheck::Gen) -> SmallIndex { use core::cmp::max; let id = max(i32::MIN + 1, i32::arbitrary(gen)).abs(); if id > SmallIndex::MAX.as_i32() { SmallIndex::MAX } else { SmallIndex::new(usize::try_from(id).unwrap()).unwrap() } } } /// This error occurs when a small index could not be constructed. /// /// This occurs when given an integer exceeding the maximum small index value. /// /// When the `std` feature is enabled, this implements the `Error` trait. #[derive(Clone, Debug, Eq, PartialEq)] pub struct SmallIndexError { attempted: u64, } impl SmallIndexError { /// Returns the value that could not be converted to a small index. 
pub fn attempted(&self) -> u64 { self.attempted } } #[cfg(feature = "std")] impl std::error::Error for SmallIndexError {} impl core::fmt::Display for SmallIndexError { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { write!( f, "failed to create small index from {:?}, which exceeds {:?}", self.attempted(), SmallIndex::MAX, ) } } #[derive(Clone, Debug)] pub(crate) struct SmallIndexIter { rng: core::ops::Range<usize>, } impl Iterator for SmallIndexIter { type Item = SmallIndex; fn next(&mut self) -> Option<SmallIndex> { if self.rng.start >= self.rng.end { return None; } let next_id = self.rng.start + 1; let id = core::mem::replace(&mut self.rng.start, next_id); // new_unchecked is OK since we asserted that the number of // elements in this iterator will fit in an ID at construction. Some(SmallIndex::new_unchecked(id)) } } macro_rules! index_type_impls { ($name:ident, $err:ident, $iter:ident, $withiter:ident) => { impl $name { /// The maximum value. pub const MAX: $name = $name(SmallIndex::MAX); /// The total number of values that can be represented. pub const LIMIT: usize = SmallIndex::LIMIT; /// The zero value. pub const ZERO: $name = $name(SmallIndex::ZERO); /// The number of bytes that a single value uses in memory. pub const SIZE: usize = SmallIndex::SIZE; /// Create a new value that is represented by a "small index." /// /// If the given index exceeds the maximum allowed value, then this /// returns an error. #[inline] pub fn new(value: usize) -> Result<$name, $err> { SmallIndex::new(value).map($name).map_err($err) } /// Create a new value without checking whether the given argument /// exceeds the maximum. /// /// Using this routine with an invalid value will result in /// unspecified behavior, but *not* undefined behavior. In /// particular, an invalid ID value is likely to cause panics or /// possibly even silent logical errors. /// /// Callers must never rely on this type to be within a certain /// range for memory safety. #[inline] pub const fn new_unchecked(value: usize) -> $name { $name(SmallIndex::new_unchecked(value)) } /// Like `new`, but panics if the given value is not valid. #[inline] pub fn must(value: usize) -> $name { $name::new(value).expect(concat!( "invalid ", stringify!($name), " value" )) } /// Return the internal value as a `usize`. This is guaranteed to /// never overflow `usize`. #[inline] pub const fn as_usize(&self) -> usize { self.0.as_usize() } /// Return the internal value as a `u64`. This is guaranteed to /// never overflow. #[inline] pub const fn as_u64(&self) -> u64 { self.0.as_u64() } /// Return the internal value as a `u32`. This is guaranteed to /// never overflow `u32`. #[inline] pub const fn as_u32(&self) -> u32 { self.0.as_u32() } /// Return the internal value as a i32`. This is guaranteed to /// never overflow an `i32`. #[inline] pub const fn as_i32(&self) -> i32 { self.0.as_i32() } /// Returns one more than this value as a usize. /// /// Since values represented by a "small index" have constraints /// on their maximum value, adding `1` to it will always fit in a /// `usize`, `u32` and a `i32`. #[inline] pub fn one_more(&self) -> usize { self.0.one_more() } /// Decode this value from the bytes given using the native endian /// byte order for the current target. /// /// If the decoded integer is not representable as a small index /// for the current target, then this returns an error. 
#[inline] pub fn from_ne_bytes(bytes: [u8; 4]) -> Result<$name, $err> { SmallIndex::from_ne_bytes(bytes).map($name).map_err($err) } /// Decode this value from the bytes given using the native endian /// byte order for the current target. /// /// This is analogous to `new_unchecked` in that is does not check /// whether the decoded integer is representable as a small index. #[inline] pub fn from_ne_bytes_unchecked(bytes: [u8; 4]) -> $name { $name(SmallIndex::from_ne_bytes_unchecked(bytes)) } /// Return the underlying integer as raw bytes in native endian /// format. #[inline] pub fn to_ne_bytes(&self) -> [u8; 4] { self.0.to_ne_bytes() } /// Returns an iterator over all values from 0 up to and not /// including the given length. /// /// If the given length exceeds this type's limit, then this /// panics. pub(crate) fn iter(len: usize) -> $iter { $iter::new(len) } } // We write our own Debug impl so that we get things like PatternID(5) // instead of PatternID(SmallIndex(5)). impl core::fmt::Debug for $name { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { f.debug_tuple(stringify!($name)).field(&self.as_u32()).finish() } } impl<T> core::ops::Index<$name> for [T] { type Output = T; #[inline] fn index(&self, index: $name) -> &T { &self[index.as_usize()] } } impl<T> core::ops::IndexMut<$name> for [T] { #[inline] fn index_mut(&mut self, index: $name) -> &mut T { &mut self[index.as_usize()] } } #[cfg(feature = "alloc")] impl<T> core::ops::Index<$name> for Vec<T> { type Output = T; #[inline] fn index(&self, index: $name) -> &T { &self[index.as_usize()] } } #[cfg(feature = "alloc")] impl<T> core::ops::IndexMut<$name> for Vec<T> { #[inline] fn index_mut(&mut self, index: $name) -> &mut T { &mut self[index.as_usize()] } } impl From<u8> for $name { fn from(value: u8) -> $name { $name(SmallIndex::from(value)) } } impl TryFrom<u16> for $name { type Error = $err; fn try_from(value: u16) -> Result<$name, $err> { SmallIndex::try_from(value).map($name).map_err($err) } } impl TryFrom<u32> for $name { type Error = $err; fn try_from(value: u32) -> Result<$name, $err> { SmallIndex::try_from(value).map($name).map_err($err) } } impl TryFrom<u64> for $name { type Error = $err; fn try_from(value: u64) -> Result<$name, $err> { SmallIndex::try_from(value).map($name).map_err($err) } } impl TryFrom<usize> for $name { type Error = $err; fn try_from(value: usize) -> Result<$name, $err> { SmallIndex::try_from(value).map($name).map_err($err) } } #[cfg(test)] impl quickcheck::Arbitrary for $name { fn arbitrary(gen: &mut quickcheck::Gen) -> $name { $name(SmallIndex::arbitrary(gen)) } } /// This error occurs when a value could not be constructed. /// /// This occurs when given an integer exceeding the maximum allowed /// value. /// /// When the `std` feature is enabled, this implements the `Error` /// trait. #[derive(Clone, Debug, Eq, PartialEq)] pub struct $err(SmallIndexError); impl $err { /// Returns the value that could not be converted to an ID. 
pub fn attempted(&self) -> u64 { self.0.attempted() } } #[cfg(feature = "std")] impl std::error::Error for $err {} impl core::fmt::Display for $err { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { write!( f, "failed to create {} from {:?}, which exceeds {:?}", stringify!($name), self.attempted(), $name::MAX, ) } } #[derive(Clone, Debug)] pub(crate) struct $iter(SmallIndexIter); impl $iter { fn new(len: usize) -> $iter { assert!( len <= $name::LIMIT, "cannot create iterator for {} when number of \ elements exceed {:?}", stringify!($name), $name::LIMIT, ); $iter(SmallIndexIter { rng: 0..len }) } } impl Iterator for $iter { type Item = $name; fn next(&mut self) -> Option<$name> { self.0.next().map($name) } } /// An iterator adapter that is like std::iter::Enumerate, but attaches /// small index values instead. It requires `ExactSizeIterator`. At /// construction, it ensures that the index of each element in the /// iterator is representable in the corresponding small index type. #[derive(Clone, Debug)] pub(crate) struct $withiter<I> { it: I, ids: $iter, } impl<I: Iterator + ExactSizeIterator> $withiter<I> { fn new(it: I) -> $withiter<I> { let ids = $name::iter(it.len()); $withiter { it, ids } } } impl<I: Iterator + ExactSizeIterator> Iterator for $withiter<I> { type Item = ($name, I::Item); fn next(&mut self) -> Option<($name, I::Item)> { let item = self.it.next()?; // Number of elements in this iterator must match, according // to contract of ExactSizeIterator. let id = self.ids.next().unwrap(); Some((id, item)) } } }; } /// The identifier of a regex pattern, represented by a [`SmallIndex`]. /// /// The identifier for a pattern corresponds to its relative position among /// other patterns in a single finite state machine. Namely, when building /// a multi-pattern regex engine, one must supply a sequence of patterns to /// match. The position (starting at 0) of each pattern in that sequence /// represents its identifier. This identifier is in turn used to identify and /// report matches of that pattern in various APIs. /// /// See the [`SmallIndex`] type for more information about what it means for /// a pattern ID to be a "small index." /// /// Note that this type is defined in the /// [`util::primitives`](crate::util::primitives) module, but it is also /// re-exported at the crate root due to how common it is. #[derive(Clone, Copy, Default, Eq, Hash, PartialEq, PartialOrd, Ord)] #[repr(transparent)] pub struct PatternID(SmallIndex); /// The identifier of a finite automaton state, represented by a /// [`SmallIndex`]. /// /// Most regex engines in this crate are built on top of finite automata. Each /// state in a finite automaton defines transitions from its state to another. /// Those transitions point to other states via their identifiers, i.e., a /// `StateID`. Since finite automata tend to contain many transitions, it is /// much more memory efficient to define state IDs as small indices. /// /// See the [`SmallIndex`] type for more information about what it means for /// a state ID to be a "small index." #[derive(Clone, Copy, Default, Eq, Hash, PartialEq, PartialOrd, Ord)] #[repr(transparent)] pub struct StateID(SmallIndex); index_type_impls!(PatternID, PatternIDError, PatternIDIter, WithPatternIDIter); index_type_impls!(StateID, StateIDError, StateIDIter, WithStateIDIter); /// A utility trait that defines a couple of adapters for making it convenient /// to access indices as "small index" types. 
We require ExactSizeIterator so /// that iterator construction can do a single check to make sure the index of /// each element is representable by its small index type. pub(crate) trait IteratorIndexExt: Iterator { fn with_pattern_ids(self) -> WithPatternIDIter<Self> where Self: Sized + ExactSizeIterator, { WithPatternIDIter::new(self) } fn with_state_ids(self) -> WithStateIDIter<Self> where Self: Sized + ExactSizeIterator, { WithStateIDIter::new(self) } } impl<I: Iterator> IteratorIndexExt for I {} <file_sep>/regex-automata/src/hybrid/id.rs /// A state identifier specifically tailored for lazy DFAs. /// /// A lazy state ID logically represents a pointer to a DFA state. In practice, /// by limiting the number of DFA states it can address, it reserves some /// bits of its representation to encode some additional information. That /// additional information is called a "tag." That tag is used to record /// whether the state it points to is an unknown, dead, quit, start or match /// state. /// /// When implementing a low level search routine with a lazy DFA, it is /// necessary to query the type of the current state to know what to do: /// /// * **Unknown** - The state has not yet been computed. The /// parameters used to get this state ID must be re-passed to /// [`DFA::next_state`](crate::hybrid::dfa::DFA::next_state), which will never /// return an unknown state ID. /// * **Dead** - A dead state only has transitions to itself. It indicates that /// the search cannot do anything else and should stop with whatever result it /// has. /// * **Quit** - A quit state indicates that the automaton could not answer /// whether a match exists or not. Correct search implementations must return a /// [`MatchError::quit`](crate::MatchError::quit) when a DFA enters a quit /// state. /// * **Start** - A start state is a state in which a search can begin. /// Lazy DFAs usually have more than one start state. Branching on /// this isn't required for correctness, but a common optimization is /// to run a prefilter when a search enters a start state. Note that /// start states are *not* tagged automatically, and one must enable the /// [`Config::specialize_start_states`](crate::hybrid::dfa::Config::specialize_start_states) /// setting for start states to be tagged. The reason for this is /// that a DFA search loop is usually written to execute a prefilter once it /// enters a start state. But if there is no prefilter, this handling can be /// quite diastrous as the DFA may ping-pong between the special handling code /// and a possible optimized hot path for handling untagged states. When start /// states aren't specialized, then they are untagged and remain in the hot /// path. /// * **Match** - A match state indicates that a match has been found. /// Depending on the semantics of your search implementation, it may either /// continue until the end of the haystack or a dead state, or it might quit /// and return the match immediately. /// /// As an optimization, the [`is_tagged`](LazyStateID::is_tagged) predicate /// can be used to determine if a tag exists at all. This is useful to avoid /// branching on all of the above types for every byte searched. /// /// # Example /// /// This example shows how `LazyStateID` can be used to implement a correct /// search routine with minimal branching. In particular, this search routine /// implements "leftmost" matching, which means that it doesn't immediately /// stop once a match is found. Instead, it continues until it reaches a dead /// state. 
/// /// Notice also how a correct search implementation deals with /// [`CacheError`](crate::hybrid::CacheError)s returned by some of /// the lazy DFA routines. When a `CacheError` occurs, it returns /// [`MatchError::gave_up`](crate::MatchError::gave_up). /// /// ``` /// use regex_automata::{ /// hybrid::dfa::{Cache, DFA}, /// HalfMatch, MatchError, Input, /// }; /// /// fn find_leftmost_first( /// dfa: &DFA, /// cache: &mut Cache, /// haystack: &[u8], /// ) -> Result<Option<HalfMatch>, MatchError> { /// // The start state is determined by inspecting the position and the /// // initial bytes of the haystack. Note that start states can never /// // be match states (since DFAs in this crate delay matches by 1 /// // byte), so we don't need to check if the start state is a match. /// let mut sid = dfa.start_state_forward( /// cache, /// &Input::new(haystack), /// )?; /// let mut last_match = None; /// // Walk all the bytes in the haystack. We can quit early if we see /// // a dead or a quit state. The former means the automaton will /// // never transition to any other state. The latter means that the /// // automaton entered a condition in which its search failed. /// for (i, &b) in haystack.iter().enumerate() { /// sid = dfa /// .next_state(cache, sid, b) /// .map_err(|_| MatchError::gave_up(i))?; /// if sid.is_tagged() { /// if sid.is_match() { /// last_match = Some(HalfMatch::new( /// dfa.match_pattern(cache, sid, 0), /// i, /// )); /// } else if sid.is_dead() { /// return Ok(last_match); /// } else if sid.is_quit() { /// // It is possible to enter into a quit state after /// // observing a match has occurred. In that case, we /// // should return the match instead of an error. /// if last_match.is_some() { /// return Ok(last_match); /// } /// return Err(MatchError::quit(b, i)); /// } /// // Implementors may also want to check for start states and /// // handle them differently for performance reasons. But it is /// // not necessary for correctness. Note that in order to check /// // for start states, you'll need to enable the /// // 'specialize_start_states' config knob, otherwise start /// // states will not be tagged. /// } /// } /// // Matches are always delayed by 1 byte, so we must explicitly walk /// // the special "EOI" transition at the end of the search. /// sid = dfa /// .next_eoi_state(cache, sid) /// .map_err(|_| MatchError::gave_up(haystack.len()))?; /// if sid.is_match() { /// last_match = Some(HalfMatch::new( /// dfa.match_pattern(cache, sid, 0), /// haystack.len(), /// )); /// } /// Ok(last_match) /// } /// /// // We use a greedy '+' operator to show how the search doesn't just stop /// // once a match is detected. It continues extending the match. Using /// // '[a-z]+?' would also work as expected and stop the search early. /// // Greediness is built into the automaton. /// let dfa = DFA::new(r"[a-z]+")?; /// let mut cache = dfa.create_cache(); /// let haystack = "123 foobar 4567".as_bytes(); /// let mat = find_leftmost_first(&dfa, &mut cache, haystack)?.unwrap(); /// assert_eq!(mat.pattern().as_usize(), 0); /// assert_eq!(mat.offset(), 10); /// /// // Here's another example that tests our handling of the special /// // EOI transition. This will fail to find a match if we don't call /// // 'next_eoi_state' at the end of the search since the match isn't found /// // until the final byte in the haystack. 
/// let dfa = DFA::new(r"[0-9]{4}")?; /// let mut cache = dfa.create_cache(); /// let haystack = "123 foobar 4567".as_bytes(); /// let mat = find_leftmost_first(&dfa, &mut cache, haystack)?.unwrap(); /// assert_eq!(mat.pattern().as_usize(), 0); /// assert_eq!(mat.offset(), 15); /// /// // And note that our search implementation above automatically works /// // with multi-DFAs. Namely, `dfa.match_pattern(match_state, 0)` selects /// // the appropriate pattern ID for us. /// let dfa = DFA::new_many(&[r"[a-z]+", r"[0-9]+"])?; /// let mut cache = dfa.create_cache(); /// let haystack = "123 foobar 4567".as_bytes(); /// let mat = find_leftmost_first(&dfa, &mut cache, haystack)?.unwrap(); /// assert_eq!(mat.pattern().as_usize(), 1); /// assert_eq!(mat.offset(), 3); /// let mat = find_leftmost_first(&dfa, &mut cache, &haystack[3..])?.unwrap(); /// assert_eq!(mat.pattern().as_usize(), 0); /// assert_eq!(mat.offset(), 7); /// let mat = find_leftmost_first(&dfa, &mut cache, &haystack[10..])?.unwrap(); /// assert_eq!(mat.pattern().as_usize(), 1); /// assert_eq!(mat.offset(), 5); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[derive( Clone, Copy, Debug, Default, Eq, Hash, PartialEq, PartialOrd, Ord, )] pub struct LazyStateID(u32); impl LazyStateID { #[cfg(any(target_pointer_width = "32", target_pointer_width = "64"))] const MAX_BIT: usize = 31; #[cfg(target_pointer_width = "16")] const MAX_BIT: usize = 15; const MASK_UNKNOWN: usize = 1 << (LazyStateID::MAX_BIT); const MASK_DEAD: usize = 1 << (LazyStateID::MAX_BIT - 1); const MASK_QUIT: usize = 1 << (LazyStateID::MAX_BIT - 2); const MASK_START: usize = 1 << (LazyStateID::MAX_BIT - 3); const MASK_MATCH: usize = 1 << (LazyStateID::MAX_BIT - 4); const MAX: usize = LazyStateID::MASK_MATCH - 1; /// Create a new lazy state ID. /// /// If the given identifier exceeds [`LazyStateID::MAX`], then this returns /// an error. #[inline] pub(crate) fn new(id: usize) -> Result<LazyStateID, LazyStateIDError> { if id > LazyStateID::MAX { let attempted = u64::try_from(id).unwrap(); return Err(LazyStateIDError { attempted }); } Ok(LazyStateID::new_unchecked(id)) } /// Create a new lazy state ID without checking whether the given value /// exceeds [`LazyStateID::MAX`]. /// /// While this is unchecked, providing an incorrect value must never /// sacrifice memory safety. #[inline] const fn new_unchecked(id: usize) -> LazyStateID { // FIXME: Use as_u32() once const functions in traits are stable. LazyStateID(id as u32) } /// Return this lazy state ID as an untagged `usize`. /// /// If this lazy state ID is tagged, then the usize returned is the state /// ID without the tag. If the ID was not tagged, then the usize returned /// is equivalent to the state ID. #[inline] pub(crate) fn as_usize_untagged(&self) -> usize { self.as_usize_unchecked() & LazyStateID::MAX } /// Return this lazy state ID as its raw internal `usize` value, which may /// be tagged (and thus greater than LazyStateID::MAX). #[inline] pub(crate) const fn as_usize_unchecked(&self) -> usize { // FIXME: Use as_usize() once const functions in traits are stable. 
self.0 as usize } #[inline] pub(crate) const fn to_unknown(&self) -> LazyStateID { LazyStateID::new_unchecked( self.as_usize_unchecked() | LazyStateID::MASK_UNKNOWN, ) } #[inline] pub(crate) const fn to_dead(&self) -> LazyStateID { LazyStateID::new_unchecked( self.as_usize_unchecked() | LazyStateID::MASK_DEAD, ) } #[inline] pub(crate) const fn to_quit(&self) -> LazyStateID { LazyStateID::new_unchecked( self.as_usize_unchecked() | LazyStateID::MASK_QUIT, ) } /// Return this lazy state ID as a state ID that is tagged as a start /// state. #[inline] pub(crate) const fn to_start(&self) -> LazyStateID { LazyStateID::new_unchecked( self.as_usize_unchecked() | LazyStateID::MASK_START, ) } /// Return this lazy state ID as a lazy state ID that is tagged as a match /// state. #[inline] pub(crate) const fn to_match(&self) -> LazyStateID { LazyStateID::new_unchecked( self.as_usize_unchecked() | LazyStateID::MASK_MATCH, ) } /// Return true if and only if this lazy state ID is tagged. /// /// When a lazy state ID is tagged, then one can conclude that it is one /// of a match, start, dead, quit or unknown state. #[inline] pub const fn is_tagged(&self) -> bool { self.as_usize_unchecked() > LazyStateID::MAX } /// Return true if and only if this represents a lazy state ID that is /// "unknown." That is, the state has not yet been created. When a caller /// sees this state ID, it generally means that a state has to be computed /// in order to proceed. #[inline] pub const fn is_unknown(&self) -> bool { self.as_usize_unchecked() & LazyStateID::MASK_UNKNOWN > 0 } /// Return true if and only if this represents a dead state. A dead state /// is a state that can never transition to any other state except the /// dead state. When a dead state is seen, it generally indicates that a /// search should stop. #[inline] pub const fn is_dead(&self) -> bool { self.as_usize_unchecked() & LazyStateID::MASK_DEAD > 0 } /// Return true if and only if this represents a quit state. A quit state /// is a state that is representationally equivalent to a dead state, /// except it indicates the automaton has reached a point at which it can /// no longer determine whether a match exists or not. In general, this /// indicates an error during search and the caller must either pass this /// error up or use a different search technique. #[inline] pub const fn is_quit(&self) -> bool { self.as_usize_unchecked() & LazyStateID::MASK_QUIT > 0 } /// Return true if and only if this lazy state ID has been tagged as a /// start state. /// /// Note that if /// [`Config::specialize_start_states`](crate::hybrid::dfa::Config) is /// disabled (which is the default), then this will always return false /// since start states won't be tagged. #[inline] pub const fn is_start(&self) -> bool { self.as_usize_unchecked() & LazyStateID::MASK_START > 0 } /// Return true if and only if this lazy state ID has been tagged as a /// match state. #[inline] pub const fn is_match(&self) -> bool { self.as_usize_unchecked() & LazyStateID::MASK_MATCH > 0 } } /// This error occurs when a lazy state ID could not be constructed. /// /// This occurs when given an integer exceeding the maximum lazy state ID /// value. /// /// When the `std` feature is enabled, this implements the `Error` trait. #[derive(Clone, Debug, Eq, PartialEq)] pub(crate) struct LazyStateIDError { attempted: u64, } impl LazyStateIDError { /// Returns the value that failed to constructed a lazy state ID. 
pub(crate) fn attempted(&self) -> u64 { self.attempted } } #[cfg(feature = "std")] impl std::error::Error for LazyStateIDError {} impl core::fmt::Display for LazyStateIDError { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { write!( f, "failed to create LazyStateID from {:?}, which exceeds {:?}", self.attempted(), LazyStateID::MAX, ) } } <file_sep>/regex-syntax/src/unicode.rs use alloc::{ string::{String, ToString}, vec::Vec, }; use crate::hir; /// An inclusive range of codepoints from a generated file (hence the static /// lifetime). type Range = &'static [(char, char)]; /// An error that occurs when dealing with Unicode. /// /// We don't impl the Error trait here because these always get converted /// into other public errors. (This error type isn't exported.) #[derive(Debug)] pub enum Error { PropertyNotFound, PropertyValueNotFound, // Not used when unicode-perl is enabled. #[allow(dead_code)] PerlClassNotFound, } /// An error that occurs when Unicode-aware simple case folding fails. /// /// This error can occur when the case mapping tables necessary for Unicode /// aware case folding are unavailable. This only occurs when the /// `unicode-case` feature is disabled. (The feature is enabled by default.) #[derive(Debug)] pub struct CaseFoldError(()); #[cfg(feature = "std")] impl std::error::Error for CaseFoldError {} impl core::fmt::Display for CaseFoldError { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { write!( f, "Unicode-aware case folding is not available \ (probably because the unicode-case feature is not enabled)" ) } } /// An error that occurs when the Unicode-aware `\w` class is unavailable. /// /// This error can occur when the data tables necessary for the Unicode aware /// Perl character class `\w` are unavailable. This only occurs when the /// `unicode-perl` feature is disabled. (The feature is enabled by default.) #[derive(Debug)] pub struct UnicodeWordError(()); #[cfg(feature = "std")] impl std::error::Error for UnicodeWordError {} impl core::fmt::Display for UnicodeWordError { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { write!( f, "Unicode-aware \\w class is not available \ (probably because the unicode-perl feature is not enabled)" ) } } /// A state oriented traverser of the simple case folding table. /// /// A case folder can be constructed via `SimpleCaseFolder::new()`, which will /// return an error if the underlying case folding table is unavailable. /// /// After construction, it is expected that callers will use /// `SimpleCaseFolder::mapping` by calling it with codepoints in strictly /// increasing order. For example, calling it on `b` and then on `a` is illegal /// and will result in a panic. /// /// The main idea of this type is that it tries hard to make mapping lookups /// fast by exploiting the structure of the underlying table, and the ordering /// assumption enables this. #[derive(Debug)] pub struct SimpleCaseFolder { /// The simple case fold table. It's a sorted association list, where the /// keys are Unicode scalar values and the values are the corresponding /// equivalence class (not including the key) of the "simple" case folded /// Unicode scalar values. table: &'static [(char, &'static [char])], /// The last codepoint that was used for a lookup. last: Option<char>, /// The index to the entry in `table` corresponding to the smallest key `k` /// such that `k > k0`, where `k0` is the most recent key lookup. Note that /// in particular, `k0` may not be in the table! 
next: usize, } impl SimpleCaseFolder { /// Create a new simple case folder, returning an error if the underlying /// case folding table is unavailable. pub fn new() -> Result<SimpleCaseFolder, CaseFoldError> { #[cfg(not(feature = "unicode-case"))] { Err(CaseFoldError(())) } #[cfg(feature = "unicode-case")] { Ok(SimpleCaseFolder { table: crate::unicode_tables::case_folding_simple::CASE_FOLDING_SIMPLE, last: None, next: 0, }) } } /// Return the equivalence class of case folded codepoints for the given /// codepoint. The equivalence class returned never includes the codepoint /// given. If the given codepoint has no case folded codepoints (i.e., /// no entry in the underlying case folding table), then this returns an /// empty slice. /// /// # Panics /// /// This panics when called with a `c` that is less than or equal to the /// previous call. In other words, callers need to use this method with /// strictly increasing values of `c`. pub fn mapping(&mut self, c: char) -> &'static [char] { if let Some(last) = self.last { assert!( last < c, "got codepoint U+{:X} which occurs before \ last codepoint U+{:X}", u32::from(c), u32::from(last), ); } self.last = Some(c); if self.next >= self.table.len() { return &[]; } let (k, v) = self.table[self.next]; if k == c { self.next += 1; return v; } match self.get(c) { Err(i) => { self.next = i; &[] } Ok(i) => { // Since we require lookups to proceed // in order, anything we find should be // after whatever we thought might be // next. Otherwise, the caller is either // going out of order or we would have // found our next key at 'self.next'. assert!(i > self.next); self.next = i + 1; self.table[i].1 } } } /// Returns true if and only if the given range overlaps with any region /// of the underlying case folding table. That is, when true, there exists /// at least one codepoint in the inclusive range `[start, end]` that has /// a non-trivial equivalence class of case folded codepoints. Conversely, /// when this returns false, all codepoints in the range `[start, end]` /// correspond to the trivial equivalence class of case folded codepoints, /// i.e., itself. /// /// This is useful to call before iterating over the codepoints in the /// range and looking up the mapping for each. If you know none of the /// mappings will return anything, then you might be able to skip doing it /// altogether. /// /// # Panics /// /// This panics when `end < start`. pub fn overlaps(&self, start: char, end: char) -> bool { use core::cmp::Ordering; assert!(start <= end); self.table .binary_search_by(|&(c, _)| { if start <= c && c <= end { Ordering::Equal } else if c > end { Ordering::Greater } else { Ordering::Less } }) .is_ok() } /// Returns the index at which `c` occurs in the simple case fold table. If /// `c` does not occur, then this returns an `i` such that `table[i-1].0 < /// c` and `table[i].0 > c`. fn get(&self, c: char) -> Result<usize, usize> { self.table.binary_search_by_key(&c, |&(c1, _)| c1) } } /// A query for finding a character class defined by Unicode. This supports /// either use of a property name directly, or lookup by property value. The /// former generally refers to Binary properties (see UTS#44, Table 8), but /// as a special exception (see UTS#18, Section 1.2) both general categories /// (an enumeration) and scripts (a catalog) are supported as if each of their /// possible values were a binary property. /// /// In all circumstances, property names and values are normalized and /// canonicalized. 
That is, `GC == gc == GeneralCategory == general_category`. /// /// The lifetime `'a` refers to the shorter of the lifetimes of property name /// and property value. #[derive(Debug)] pub enum ClassQuery<'a> { /// Return a class corresponding to a Unicode binary property, named by /// a single letter. OneLetter(char), /// Return a class corresponding to a Unicode binary property. /// /// Note that, by special exception (see UTS#18, Section 1.2), both /// general category values and script values are permitted here as if /// they were a binary property. Binary(&'a str), /// Return a class corresponding to all codepoints whose property /// (identified by `property_name`) corresponds to the given value /// (identified by `property_value`). ByValue { /// A property name. property_name: &'a str, /// A property value. property_value: &'a str, }, } impl<'a> ClassQuery<'a> { fn canonicalize(&self) -> Result<CanonicalClassQuery, Error> { match *self { ClassQuery::OneLetter(c) => self.canonical_binary(&c.to_string()), ClassQuery::Binary(name) => self.canonical_binary(name), ClassQuery::ByValue { property_name, property_value } => { let property_name = symbolic_name_normalize(property_name); let property_value = symbolic_name_normalize(property_value); let canon_name = match canonical_prop(&property_name)? { None => return Err(Error::PropertyNotFound), Some(canon_name) => canon_name, }; Ok(match canon_name { "General_Category" => { let canon = match canonical_gencat(&property_value)? { None => return Err(Error::PropertyValueNotFound), Some(canon) => canon, }; CanonicalClassQuery::GeneralCategory(canon) } "Script" => { let canon = match canonical_script(&property_value)? { None => return Err(Error::PropertyValueNotFound), Some(canon) => canon, }; CanonicalClassQuery::Script(canon) } _ => { let vals = match property_values(canon_name)? { None => return Err(Error::PropertyValueNotFound), Some(vals) => vals, }; let canon_val = match canonical_value(vals, &property_value) { None => { return Err(Error::PropertyValueNotFound) } Some(canon_val) => canon_val, }; CanonicalClassQuery::ByValue { property_name: canon_name, property_value: canon_val, } } }) } } } fn canonical_binary( &self, name: &str, ) -> Result<CanonicalClassQuery, Error> { let norm = symbolic_name_normalize(name); // This is a special case where 'cf' refers to the 'Format' general // category, but where the 'cf' abbreviation is also an abbreviation // for the 'Case_Folding' property. But we want to treat it as // a general category. (Currently, we don't even support the // 'Case_Folding' property. But if we do in the future, users will be // required to spell it out.) // // Also 'sc' refers to the 'Currency_Symbol' general category, but is // also the abbreviation for the 'Script' property. So we avoid calling // 'canonical_prop' for it too, which would erroneously normalize it // to 'Script'. // // Another case: 'lc' is an abbreviation for the 'Cased_Letter' // general category, but is also an abbreviation for the 'Lowercase_Mapping' // property. We don't currently support the latter, so as with 'cf' // above, we treat 'lc' as 'Cased_Letter'. if norm != "cf" && norm != "sc" && norm != "lc" { if let Some(canon) = canonical_prop(&norm)? { return Ok(CanonicalClassQuery::Binary(canon)); } } if let Some(canon) = canonical_gencat(&norm)? { return Ok(CanonicalClassQuery::GeneralCategory(canon)); } if let Some(canon) = canonical_script(&norm)? 
{ return Ok(CanonicalClassQuery::Script(canon)); } Err(Error::PropertyNotFound) } } /// Like ClassQuery, but its parameters have been canonicalized. This also /// differentiates binary properties from flattened general categories and /// scripts. #[derive(Debug, Eq, PartialEq)] enum CanonicalClassQuery { /// The canonical binary property name. Binary(&'static str), /// The canonical general category name. GeneralCategory(&'static str), /// The canonical script name. Script(&'static str), /// An arbitrary association between property and value, both of which /// have been canonicalized. /// /// Note that by construction, the property name of ByValue will never /// be General_Category or Script. Those two cases are subsumed by the /// eponymous variants. ByValue { /// The canonical property name. property_name: &'static str, /// The canonical property value. property_value: &'static str, }, } /// Looks up a Unicode class given a query. If one doesn't exist, then /// `None` is returned. pub fn class(query: ClassQuery<'_>) -> Result<hir::ClassUnicode, Error> { use self::CanonicalClassQuery::*; match query.canonicalize()? { Binary(name) => bool_property(name), GeneralCategory(name) => gencat(name), Script(name) => script(name), ByValue { property_name: "Age", property_value } => { let mut class = hir::ClassUnicode::empty(); for set in ages(property_value)? { class.union(&hir_class(set)); } Ok(class) } ByValue { property_name: "Script_Extensions", property_value } => { script_extension(property_value) } ByValue { property_name: "Grapheme_Cluster_Break", property_value, } => gcb(property_value), ByValue { property_name: "Sentence_Break", property_value } => { sb(property_value) } ByValue { property_name: "Word_Break", property_value } => { wb(property_value) } _ => { // What else should we support? Err(Error::PropertyNotFound) } } } /// Returns a Unicode aware class for \w. /// /// This returns an error if the data is not available for \w. pub fn perl_word() -> Result<hir::ClassUnicode, Error> { #[cfg(not(feature = "unicode-perl"))] fn imp() -> Result<hir::ClassUnicode, Error> { Err(Error::PerlClassNotFound) } #[cfg(feature = "unicode-perl")] fn imp() -> Result<hir::ClassUnicode, Error> { use crate::unicode_tables::perl_word::PERL_WORD; Ok(hir_class(PERL_WORD)) } imp() } /// Returns a Unicode aware class for \s. /// /// This returns an error if the data is not available for \s. pub fn perl_space() -> Result<hir::ClassUnicode, Error> { #[cfg(not(any(feature = "unicode-perl", feature = "unicode-bool")))] fn imp() -> Result<hir::ClassUnicode, Error> { Err(Error::PerlClassNotFound) } #[cfg(all(feature = "unicode-perl", not(feature = "unicode-bool")))] fn imp() -> Result<hir::ClassUnicode, Error> { use crate::unicode_tables::perl_space::WHITE_SPACE; Ok(hir_class(WHITE_SPACE)) } #[cfg(feature = "unicode-bool")] fn imp() -> Result<hir::ClassUnicode, Error> { use crate::unicode_tables::property_bool::WHITE_SPACE; Ok(hir_class(WHITE_SPACE)) } imp() } /// Returns a Unicode aware class for \d. /// /// This returns an error if the data is not available for \d. 
pub fn perl_digit() -> Result<hir::ClassUnicode, Error> { #[cfg(not(any(feature = "unicode-perl", feature = "unicode-gencat")))] fn imp() -> Result<hir::ClassUnicode, Error> { Err(Error::PerlClassNotFound) } #[cfg(all(feature = "unicode-perl", not(feature = "unicode-gencat")))] fn imp() -> Result<hir::ClassUnicode, Error> { use crate::unicode_tables::perl_decimal::DECIMAL_NUMBER; Ok(hir_class(DECIMAL_NUMBER)) } #[cfg(feature = "unicode-gencat")] fn imp() -> Result<hir::ClassUnicode, Error> { use crate::unicode_tables::general_category::DECIMAL_NUMBER; Ok(hir_class(DECIMAL_NUMBER)) } imp() } /// Build a Unicode HIR class from a sequence of Unicode scalar value ranges. pub fn hir_class(ranges: &[(char, char)]) -> hir::ClassUnicode { let hir_ranges: Vec<hir::ClassUnicodeRange> = ranges .iter() .map(|&(s, e)| hir::ClassUnicodeRange::new(s, e)) .collect(); hir::ClassUnicode::new(hir_ranges) } /// Returns true only if the given codepoint is in the `\w` character class. /// /// If the `unicode-perl` feature is not enabled, then this returns an error. pub fn is_word_character(c: char) -> Result<bool, UnicodeWordError> { #[cfg(not(feature = "unicode-perl"))] fn imp(_: char) -> Result<bool, UnicodeWordError> { Err(UnicodeWordError(())) } #[cfg(feature = "unicode-perl")] fn imp(c: char) -> Result<bool, UnicodeWordError> { use crate::{is_word_byte, unicode_tables::perl_word::PERL_WORD}; if u8::try_from(c).map_or(false, is_word_byte) { return Ok(true); } Ok(PERL_WORD .binary_search_by(|&(start, end)| { use core::cmp::Ordering; if start <= c && c <= end { Ordering::Equal } else if start > c { Ordering::Greater } else { Ordering::Less } }) .is_ok()) } imp(c) } /// A mapping of property values for a specific property. /// /// The first element of each tuple is a normalized property value while the /// second element of each tuple is the corresponding canonical property /// value. type PropertyValues = &'static [(&'static str, &'static str)]; fn canonical_gencat( normalized_value: &str, ) -> Result<Option<&'static str>, Error> { Ok(match normalized_value { "any" => Some("Any"), "assigned" => Some("Assigned"), "ascii" => Some("ASCII"), _ => { let gencats = property_values("General_Category")?.unwrap(); canonical_value(gencats, normalized_value) } }) } fn canonical_script( normalized_value: &str, ) -> Result<Option<&'static str>, Error> { let scripts = property_values("Script")?.unwrap(); Ok(canonical_value(scripts, normalized_value)) } /// Find the canonical property name for the given normalized property name. /// /// If no such property exists, then `None` is returned. /// /// The normalized property name must have been normalized according to /// UAX44 LM3, which can be done using `symbolic_name_normalize`. /// /// If the property names data is not available, then an error is returned. 
fn canonical_prop( normalized_name: &str, ) -> Result<Option<&'static str>, Error> { #[cfg(not(any( feature = "unicode-age", feature = "unicode-bool", feature = "unicode-gencat", feature = "unicode-perl", feature = "unicode-script", feature = "unicode-segment", )))] fn imp(_: &str) -> Result<Option<&'static str>, Error> { Err(Error::PropertyNotFound) } #[cfg(any( feature = "unicode-age", feature = "unicode-bool", feature = "unicode-gencat", feature = "unicode-perl", feature = "unicode-script", feature = "unicode-segment", ))] fn imp(name: &str) -> Result<Option<&'static str>, Error> { use crate::unicode_tables::property_names::PROPERTY_NAMES; Ok(PROPERTY_NAMES .binary_search_by_key(&name, |&(n, _)| n) .ok() .map(|i| PROPERTY_NAMES[i].1)) } imp(normalized_name) } /// Find the canonical property value for the given normalized property /// value. /// /// The given property values should correspond to the values for the property /// under question, which can be found using `property_values`. /// /// If no such property value exists, then `None` is returned. /// /// The normalized property value must have been normalized according to /// UAX44 LM3, which can be done using `symbolic_name_normalize`. fn canonical_value( vals: PropertyValues, normalized_value: &str, ) -> Option<&'static str> { vals.binary_search_by_key(&normalized_value, |&(n, _)| n) .ok() .map(|i| vals[i].1) } /// Return the table of property values for the given property name. /// /// If the property values data is not available, then an error is returned. fn property_values( canonical_property_name: &'static str, ) -> Result<Option<PropertyValues>, Error> { #[cfg(not(any( feature = "unicode-age", feature = "unicode-bool", feature = "unicode-gencat", feature = "unicode-perl", feature = "unicode-script", feature = "unicode-segment", )))] fn imp(_: &'static str) -> Result<Option<PropertyValues>, Error> { Err(Error::PropertyValueNotFound) } #[cfg(any( feature = "unicode-age", feature = "unicode-bool", feature = "unicode-gencat", feature = "unicode-perl", feature = "unicode-script", feature = "unicode-segment", ))] fn imp(name: &'static str) -> Result<Option<PropertyValues>, Error> { use crate::unicode_tables::property_values::PROPERTY_VALUES; Ok(PROPERTY_VALUES .binary_search_by_key(&name, |&(n, _)| n) .ok() .map(|i| PROPERTY_VALUES[i].1)) } imp(canonical_property_name) } // This is only used in some cases, but small enough to just let it be dead // instead of figuring out (and maintaining) the right set of features. #[allow(dead_code)] fn property_set( name_map: &'static [(&'static str, Range)], canonical: &'static str, ) -> Option<Range> { name_map .binary_search_by_key(&canonical, |x| x.0) .ok() .map(|i| name_map[i].1) } /// Returns an iterator over Unicode Age sets. Each item corresponds to a set /// of codepoints that were added in a particular revision of Unicode. The /// iterator yields items in chronological order. /// /// If the given age value isn't valid or if the data isn't available, then an /// error is returned instead. 
fn ages(canonical_age: &str) -> Result<impl Iterator<Item = Range>, Error> { #[cfg(not(feature = "unicode-age"))] fn imp(_: &str) -> Result<impl Iterator<Item = Range>, Error> { use core::option::IntoIter; Err::<IntoIter<Range>, _>(Error::PropertyNotFound) } #[cfg(feature = "unicode-age")] fn imp(canonical_age: &str) -> Result<impl Iterator<Item = Range>, Error> { use crate::unicode_tables::age; const AGES: &[(&str, Range)] = &[ ("V1_1", age::V1_1), ("V2_0", age::V2_0), ("V2_1", age::V2_1), ("V3_0", age::V3_0), ("V3_1", age::V3_1), ("V3_2", age::V3_2), ("V4_0", age::V4_0), ("V4_1", age::V4_1), ("V5_0", age::V5_0), ("V5_1", age::V5_1), ("V5_2", age::V5_2), ("V6_0", age::V6_0), ("V6_1", age::V6_1), ("V6_2", age::V6_2), ("V6_3", age::V6_3), ("V7_0", age::V7_0), ("V8_0", age::V8_0), ("V9_0", age::V9_0), ("V10_0", age::V10_0), ("V11_0", age::V11_0), ("V12_0", age::V12_0), ("V12_1", age::V12_1), ("V13_0", age::V13_0), ("V14_0", age::V14_0), ("V15_0", age::V15_0), ]; assert_eq!(AGES.len(), age::BY_NAME.len(), "ages are out of sync"); let pos = AGES.iter().position(|&(age, _)| canonical_age == age); match pos { None => Err(Error::PropertyValueNotFound), Some(i) => Ok(AGES[..=i].iter().map(|&(_, classes)| classes)), } } imp(canonical_age) } /// Returns the Unicode HIR class corresponding to the given general category. /// /// Name canonicalization is assumed to be performed by the caller. /// /// If the given general category could not be found, or if the general /// category data is not available, then an error is returned. fn gencat(canonical_name: &'static str) -> Result<hir::ClassUnicode, Error> { #[cfg(not(feature = "unicode-gencat"))] fn imp(_: &'static str) -> Result<hir::ClassUnicode, Error> { Err(Error::PropertyNotFound) } #[cfg(feature = "unicode-gencat")] fn imp(name: &'static str) -> Result<hir::ClassUnicode, Error> { use crate::unicode_tables::general_category::BY_NAME; match name { "ASCII" => Ok(hir_class(&[('\0', '\x7F')])), "Any" => Ok(hir_class(&[('\0', '\u{10FFFF}')])), "Assigned" => { let mut cls = gencat("Unassigned")?; cls.negate(); Ok(cls) } name => property_set(BY_NAME, name) .map(hir_class) .ok_or(Error::PropertyValueNotFound), } } match canonical_name { "Decimal_Number" => perl_digit(), name => imp(name), } } /// Returns the Unicode HIR class corresponding to the given script. /// /// Name canonicalization is assumed to be performed by the caller. /// /// If the given script could not be found, or if the script data is not /// available, then an error is returned. fn script(canonical_name: &'static str) -> Result<hir::ClassUnicode, Error> { #[cfg(not(feature = "unicode-script"))] fn imp(_: &'static str) -> Result<hir::ClassUnicode, Error> { Err(Error::PropertyNotFound) } #[cfg(feature = "unicode-script")] fn imp(name: &'static str) -> Result<hir::ClassUnicode, Error> { use crate::unicode_tables::script::BY_NAME; property_set(BY_NAME, name) .map(hir_class) .ok_or(Error::PropertyValueNotFound) } imp(canonical_name) } /// Returns the Unicode HIR class corresponding to the given script extension. /// /// Name canonicalization is assumed to be performed by the caller. /// /// If the given script extension could not be found, or if the script data is /// not available, then an error is returned. 
fn script_extension( canonical_name: &'static str, ) -> Result<hir::ClassUnicode, Error> { #[cfg(not(feature = "unicode-script"))] fn imp(_: &'static str) -> Result<hir::ClassUnicode, Error> { Err(Error::PropertyNotFound) } #[cfg(feature = "unicode-script")] fn imp(name: &'static str) -> Result<hir::ClassUnicode, Error> { use crate::unicode_tables::script_extension::BY_NAME; property_set(BY_NAME, name) .map(hir_class) .ok_or(Error::PropertyValueNotFound) } imp(canonical_name) } /// Returns the Unicode HIR class corresponding to the given Unicode boolean /// property. /// /// Name canonicalization is assumed to be performed by the caller. /// /// If the given boolean property could not be found, or if the boolean /// property data is not available, then an error is returned. fn bool_property( canonical_name: &'static str, ) -> Result<hir::ClassUnicode, Error> { #[cfg(not(feature = "unicode-bool"))] fn imp(_: &'static str) -> Result<hir::ClassUnicode, Error> { Err(Error::PropertyNotFound) } #[cfg(feature = "unicode-bool")] fn imp(name: &'static str) -> Result<hir::ClassUnicode, Error> { use crate::unicode_tables::property_bool::BY_NAME; property_set(BY_NAME, name) .map(hir_class) .ok_or(Error::PropertyNotFound) } match canonical_name { "Decimal_Number" => perl_digit(), "White_Space" => perl_space(), name => imp(name), } } /// Returns the Unicode HIR class corresponding to the given grapheme cluster /// break property. /// /// Name canonicalization is assumed to be performed by the caller. /// /// If the given property could not be found, or if the corresponding data is /// not available, then an error is returned. fn gcb(canonical_name: &'static str) -> Result<hir::ClassUnicode, Error> { #[cfg(not(feature = "unicode-segment"))] fn imp(_: &'static str) -> Result<hir::ClassUnicode, Error> { Err(Error::PropertyNotFound) } #[cfg(feature = "unicode-segment")] fn imp(name: &'static str) -> Result<hir::ClassUnicode, Error> { use crate::unicode_tables::grapheme_cluster_break::BY_NAME; property_set(BY_NAME, name) .map(hir_class) .ok_or(Error::PropertyValueNotFound) } imp(canonical_name) } /// Returns the Unicode HIR class corresponding to the given word break /// property. /// /// Name canonicalization is assumed to be performed by the caller. /// /// If the given property could not be found, or if the corresponding data is /// not available, then an error is returned. fn wb(canonical_name: &'static str) -> Result<hir::ClassUnicode, Error> { #[cfg(not(feature = "unicode-segment"))] fn imp(_: &'static str) -> Result<hir::ClassUnicode, Error> { Err(Error::PropertyNotFound) } #[cfg(feature = "unicode-segment")] fn imp(name: &'static str) -> Result<hir::ClassUnicode, Error> { use crate::unicode_tables::word_break::BY_NAME; property_set(BY_NAME, name) .map(hir_class) .ok_or(Error::PropertyValueNotFound) } imp(canonical_name) } /// Returns the Unicode HIR class corresponding to the given sentence /// break property. /// /// Name canonicalization is assumed to be performed by the caller. /// /// If the given property could not be found, or if the corresponding data is /// not available, then an error is returned. 
fn sb(canonical_name: &'static str) -> Result<hir::ClassUnicode, Error> { #[cfg(not(feature = "unicode-segment"))] fn imp(_: &'static str) -> Result<hir::ClassUnicode, Error> { Err(Error::PropertyNotFound) } #[cfg(feature = "unicode-segment")] fn imp(name: &'static str) -> Result<hir::ClassUnicode, Error> { use crate::unicode_tables::sentence_break::BY_NAME; property_set(BY_NAME, name) .map(hir_class) .ok_or(Error::PropertyValueNotFound) } imp(canonical_name) } /// Like symbolic_name_normalize_bytes, but operates on a string. fn symbolic_name_normalize(x: &str) -> String { let mut tmp = x.as_bytes().to_vec(); let len = symbolic_name_normalize_bytes(&mut tmp).len(); tmp.truncate(len); // This should always succeed because `symbolic_name_normalize_bytes` // guarantees that `&tmp[..len]` is always valid UTF-8. // // N.B. We could avoid the additional UTF-8 check here, but it's unlikely // to be worth skipping the additional safety check. A benchmark must // justify it first. String::from_utf8(tmp).unwrap() } /// Normalize the given symbolic name in place according to UAX44-LM3. /// /// A "symbolic name" typically corresponds to property names and property /// value aliases. Note, though, that it should not be applied to property /// string values. /// /// The slice returned is guaranteed to be valid UTF-8 for all possible values /// of `slice`. /// /// See: https://unicode.org/reports/tr44/#UAX44-LM3 fn symbolic_name_normalize_bytes(slice: &mut [u8]) -> &mut [u8] { // I couldn't find a place in the standard that specified that property // names/aliases had a particular structure (unlike character names), but // we assume that it's ASCII only and drop anything that isn't ASCII. let mut start = 0; let mut starts_with_is = false; if slice.len() >= 2 { // Ignore any "is" prefix. starts_with_is = slice[0..2] == b"is"[..] || slice[0..2] == b"IS"[..] || slice[0..2] == b"iS"[..] || slice[0..2] == b"Is"[..]; if starts_with_is { start = 2; } } let mut next_write = 0; for i in start..slice.len() { // VALIDITY ARGUMENT: To guarantee that the resulting slice is valid // UTF-8, we ensure that the slice contains only ASCII bytes. In // particular, we drop every non-ASCII byte from the normalized string. let b = slice[i]; if b == b' ' || b == b'_' || b == b'-' { continue; } else if b'A' <= b && b <= b'Z' { slice[next_write] = b + (b'a' - b'A'); next_write += 1; } else if b <= 0x7F { slice[next_write] = b; next_write += 1; } } // Special case: ISO_Comment has a 'isc' abbreviation. Since we generally // ignore 'is' prefixes, the 'isc' abbreviation gets caught in the cross // fire and ends up creating an alias for 'c' to 'ISO_Comment', but it // is actually an alias for the 'Other' general category. 
if starts_with_is && next_write == 1 && slice[0] == b'c' { slice[0] = b'i'; slice[1] = b's'; slice[2] = b'c'; next_write = 3; } &mut slice[..next_write] } #[cfg(test)] mod tests { use super::*; #[cfg(feature = "unicode-case")] fn simple_fold_ok(c: char) -> impl Iterator<Item = char> { SimpleCaseFolder::new().unwrap().mapping(c).iter().copied() } #[cfg(feature = "unicode-case")] fn contains_case_map(start: char, end: char) -> bool { SimpleCaseFolder::new().unwrap().overlaps(start, end) } #[test] #[cfg(feature = "unicode-case")] fn simple_fold_k() { let xs: Vec<char> = simple_fold_ok('k').collect(); assert_eq!(xs, alloc::vec!['K', 'K']); let xs: Vec<char> = simple_fold_ok('K').collect(); assert_eq!(xs, alloc::vec!['k', 'K']); let xs: Vec<char> = simple_fold_ok('K').collect(); assert_eq!(xs, alloc::vec!['K', 'k']); } #[test] #[cfg(feature = "unicode-case")] fn simple_fold_a() { let xs: Vec<char> = simple_fold_ok('a').collect(); assert_eq!(xs, alloc::vec!['A']); let xs: Vec<char> = simple_fold_ok('A').collect(); assert_eq!(xs, alloc::vec!['a']); } #[test] #[cfg(not(feature = "unicode-case"))] fn simple_fold_disabled() { assert!(SimpleCaseFolder::new().is_err()); } #[test] #[cfg(feature = "unicode-case")] fn range_contains() { assert!(contains_case_map('A', 'A')); assert!(contains_case_map('Z', 'Z')); assert!(contains_case_map('A', 'Z')); assert!(contains_case_map('@', 'A')); assert!(contains_case_map('Z', '[')); assert!(contains_case_map('☃', 'Ⰰ')); assert!(!contains_case_map('[', '[')); assert!(!contains_case_map('[', '`')); assert!(!contains_case_map('☃', '☃')); } #[test] #[cfg(feature = "unicode-gencat")] fn regression_466() { use super::{CanonicalClassQuery, ClassQuery}; let q = ClassQuery::OneLetter('C'); assert_eq!( q.canonicalize().unwrap(), CanonicalClassQuery::GeneralCategory("Other") ); } #[test] fn sym_normalize() { let sym_norm = symbolic_name_normalize; assert_eq!(sym_norm("Line_Break"), "linebreak"); assert_eq!(sym_norm("Line-break"), "linebreak"); assert_eq!(sym_norm("linebreak"), "linebreak"); assert_eq!(sym_norm("BA"), "ba"); assert_eq!(sym_norm("ba"), "ba"); assert_eq!(sym_norm("Greek"), "greek"); assert_eq!(sym_norm("isGreek"), "greek"); assert_eq!(sym_norm("IS_Greek"), "greek"); assert_eq!(sym_norm("isc"), "isc"); assert_eq!(sym_norm("is c"), "isc"); assert_eq!(sym_norm("is_c"), "isc"); } #[test] fn valid_utf8_symbolic() { let mut x = b"abc\xFFxyz".to_vec(); let y = symbolic_name_normalize_bytes(&mut x); assert_eq!(y, b"abcxyz"); } } <file_sep>/regex-automata/src/util/prefilter/mod.rs /*! Defines a prefilter for accelerating regex searches. A prefilter can be created by building a [`Prefilter`] value. A prefilter represents one of the most important optimizations available for accelerating regex searches. The idea of a prefilter is to very quickly find candidate locations in a haystack where a regex _could_ match. Once a candidate is found, it is then intended for the regex engine to run at that position to determine whether the candidate is a match or a false positive. In the aforementioned description of the prefilter optimization also lay its demise. Namely, if a prefilter has a high false positive rate and it produces lots of candidates, then a prefilter can overall make a regex search slower. It can run more slowly because more time is spent ping-ponging between the prefilter search and the regex engine attempting to confirm each candidate as a match. This ping-ponging has overhead that adds up, and is exacerbated by a high false positive rate. 
Nevertheless, the optimization is still generally worth performing in most cases. Particularly given just how much throughput can be improved. (It is not uncommon for prefilter optimizations to improve throughput by one or two orders of magnitude.) Typically a prefilter is used to find occurrences of literal prefixes from a regex pattern, but this isn't required. A prefilter can be used to look for suffixes or even inner literals. Note that as of now, prefilters throw away information about which pattern each literal comes from. In other words, when a prefilter finds a match, there's no way to know which pattern (or patterns) it came from. Therefore, in order to confirm a match, you'll have to check all of the patterns by running the full regex engine. */ mod aho_corasick; mod byteset; mod memchr; mod memmem; mod teddy; use core::{ borrow::Borrow, fmt::Debug, panic::{RefUnwindSafe, UnwindSafe}, }; #[cfg(feature = "alloc")] use alloc::sync::Arc; #[cfg(feature = "syntax")] use regex_syntax::hir::{literal, Hir}; use crate::util::search::{MatchKind, Span}; pub(crate) use crate::util::prefilter::{ aho_corasick::AhoCorasick, byteset::ByteSet, memchr::{Memchr, Memchr2, Memchr3}, memmem::Memmem, teddy::Teddy, }; /// A prefilter for accelerating regex searches. /// /// If you already have your literals that you want to search with, /// then the vanilla [`Prefilter::new`] constructor is for you. But /// if you have an [`Hir`] value from the `regex-syntax` crate, then /// [`Prefilter::from_hir_prefix`] might be more convenient. Namely, it uses /// the [`regex-syntax::hir::literal`](regex_syntax::hir::literal) module to /// extract literal prefixes for you, optimize them and then select and build a /// prefilter matcher. /// /// A prefilter must have **zero false negatives**. However, by its very /// nature, it may produce false positives. That is, a prefilter will never /// skip over a position in the haystack that corresponds to a match of the /// original regex pattern, but it *may* produce a match for a position /// in the haystack that does *not* correspond to a match of the original /// regex pattern. If you use either the [`Prefilter::from_hir_prefix`] or /// [`Prefilter::from_hirs_prefix`] constructors, then this guarantee is /// upheld for you automatically. This guarantee is not preserved if you use /// [`Prefilter::new`] though, since it is up to the caller to provide correct /// literal strings with respect to the original regex pattern. /// /// # Cloning /// /// It is an API guarantee that cloning a prefilter is cheap. That is, cloning /// it will not duplicate whatever heap memory is used to represent the /// underlying matcher. /// /// # Example /// /// This example shows how to attach a `Prefilter` to the /// [`PikeVM`](crate::nfa::thompson::pikevm::PikeVM) in order to accelerate /// searches. /// /// ``` /// use regex_automata::{ /// nfa::thompson::pikevm::PikeVM, /// util::prefilter::Prefilter, /// Match, MatchKind, /// }; /// /// let pre = Prefilter::new(MatchKind::LeftmostFirst, &["Bruce "]) /// .expect("a prefilter"); /// let re = PikeVM::builder() /// .configure(PikeVM::config().prefilter(Some(pre))) /// .build(r"Bruce \w+")?; /// let mut cache = re.create_cache(); /// assert_eq!( /// Some(Match::must(0, 6..23)), /// re.find(&mut cache, "Hello <NAME>!"), /// ); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// But note that if you get your prefilter incorrect, it could lead to an /// incorrect result! 
/// /// ``` /// use regex_automata::{ /// nfa::thompson::pikevm::PikeVM, /// util::prefilter::Prefilter, /// Match, MatchKind, /// }; /// /// // This prefilter is wrong! /// let pre = Prefilter::new(MatchKind::LeftmostFirst, &["Patti "]) /// .expect("a prefilter"); /// let re = PikeVM::builder() /// .configure(PikeVM::config().prefilter(Some(pre))) /// .build(r"Bruce \w+")?; /// let mut cache = re.create_cache(); /// // We find no match even though the regex does match. /// assert_eq!( /// None, /// re.find(&mut cache, "Hello <NAME>!"), /// ); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[derive(Clone, Debug)] pub struct Prefilter { #[cfg(not(feature = "alloc"))] _unused: (), #[cfg(feature = "alloc")] pre: Arc<dyn PrefilterI>, #[cfg(feature = "alloc")] is_fast: bool, } impl Prefilter { /// Create a new prefilter from a sequence of needles and a corresponding /// match semantics. /// /// This may return `None` for a variety of reasons, for example, if /// a suitable prefilter could not be constructed. That might occur /// if they are unavailable (e.g., the `perf-literal-substring` and /// `perf-literal-multisubstring` features aren't enabled), or it might /// occur because of heuristics or other artifacts of how the prefilter /// works. /// /// Note that if you have an [`Hir`] expression, it may be more convenient /// to use [`Prefilter::from_hir_prefix`]. It will automatically handle the /// task of extracting prefix literals for you. /// /// # Example /// /// This example shows how match semantics can impact the matching /// algorithm used by the prefilter. For this reason, it is important to /// ensure that the match semantics given here are consistent with the /// match semantics intended for the regular expression that the literals /// were extracted from. /// /// ``` /// use regex_automata::{ /// util::{prefilter::Prefilter, syntax}, /// MatchKind, Span, /// }; /// /// let hay = "Hello samwise"; /// /// // With leftmost-first, we find 'samwise' here because it comes /// // before 'sam' in the sequence we give it.. /// let pre = Prefilter::new(MatchKind::LeftmostFirst, &["samwise", "sam"]) /// .expect("a prefilter"); /// assert_eq!( /// Some(Span::from(6..13)), /// pre.find(hay.as_bytes(), Span::from(0..hay.len())), /// ); /// // Still with leftmost-first but with the literals reverse, now 'sam' /// // will match instead! /// let pre = Prefilter::new(MatchKind::LeftmostFirst, &["sam", "samwise"]) /// .expect("a prefilter"); /// assert_eq!( /// Some(Span::from(6..9)), /// pre.find(hay.as_bytes(), Span::from(0..hay.len())), /// ); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn new<B: AsRef<[u8]>>( kind: MatchKind, needles: &[B], ) -> Option<Prefilter> { Choice::new(kind, needles).and_then(Prefilter::from_choice) } /// This turns a prefilter selection into a `Prefilter`. That is, in turns /// the enum given into a trait object. fn from_choice(choice: Choice) -> Option<Prefilter> { #[cfg(not(feature = "alloc"))] { None } #[cfg(feature = "alloc")] { let pre: Arc<dyn PrefilterI> = match choice { Choice::Memchr(p) => Arc::new(p), Choice::Memchr2(p) => Arc::new(p), Choice::Memchr3(p) => Arc::new(p), Choice::Memmem(p) => Arc::new(p), Choice::Teddy(p) => Arc::new(p), Choice::ByteSet(p) => Arc::new(p), Choice::AhoCorasick(p) => Arc::new(p), }; let is_fast = pre.is_fast(); Some(Prefilter { pre, is_fast }) } } /// This attempts to extract prefixes from the given `Hir` expression for /// the given match semantics, and if possible, builds a prefilter for /// them. 
/// /// # Example /// /// This example shows how to build a prefilter directly from an [`Hir`] /// expression, and use to find an occurrence of a prefix from the regex /// pattern. /// /// ``` /// use regex_automata::{ /// util::{prefilter::Prefilter, syntax}, /// MatchKind, Span, /// }; /// /// let hir = syntax::parse(r"(Bruce|Patti) \w+")?; /// let pre = Prefilter::from_hir_prefix(MatchKind::LeftmostFirst, &hir) /// .expect("a prefilter"); /// let hay = "Hello Patti Scialfa!"; /// assert_eq!( /// Some(Span::from(6..12)), /// pre.find(hay.as_bytes(), Span::from(0..hay.len())), /// ); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[cfg(feature = "syntax")] pub fn from_hir_prefix(kind: MatchKind, hir: &Hir) -> Option<Prefilter> { Prefilter::from_hirs_prefix(kind, &[hir]) } /// This attempts to extract prefixes from the given `Hir` expressions for /// the given match semantics, and if possible, builds a prefilter for /// them. /// /// Note that as of now, prefilters throw away information about which /// pattern each literal comes from. In other words, when a prefilter finds /// a match, there's no way to know which pattern (or patterns) it came /// from. Therefore, in order to confirm a match, you'll have to check all /// of the patterns by running the full regex engine. /// /// # Example /// /// This example shows how to build a prefilter directly from multiple /// `Hir` expressions expression, and use it to find an occurrence of a /// prefix from the regex patterns. /// /// ``` /// use regex_automata::{ /// util::{prefilter::Prefilter, syntax}, /// MatchKind, Span, /// }; /// /// let hirs = syntax::parse_many(&[ /// r"(Bruce|Patti) \w+", /// r"Mrs?\. Doubtfire", /// ])?; /// let pre = Prefilter::from_hirs_prefix(MatchKind::LeftmostFirst, &hirs) /// .expect("a prefilter"); /// let hay = "Hello Mrs. Doubtfire"; /// assert_eq!( /// Some(Span::from(6..20)), /// pre.find(hay.as_bytes(), Span::from(0..hay.len())), /// ); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[cfg(feature = "syntax")] pub fn from_hirs_prefix<H: Borrow<Hir>>( kind: MatchKind, hirs: &[H], ) -> Option<Prefilter> { prefixes(kind, hirs) .literals() .and_then(|lits| Prefilter::new(kind, lits)) } /// Run this prefilter on `haystack[span.start..end]` and return a matching /// span if one exists. /// /// The span returned is guaranteed to have a start position greater than /// or equal to the one given, and an end position less than or equal to /// the one given. /// /// # Example /// /// This example shows how to build a prefilter directly from an [`Hir`] /// expression, and use it to find an occurrence of a prefix from the regex /// pattern. /// /// ``` /// use regex_automata::{ /// util::{prefilter::Prefilter, syntax}, /// MatchKind, Span, /// }; /// /// let hir = syntax::parse(r"Bruce \w+")?; /// let pre = Prefilter::from_hir_prefix(MatchKind::LeftmostFirst, &hir) /// .expect("a prefilter"); /// let hay = "Hello <NAME>!"; /// assert_eq!( /// Some(Span::from(6..12)), /// pre.find(hay.as_bytes(), Span::from(0..hay.len())), /// ); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn find(&self, haystack: &[u8], span: Span) -> Option<Span> { #[cfg(not(feature = "alloc"))] { unreachable!() } #[cfg(feature = "alloc")] { self.pre.find(haystack, span) } } /// Returns the span of a prefix of `haystack[span.start..span.end]` if /// the prefilter matches. 
/// /// The span returned is guaranteed to have a start position equivalent to /// the one given, and an end position less than or equal to the one given. /// /// # Example /// /// This example shows how to build a prefilter directly from an [`Hir`] /// expression, and use it to find an occurrence of a prefix from the regex /// pattern that begins at the start of a haystack only. /// /// ``` /// use regex_automata::{ /// util::{prefilter::Prefilter, syntax}, /// MatchKind, Span, /// }; /// /// let hir = syntax::parse(r"Bruce \w+")?; /// let pre = Prefilter::from_hir_prefix(MatchKind::LeftmostFirst, &hir) /// .expect("a prefilter"); /// let hay = "Hello <NAME>!"; /// // Nothing is found here because 'Bruce' does /// // not occur at the beginning of our search. /// assert_eq!( /// None, /// pre.prefix(hay.as_bytes(), Span::from(0..hay.len())), /// ); /// // But if we change where we start the search /// // to begin where 'Bruce ' begins, then a /// // match will be found. /// assert_eq!( /// Some(Span::from(6..12)), /// pre.prefix(hay.as_bytes(), Span::from(6..hay.len())), /// ); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn prefix(&self, haystack: &[u8], span: Span) -> Option<Span> { #[cfg(not(feature = "alloc"))] { unreachable!() } #[cfg(feature = "alloc")] { self.pre.prefix(haystack, span) } } /// Returns the heap memory, in bytes, used by the underlying prefilter. #[inline] pub fn memory_usage(&self) -> usize { #[cfg(not(feature = "alloc"))] { unreachable!() } #[cfg(feature = "alloc")] { self.pre.memory_usage() } } /// Implementations might return true here if they believe themselves to /// be "fast." The concept of "fast" is deliberately left vague, but in /// practice this usually corresponds to whether it's believed that SIMD /// will be used. /// /// Why do we care about this? Well, some prefilter tricks tend to come /// with their own bits of overhead, and so might only make sense if we /// know that a scan will be *much* faster than the regex engine itself. /// Otherwise, the trick may not be worth doing. Whether something is /// "much" faster than the regex engine generally boils down to whether /// SIMD is used. (But not always. Even a SIMD matcher with a high false /// positive rate can become quite slow.) /// /// Even if this returns true, it is still possible for the prefilter to /// be "slow." Remember, prefilters are just heuristics. We can't really /// *know* a prefilter will be fast without actually trying the prefilter. /// (Which of course we cannot afford to do.) #[inline] pub(crate) fn is_fast(&self) -> bool { #[cfg(not(feature = "alloc"))] { unreachable!() } #[cfg(feature = "alloc")] { self.is_fast } } } /// A trait for abstracting over prefilters. Basically, a prefilter is /// something that do an unanchored *and* an anchored search in a haystack /// within a given span. /// /// This exists pretty much only so that we can use prefilters as a trait /// object (which is what `Prefilter` is). If we ever move off of trait objects /// and to an enum, then it's likely this trait could be removed. pub(crate) trait PrefilterI: Debug + Send + Sync + RefUnwindSafe + UnwindSafe + 'static { /// Run this prefilter on `haystack[span.start..end]` and return a matching /// span if one exists. /// /// The span returned is guaranteed to have a start position greater than /// or equal to the one given, and an end position less than or equal to /// the one given. 
fn find(&self, haystack: &[u8], span: Span) -> Option<Span>; /// Returns the span of a prefix of `haystack[span.start..span.end]` if /// the prefilter matches. /// /// The span returned is guaranteed to have a start position equivalent to /// the one given, and an end position less than or equal to the one given. fn prefix(&self, haystack: &[u8], span: Span) -> Option<Span>; /// Returns the heap memory, in bytes, used by the underlying prefilter. fn memory_usage(&self) -> usize; /// Implementations might return true here if they believe themselves to /// be "fast." See [`Prefilter::is_fast`] for more details. fn is_fast(&self) -> bool; } #[cfg(feature = "alloc")] impl<P: PrefilterI + ?Sized> PrefilterI for Arc<P> { #[cfg_attr(feature = "perf-inline", inline(always))] fn find(&self, haystack: &[u8], span: Span) -> Option<Span> { (&**self).find(haystack, span) } #[cfg_attr(feature = "perf-inline", inline(always))] fn prefix(&self, haystack: &[u8], span: Span) -> Option<Span> { (&**self).prefix(haystack, span) } #[cfg_attr(feature = "perf-inline", inline(always))] fn memory_usage(&self) -> usize { (&**self).memory_usage() } #[cfg_attr(feature = "perf-inline", inline(always))] fn is_fast(&self) -> bool { (&**self).is_fast() } } /// A type that encapsulates the selection of a prefilter algorithm from a /// sequence of needles. /// /// The existence of this type is a little tricky, because we don't (currently) /// use it for performing a search. Instead, we really only consume it by /// converting the underlying prefilter into a trait object, whether that be /// `dyn PrefilterI` or `dyn Strategy` (for the meta regex engine). In order /// to avoid re-copying the prefilter selection logic, we isolate it here, and /// then force anything downstream that wants to convert it to a trait object /// to do trivial case analysis on it. /// /// One wonders whether we *should* use an enum instead of a trait object. /// At time of writing, I chose trait objects based on instinct because 1) I /// knew I wasn't going to inline anything and 2) there would potentially be /// many different choices. However, as of time of writing, I haven't actually /// compared the trait object approach to the enum approach. That probably /// should be litigated, but I ran out of steam. /// /// Note that if the `alloc` feature is disabled, then values of this type /// are (and should) never be constructed. Also, in practice, for any of the /// prefilters to be selected, you'll need at least one of the `perf-literal-*` /// features enabled. #[derive(Clone, Debug)] pub(crate) enum Choice { Memchr(Memchr), Memchr2(Memchr2), Memchr3(Memchr3), Memmem(Memmem), Teddy(Teddy), ByteSet(ByteSet), AhoCorasick(AhoCorasick), } impl Choice { /// Select what is believed to be the best prefilter algorithm for the /// match semantics and sequence of needles given. /// /// This selection algorithm uses the needles as given without any /// modification. For example, if `[bar]` is given, then this doesn't /// try to select `memchr` for `b`. Instead, it would select `memmem` /// for `bar`. If callers would want `memchr` selected for `[bar]`, then /// callers should massages the literals themselves. That is, callers are /// responsible for heuristics surrounding which sequence of literals is /// best. /// /// What this selection algorithm does is attempt to use the fastest /// prefilter that works for the literals given. So if `[a, b]`, is given, /// then `memchr2` is selected. 
/// /// Of course, which prefilter is selected is also subject to what /// is available. For example, if `alloc` isn't enabled, then /// that limits which prefilters can be selected. Similarly, if /// `perf-literal-substring` isn't enabled, then nothing from the `memchr` /// crate can be returned. pub(crate) fn new<B: AsRef<[u8]>>( kind: MatchKind, needles: &[B], ) -> Option<Choice> { // An empty set means the regex matches nothing, so no sense in // building a prefilter. if needles.len() == 0 { debug!("prefilter building failed: found empty set of literals"); return None; } // If the regex can match the empty string, then the prefilter // will by definition match at every position. This is obviously // completely ineffective. if needles.iter().any(|n| n.as_ref().is_empty()) { debug!("prefilter building failed: literals match empty string"); return None; } // BREADCRUMBS: Perhaps the literal optimizer should special case // sequences of length two or three if the leading bytes of each are // "rare"? Or perhaps, if there are two or three total possible leading // bytes, regardless of the number of literals, and all are rare... // Then well, perhaps we should use memchr2 or memchr3 in those cases? if let Some(pre) = Memchr::new(kind, needles) { debug!("prefilter built: memchr"); return Some(Choice::Memchr(pre)); } if let Some(pre) = Memchr2::new(kind, needles) { debug!("prefilter built: memchr2"); return Some(Choice::Memchr2(pre)); } if let Some(pre) = Memchr3::new(kind, needles) { debug!("prefilter built: memchr3"); return Some(Choice::Memchr3(pre)); } if let Some(pre) = Memmem::new(kind, needles) { debug!("prefilter built: memmem"); return Some(Choice::Memmem(pre)); } if let Some(pre) = Teddy::new(kind, needles) { debug!("prefilter built: teddy"); return Some(Choice::Teddy(pre)); } if let Some(pre) = ByteSet::new(kind, needles) { debug!("prefilter built: byteset"); return Some(Choice::ByteSet(pre)); } if let Some(pre) = AhoCorasick::new(kind, needles) { debug!("prefilter built: aho-corasick"); return Some(Choice::AhoCorasick(pre)); } debug!("prefilter building failed: no strategy could be found"); None } } /// Extracts all of the prefix literals from the given HIR expressions into a /// single `Seq`. The literals in the sequence are ordered with respect to the /// order of the given HIR expressions and consistent with the match semantics /// given. /// /// The sequence returned is "optimized." That is, they may be shrunk or even /// truncated according to heuristics with the intent of making them more /// useful as a prefilter. (Which translates to both using faster algorithms /// and minimizing the false positive rate.) /// /// Note that this erases any connection between the literals and which pattern /// (or patterns) they came from. /// /// The match kind given must correspond to the match semantics of the regex /// that is represented by the HIRs given. The match semantics may change the /// literal sequence returned. 
#[cfg(feature = "syntax")] pub(crate) fn prefixes<H>(kind: MatchKind, hirs: &[H]) -> literal::Seq where H: core::borrow::Borrow<Hir>, { let mut extractor = literal::Extractor::new(); extractor.kind(literal::ExtractKind::Prefix); let mut prefixes = literal::Seq::empty(); for hir in hirs { prefixes.union(&mut extractor.extract(hir.borrow())); } debug!( "prefixes (len={:?}, exact={:?}) extracted before optimization: {:?}", prefixes.len(), prefixes.is_exact(), prefixes ); match kind { MatchKind::All => { prefixes.sort(); prefixes.dedup(); } MatchKind::LeftmostFirst => { prefixes.optimize_for_prefix_by_preference(); } } debug!( "prefixes (len={:?}, exact={:?}) extracted after optimization: {:?}", prefixes.len(), prefixes.is_exact(), prefixes ); prefixes } /// Like `prefixes`, but for all suffixes of all matches for the given HIRs. #[cfg(feature = "syntax")] pub(crate) fn suffixes<H>(kind: MatchKind, hirs: &[H]) -> literal::Seq where H: core::borrow::Borrow<Hir>, { let mut extractor = literal::Extractor::new(); extractor.kind(literal::ExtractKind::Suffix); let mut suffixes = literal::Seq::empty(); for hir in hirs { suffixes.union(&mut extractor.extract(hir.borrow())); } debug!( "suffixes (len={:?}, exact={:?}) extracted before optimization: {:?}", suffixes.len(), suffixes.is_exact(), suffixes ); match kind { MatchKind::All => { suffixes.sort(); suffixes.dedup(); } MatchKind::LeftmostFirst => { suffixes.optimize_for_suffix_by_preference(); } } debug!( "suffixes (len={:?}, exact={:?}) extracted after optimization: {:?}", suffixes.len(), suffixes.is_exact(), suffixes ); suffixes } <file_sep>/regex-lite/src/interpolate.rs /*! Provides routines for interpolating capture group references. That is, if a replacement string contains references like `$foo` or `${foo1}`, then they are replaced with the corresponding capture values for the groups named `foo` and `foo1`, respectively. Similarly, syntax like `$1` and `${1}` is supported as well, with `1` corresponding to a capture group index and not a name. This module provides the free functions [`string`] and [`bytes`], which interpolate Rust Unicode strings and byte strings, respectively. # Format These routines support two different kinds of capture references: unbraced and braced. For the unbraced format, the format supported is `$ref` where `name` can be any character in the class `[0-9A-Za-z_]`. `ref` is always the longest possible parse. So for example, `$1a` corresponds to the capture group named `1a` and not the capture group at index `1`. If `ref` matches `^[0-9]+$`, then it is treated as a capture group index itself and not a name. For the braced format, the format supported is `${ref}` where `ref` can be any sequence of bytes except for `}`. If no closing brace occurs, then it is not considered a capture reference. As with the unbraced format, if `ref` matches `^[0-9]+$`, then it is treated as a capture group index and not a name. The braced format is useful for exerting precise control over the name of the capture reference. For example, `${1}a` corresponds to the capture group reference `1` followed by the letter `a`, where as `$1a` (as mentioned above) corresponds to the capture group reference `1a`. The braced format is also useful for expressing capture group names that use characters not supported by the unbraced format. For example, `${foo[bar].baz}` refers to the capture group named `foo[bar].baz`. If a capture group reference is found and it does not refer to a valid capture group, then it will be replaced with the empty string. 
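
As a brief illustration, consider a hypothetical regex with capture groups
named `first` and `last`. (The example goes through the crate's public
`Regex::replace` API, which uses these routines internally.)

```
use regex_lite::Regex;

let re = Regex::new(r"(?P<first>\w+) (?P<last>\w+)").unwrap();
// Unbraced references are parsed greedily: '$last' and '$first' refer to
// the groups named 'last' and 'first'.
assert_eq!(re.replace("Ada Lovelace", "$last, $first"), "Lovelace, Ada");
// A braced reference may be followed immediately by more word characters.
assert_eq!(re.replace("Ada Lovelace", "${first}s"), "Adas");
// '$firsts' refers to a group named 'firsts', which does not exist, so it
// is replaced with the empty string.
assert_eq!(re.replace("Ada Lovelace", "$firsts"), "");
```
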
To write a literal `$`, use `$$`. To be clear, and as exhibited via the type signatures in the routines in this module, it is impossible for a replacement string to be invalid. A replacement string may not have the intended semantics, but the interpolation procedure itself can never fail. */ use alloc::string::String; /// Accepts a replacement string and interpolates capture references with their /// corresponding values. /// /// `append` should be a function that appends the string value of a capture /// group at a particular index to the string given. If the capture group /// index is invalid, then nothing should be appended. /// /// `name_to_index` should be a function that maps a capture group name to a /// capture group index. If the given name doesn't exist, then `None` should /// be returned. /// /// Finally, `dst` is where the final interpolated contents should be written. /// If `replacement` contains no capture group references, then `dst` will be /// equivalent to `replacement`. /// /// See the [module documentation](self) for details about the format /// supported. pub fn string( mut replacement: &str, mut append: impl FnMut(usize, &mut String), mut name_to_index: impl FnMut(&str) -> Option<usize>, dst: &mut String, ) { while !replacement.is_empty() { match replacement.find('$') { None => break, Some(i) => { dst.push_str(&replacement[..i]); replacement = &replacement[i..]; } } // Handle escaping of '$'. if replacement.as_bytes().get(1).map_or(false, |&b| b == b'$') { dst.push_str("$"); replacement = &replacement[2..]; continue; } debug_assert!(!replacement.is_empty()); let cap_ref = match find_cap_ref(replacement.as_bytes()) { Some(cap_ref) => cap_ref, None => { dst.push_str("$"); replacement = &replacement[1..]; continue; } }; replacement = &replacement[cap_ref.end..]; match cap_ref.cap { Ref::Number(i) => append(i, dst), Ref::Named(name) => { if let Some(i) = name_to_index(name) { append(i, dst); } } } } dst.push_str(replacement); } /* This should be uncommented and used if we ever provide public APIs for searching `&[u8]`. /// Accepts a replacement byte string and interpolates capture references with /// their corresponding values. /// /// `append` should be a function that appends the byte string value of a /// capture group at a particular index to the byte string given. If the /// capture group index is invalid, then nothing should be appended. /// /// `name_to_index` should be a function that maps a capture group name to a /// capture group index. If the given name doesn't exist, then `None` should /// be returned. /// /// Finally, `dst` is where the final interpolated contents should be written. /// If `replacement` contains no capture group references, then `dst` will be /// equivalent to `replacement`. /// /// See the [module documentation](self) for details about the format /// supported. pub fn bytes( mut replacement: &[u8], mut append: impl FnMut(usize, &mut Vec<u8>), mut name_to_index: impl FnMut(&str) -> Option<usize>, dst: &mut Vec<u8>, ) { while !replacement.is_empty() { match replacement.iter().position(|&b| b == b'$') { None => break, Some(i) => { dst.extend_from_slice(&replacement[..i]); replacement = &replacement[i..]; } } // Handle escaping of '$'. 
if replacement.get(1).map_or(false, |&b| b == b'$') { dst.push(b'$'); replacement = &replacement[2..]; continue; } debug_assert!(!replacement.is_empty()); let cap_ref = match find_cap_ref(replacement) { Some(cap_ref) => cap_ref, None => { dst.push(b'$'); replacement = &replacement[1..]; continue; } }; replacement = &replacement[cap_ref.end..]; match cap_ref.cap { Ref::Number(i) => append(i, dst), Ref::Named(name) => { if let Some(i) = name_to_index(name) { append(i, dst); } } } } dst.extend_from_slice(replacement); } */ /// `CaptureRef` represents a reference to a capture group inside some text. /// The reference is either a capture group name or a number. /// /// It is also tagged with the position in the text following the /// capture reference. #[derive(Clone, Copy, Debug, Eq, PartialEq)] struct CaptureRef<'a> { cap: Ref<'a>, end: usize, } /// A reference to a capture group in some text. /// /// e.g., `$2`, `$foo`, `${foo}`. #[derive(Clone, Copy, Debug, Eq, PartialEq)] enum Ref<'a> { Named(&'a str), Number(usize), } impl<'a> From<&'a str> for Ref<'a> { fn from(x: &'a str) -> Ref<'a> { Ref::Named(x) } } impl From<usize> for Ref<'static> { fn from(x: usize) -> Ref<'static> { Ref::Number(x) } } /// Parses a possible reference to a capture group name in the given text, /// starting at the beginning of `replacement`. /// /// If no such valid reference could be found, None is returned. /// /// Note that this returns a "possible" reference because this routine doesn't /// know whether the reference is to a valid group or not. If it winds up not /// being a valid reference, then it should be replaced with the empty string. fn find_cap_ref(replacement: &[u8]) -> Option<CaptureRef<'_>> { let mut i = 0; let rep: &[u8] = replacement; if rep.len() <= 1 || rep[0] != b'$' { return None; } i += 1; if rep[i] == b'{' { return find_cap_ref_braced(rep, i + 1); } let mut cap_end = i; while rep.get(cap_end).copied().map_or(false, is_valid_cap_letter) { cap_end += 1; } if cap_end == i { return None; } // We just verified that the range 0..cap_end is valid ASCII, so it must // therefore be valid UTF-8. If we really cared, we could avoid this UTF-8 // check via an unchecked conversion or by parsing the number straight from // &[u8]. let cap = core::str::from_utf8(&rep[i..cap_end]) .expect("valid UTF-8 capture name"); Some(CaptureRef { cap: match cap.parse::<usize>() { Ok(i) => Ref::Number(i), Err(_) => Ref::Named(cap), }, end: cap_end, }) } /// Looks for a braced reference, e.g., `${foo1}`. This assumes that an opening /// brace has been found at `i-1` in `rep`. This then looks for a closing /// brace and returns the capture reference within the brace. fn find_cap_ref_braced(rep: &[u8], mut i: usize) -> Option<CaptureRef<'_>> { assert_eq!(b'{', rep[i.checked_sub(1).unwrap()]); let start = i; while rep.get(i).map_or(false, |&b| b != b'}') { i += 1; } if !rep.get(i).map_or(false, |&b| b == b'}') { return None; } // When looking at braced names, we don't put any restrictions on the name, // so it's possible it could be invalid UTF-8. But a capture group name // can never be invalid UTF-8, so if we have invalid UTF-8, then we can // safely return None. let cap = match core::str::from_utf8(&rep[start..i]) { Err(_) => return None, Ok(cap) => cap, }; Some(CaptureRef { cap: match cap.parse::<usize>() { Ok(i) => Ref::Number(i), Err(_) => Ref::Named(cap), }, end: i + 1, }) } /// Returns true if and only if the given byte is allowed in a capture name /// written in non-brace form. 
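/// (That is, bytes in the class `[0-9A-Za-z_]`, matching the unbraced
/// format described in the module documentation.)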
fn is_valid_cap_letter(b: u8) -> bool { match b { b'0'..=b'9' | b'a'..=b'z' | b'A'..=b'Z' | b'_' => true, _ => false, } } #[cfg(test)] mod tests { use alloc::{string::String, vec, vec::Vec}; use super::{find_cap_ref, CaptureRef}; macro_rules! find { ($name:ident, $text:expr) => { #[test] fn $name() { assert_eq!(None, find_cap_ref($text.as_bytes())); } }; ($name:ident, $text:expr, $capref:expr) => { #[test] fn $name() { assert_eq!(Some($capref), find_cap_ref($text.as_bytes())); } }; } macro_rules! c { ($name_or_number:expr, $pos:expr) => { CaptureRef { cap: $name_or_number.into(), end: $pos } }; } find!(find_cap_ref1, "$foo", c!("foo", 4)); find!(find_cap_ref2, "${foo}", c!("foo", 6)); find!(find_cap_ref3, "$0", c!(0, 2)); find!(find_cap_ref4, "$5", c!(5, 2)); find!(find_cap_ref5, "$10", c!(10, 3)); // See https://github.com/rust-lang/regex/pull/585 // for more on characters following numbers find!(find_cap_ref6, "$42a", c!("42a", 4)); find!(find_cap_ref7, "${42}a", c!(42, 5)); find!(find_cap_ref8, "${42"); find!(find_cap_ref9, "${42 "); find!(find_cap_ref10, " $0 "); find!(find_cap_ref11, "$"); find!(find_cap_ref12, " "); find!(find_cap_ref13, ""); find!(find_cap_ref14, "$1-$2", c!(1, 2)); find!(find_cap_ref15, "$1_$2", c!("1_", 3)); find!(find_cap_ref16, "$x-$y", c!("x", 2)); find!(find_cap_ref17, "$x_$y", c!("x_", 3)); find!(find_cap_ref18, "${#}", c!("#", 4)); find!(find_cap_ref19, "${Z[}", c!("Z[", 5)); find!(find_cap_ref20, "${¾}", c!("¾", 5)); find!(find_cap_ref21, "${¾a}", c!("¾a", 6)); find!(find_cap_ref22, "${a¾}", c!("a¾", 6)); find!(find_cap_ref23, "${☃}", c!("☃", 6)); find!(find_cap_ref24, "${a☃}", c!("a☃", 7)); find!(find_cap_ref25, "${☃a}", c!("☃a", 7)); find!(find_cap_ref26, "${名字}", c!("名字", 9)); fn interpolate_string( mut name_to_index: Vec<(&'static str, usize)>, caps: Vec<&'static str>, replacement: &str, ) -> String { name_to_index.sort_by_key(|x| x.0); let mut dst = String::new(); super::string( replacement, |i, dst| { if let Some(&s) = caps.get(i) { dst.push_str(s); } }, |name| -> Option<usize> { name_to_index .binary_search_by_key(&name, |x| x.0) .ok() .map(|i| name_to_index[i].1) }, &mut dst, ); dst } /* fn interpolate_bytes( mut name_to_index: Vec<(&'static str, usize)>, caps: Vec<&'static str>, replacement: &str, ) -> String { name_to_index.sort_by_key(|x| x.0); let mut dst = vec![]; super::bytes( replacement.as_bytes(), |i, dst| { if let Some(&s) = caps.get(i) { dst.extend_from_slice(s.as_bytes()); } }, |name| -> Option<usize> { name_to_index .binary_search_by_key(&name, |x| x.0) .ok() .map(|i| name_to_index[i].1) }, &mut dst, ); String::from_utf8(dst).unwrap() } */ macro_rules! 
interp { ($name:ident, $map:expr, $caps:expr, $hay:expr, $expected:expr $(,)*) => { #[test] fn $name() { assert_eq!( $expected, interpolate_string($map, $caps, $hay), "interpolate::string failed", ); /* assert_eq!( $expected, interpolate_bytes($map, $caps, $hay), "interpolate::bytes failed", ); */ } }; } interp!( interp1, vec![("foo", 2)], vec!["", "", "xxx"], "test $foo test", "test xxx test", ); interp!( interp2, vec![("foo", 2)], vec!["", "", "xxx"], "test$footest", "test", ); interp!( interp3, vec![("foo", 2)], vec!["", "", "xxx"], "test${foo}test", "testxxxtest", ); interp!( interp4, vec![("foo", 2)], vec!["", "", "xxx"], "test$2test", "test", ); interp!( interp5, vec![("foo", 2)], vec!["", "", "xxx"], "test${2}test", "testxxxtest", ); interp!( interp6, vec![("foo", 2)], vec!["", "", "xxx"], "test $$foo test", "test $foo test", ); interp!( interp7, vec![("foo", 2)], vec!["", "", "xxx"], "test $foo", "test xxx", ); interp!( interp8, vec![("foo", 2)], vec!["", "", "xxx"], "$foo test", "xxx test", ); interp!( interp9, vec![("bar", 1), ("foo", 2)], vec!["", "yyy", "xxx"], "test $bar$foo", "test yyyxxx", ); interp!( interp10, vec![("bar", 1), ("foo", 2)], vec!["", "yyy", "xxx"], "test $ test", "test $ test", ); interp!( interp11, vec![("bar", 1), ("foo", 2)], vec!["", "yyy", "xxx"], "test ${} test", "test test", ); interp!( interp12, vec![("bar", 1), ("foo", 2)], vec!["", "yyy", "xxx"], "test ${ } test", "test test", ); interp!( interp13, vec![("bar", 1), ("foo", 2)], vec!["", "yyy", "xxx"], "test ${a b} test", "test test", ); interp!( interp14, vec![("bar", 1), ("foo", 2)], vec!["", "yyy", "xxx"], "test ${a} test", "test test", ); // This is a funny case where a braced reference is never closed, but // within the unclosed braced reference, there is an unbraced reference. // In this case, the braced reference is just treated literally and the // unbraced reference is found. interp!( interp15, vec![("bar", 1), ("foo", 2)], vec!["", "yyy", "xxx"], "test ${wat $bar ok", "test ${wat yyy ok", ); } <file_sep>/regex-automata/README.md regex-automata ============== This crate exposes a variety of regex engines used by the `regex` crate. It provides a vast, sprawling and "expert" level API to each regex engine. The regex engines provided by this crate focus heavily on finite automata implementations and specifically guarantee worst case `O(m * n)` time complexity for all searches. (Where `m ~ len(regex)` and `n ~ len(haystack)`.) [![Build status](https://github.com/rust-lang/regex/workflows/ci/badge.svg)](https://github.com/rust-lang/regex/actions) [![Crates.io](https://img.shields.io/crates/v/regex-automata.svg)](https://crates.io/crates/regex-automata) ### Documentation https://docs.rs/regex-automata ### Example This example shows how to search for matches of multiple regexes, where each regex uses the same capture group names to parse different key-value formats. ```rust use regex_automata::{meta::Regex, PatternID}; let re = Regex::new_many(&[ r#"(?m)^(?<key>[[:word:]]+)=(?<val>[[:word:]]+)$"#, r#"(?m)^(?<key>[[:word:]]+)="(?<val>[^"]+)"$"#, r#"(?m)^(?<key>[[:word:]]+)='(?<val>[^']+)'$"#, r#"(?m)^(?<key>[[:word:]]+):\s*(?<val>[[:word:]]+)$"#, ]).unwrap(); let hay = r#" best_album="Blow Your Face Out" best_quote='"then as it was, then again it will be"' best_year=1973 best_simpsons_episode: HOMR "#; let mut kvs = vec![]; for caps in re.captures_iter(hay) { // N.B. One could use capture indices '1' and '2' here // as well. Capture indices are local to each pattern. // (Just like names are.) 
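    // 'get_group_by_name' returns the span of the group's
    // match, which we use to slice the haystack directly.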
let key = &hay[caps.get_group_by_name("key").unwrap()]; let val = &hay[caps.get_group_by_name("val").unwrap()]; kvs.push((key, val)); } assert_eq!(kvs, vec![ ("best_album", "Blow Your Face Out"), ("best_quote", "\"then as it was, then again it will be\""), ("best_year", "1973"), ("best_simpsons_episode", "HOMR"), ]); ``` ### Safety **I welcome audits of `unsafe` code.** This crate tries to be extremely conservative in its use of `unsafe`, but does use it in a few spots. In general, I am very open to removing uses of `unsafe` if it doesn't result in measurable performance regressions and doesn't result in significantly more complex code. Below is an outline of how `unsafe` is used in this crate. * `util::pool::Pool` makes use of `unsafe` to implement a fast path for accessing an element of the pool. The fast path applies to the first thread that uses the pool. In effect, the fast path is fast because it avoid a mutex lock. `unsafe` is also used in the no-std version of `Pool` to implement a spin lock for synchronization. * `util::lazy::Lazy` uses `unsafe` to implement a variant of `once_cell::sync::Lazy` that works in no-std environments. A no-std no-alloc implementation is also provided that requires use of `unsafe`. * The `dfa` module makes extensive use of `unsafe` to support zero-copy deserialization of DFAs. The high level problem is that you need to get from `&[u8]` to the internal representation of a DFA without doing any copies. This is required for support in no-std no-alloc environments. It also makes deserialization extremely cheap. * The `dfa` and `hybrid` modules use `unsafe` to explicitly elide bounds checks in the core search loops. This makes the codegen tighter and typically leads to consistent 5-10% performance improvements on some workloads. In general, the above reflect the only uses of `unsafe` throughout the entire `regex` crate. At present, there are no plans to meaningfully expand the use of `unsafe`. With that said, one thing folks have been asking for is cheap deserialization of a `regex::Regex`. My sense is that this feature will require a lot more `unsafe` in places to support zero-copy deserialization. It is unclear at this point whether this will be pursued. ### Motivation I started out building this crate because I wanted to re-work the `regex` crate internals to make it more amenable to optimizations. It turns out that there are a lot of different ways to build regex engines and even more ways to compose them. Moreover, heuristic literal optimizations are often tricky to get correct, but the fruit they bear is attractive. All of these things were difficult to expand upon without risking the introduction of more bugs. So I decided to tear things down and start fresh. In the course of doing so, I ended up designing strong boundaries between each component so that each component could be reasoned and tested independently. This also made it somewhat natural to expose the components as a library unto itself. Namely, folks have been asking for more capabilities in the regex crate for a long time, but these capabilities usually come with additional API complexity that I didn't want to introduce in the `regex` crate proper. But exposing them in an "expert" level crate like `regex-automata` seemed quite fine. In the end, I do still somewhat consider this crate an experiment. It is unclear whether the strong boundaries between components will be an impediment to ongoing development or not. 
De-coupling tends to lead to slower development in my experience, and when you mix in the added cost of not introducing breaking changes all of the time, things can get quite complicated. But, I don't think anyone has ever release the internals of a regex engine as a library before. So it will be interesting to see how it plays out! <file_sep>/regex-automata/src/dfa/start.rs use core::mem::size_of; use crate::util::wire::{self, DeserializeError, Endian, SerializeError}; /// The kind of anchored starting configurations to support in a DFA. /// /// Fully compiled DFAs need to be explicitly configured as to which anchored /// starting configurations to support. The reason for not just supporting /// everything unconditionally is that it can use more resources (such as /// memory and build time). The downside of this is that if you try to execute /// a search using an [`Anchored`](crate::Anchored) mode that is not supported /// by the DFA, then the search will return an error. #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub enum StartKind { /// Support both anchored and unanchored searches. Both, /// Support only unanchored searches. Requesting an anchored search will /// panic. /// /// Note that even if an unanchored search is requested, the pattern itself /// may still be anchored. For example, `^abc` will only match `abc` at the /// start of a haystack. This will remain true, even if the regex engine /// only supported unanchored searches. Unanchored, /// Support only anchored searches. Requesting an unanchored search will /// panic. Anchored, } impl StartKind { pub(crate) fn from_bytes( slice: &[u8], ) -> Result<(StartKind, usize), DeserializeError> { wire::check_slice_len(slice, size_of::<u32>(), "start kind bytes")?; let (n, nr) = wire::try_read_u32(slice, "start kind integer")?; match n { 0 => Ok((StartKind::Both, nr)), 1 => Ok((StartKind::Unanchored, nr)), 2 => Ok((StartKind::Anchored, nr)), _ => Err(DeserializeError::generic("unrecognized start kind")), } } pub(crate) fn write_to<E: Endian>( &self, dst: &mut [u8], ) -> Result<usize, SerializeError> { let nwrite = self.write_to_len(); if dst.len() < nwrite { return Err(SerializeError::buffer_too_small("start kind")); } let n = match *self { StartKind::Both => 0, StartKind::Unanchored => 1, StartKind::Anchored => 2, }; E::write_u32(n, dst); Ok(nwrite) } pub(crate) fn write_to_len(&self) -> usize { size_of::<u32>() } #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn has_unanchored(&self) -> bool { matches!(*self, StartKind::Both | StartKind::Unanchored) } #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn has_anchored(&self) -> bool { matches!(*self, StartKind::Both | StartKind::Anchored) } } <file_sep>/regex-cli/cmd/debug/mod.rs use std::io::{stdout, Write}; use crate::{ args, util::{self, Table}, }; mod dfa; mod literal; pub fn run(p: &mut lexopt::Parser) -> anyhow::Result<()> { const USAGE: &'static str = "\ Prints the debug representation of various things from regex-automata and regex-syntax. This is useful for ad hoc interactions with objects on the command line. In general, most objects support the full suite of configuration available in code via the crate. USAGE: regex-cli debug <command> ... COMMANDS: ast Print the debug representation of an AST. dense Print the debug representation of a dense DFA. hir Print the debug representation of an HIR. literal Print the debug representation of extracted literals. onepass Print the debug representation of a one-pass DFA. 
sparse Print the debug representation of a sparse DFA. thompson Print the debug representation of a Thompson NFA. "; let cmd = args::next_as_command(USAGE, p)?; match &*cmd { "ast" => run_ast(p), "dense" => dfa::run_dense(p), "hir" => run_hir(p), "literal" => literal::run(p), "onepass" => run_onepass(p), "sparse" => dfa::run_sparse(p), "thompson" => run_thompson(p), unk => anyhow::bail!("unrecognized command '{}'", unk), } } fn run_ast(p: &mut lexopt::Parser) -> anyhow::Result<()> { const USAGE: &'static str = "\ Prints the debug representation of an abstract syntax tree (AST). USAGE: regex-cli debug ast <pattern> TIP: use -h for short docs and --help for long docs OPTIONS: %options% "; let mut common = args::common::Config::default(); let mut patterns = args::patterns::Config::positional(); let mut syntax = args::syntax::Config::default(); args::configure(p, USAGE, &mut [&mut common, &mut patterns, &mut syntax])?; let pats = patterns.get()?; anyhow::ensure!( pats.len() == 1, "only one pattern is allowed, but {} were given", pats.len(), ); let mut table = Table::empty(); let (asts, time) = util::timeitr(|| syntax.asts(&pats))?; table.add("parse time", time); if common.table() { table.print(stdout())?; } if !common.quiet { if common.table() { writeln!(stdout(), "")?; } writeln!(stdout(), "{:#?}", &asts[0])?; } Ok(()) } fn run_hir(p: &mut lexopt::Parser) -> anyhow::Result<()> { const USAGE: &'static str = "\ Prints the debug representation of a high-level intermediate representation (HIR). USAGE: regex-cli debug hir <pattern> TIP: use -h for short docs and --help for long docs OPTIONS: %options% "; let mut common = args::common::Config::default(); let mut patterns = args::patterns::Config::positional(); let mut syntax = args::syntax::Config::default(); args::configure(p, USAGE, &mut [&mut common, &mut patterns, &mut syntax])?; let pats = patterns.get()?; anyhow::ensure!( pats.len() == 1, "only one pattern is allowed, but {} were given", pats.len(), ); let mut table = Table::empty(); let (asts, time) = util::timeitr(|| syntax.asts(&pats))?; table.add("parse time", time); let (hirs, time) = util::timeitr(|| syntax.hirs(&pats, &asts))?; table.add("translate time", time); if common.table() { table.print(stdout())?; } if !common.quiet { if common.table() { writeln!(stdout(), "")?; } writeln!(stdout(), "{:#?}", &hirs[0])?; } Ok(()) } fn run_onepass(p: &mut lexopt::Parser) -> anyhow::Result<()> { const USAGE: &'static str = "\ Prints the debug representation of a one-pass DFA. USAGE: regex-cli debug onepass [<pattern> ...] 
TIP: use -h for short docs and --help for long docs OPTIONS: %options% "; let mut common = args::common::Config::default(); let mut patterns = args::patterns::Config::positional(); let mut syntax = args::syntax::Config::default(); let mut thompson = args::thompson::Config::default(); let mut onepass = args::onepass::Config::default(); args::configure( p, USAGE, &mut [ &mut common, &mut patterns, &mut syntax, &mut thompson, &mut onepass, ], )?; let pats = patterns.get()?; let mut table = Table::empty(); let (asts, time) = util::timeitr(|| syntax.asts(&pats))?; table.add("parse time", time); let (hirs, time) = util::timeitr(|| syntax.hirs(&pats, &asts))?; table.add("translate time", time); let (nfa, time) = util::timeitr(|| thompson.from_hirs(&hirs))?; table.add("compile nfa time", time); let (dfa, time) = util::timeitr(|| onepass.from_nfa(&nfa))?; table.add("compile one-pass DFA time", time); table.add("memory", dfa.memory_usage()); table.add("states", dfa.state_len()); table.add("pattern len", dfa.pattern_len()); table.add("alphabet len", dfa.alphabet_len()); table.add("stride", dfa.stride()); if common.table() { table.print(stdout())?; } if !common.quiet { if common.table() { writeln!(stdout(), "")?; } writeln!(stdout(), "{:?}", dfa)?; } Ok(()) } fn run_thompson(p: &mut lexopt::Parser) -> anyhow::Result<()> { const USAGE: &'static str = "\ Prints the debug representation of a Thompson NFA. USAGE: regex-cli debug thompson [<pattern> ...] TIP: use -h for short docs and --help for long docs OPTIONS: %options% "; let mut common = args::common::Config::default(); let mut patterns = args::patterns::Config::positional(); let mut syntax = args::syntax::Config::default(); let mut thompson = args::thompson::Config::default(); args::configure( p, USAGE, &mut [&mut common, &mut patterns, &mut syntax, &mut thompson], )?; let pats = patterns.get()?; let mut table = Table::empty(); let (asts, time) = util::timeitr(|| syntax.asts(&pats))?; table.add("parse time", time); let (hirs, time) = util::timeitr(|| syntax.hirs(&pats, &asts))?; table.add("translate time", time); let (nfa, time) = util::timeitr(|| thompson.from_hirs(&hirs))?; table.add("compile nfa time", time); table.add("memory", nfa.memory_usage()); table.add("states", nfa.states().len()); table.add("pattern len", nfa.pattern_len()); table.add("capture len", nfa.group_info().all_group_len()); table.add("has empty?", nfa.has_empty()); table.add("is utf8?", nfa.is_utf8()); table.add("is reverse?", nfa.is_reverse()); table.add( "line terminator", bstr::BString::from(&[nfa.look_matcher().get_line_terminator()][..]), ); table.add("lookset any", nfa.look_set_any()); table.add("lookset prefix any", nfa.look_set_prefix_any()); if common.table() { table.print(stdout())?; } if !common.quiet { if common.table() { writeln!(stdout(), "")?; } writeln!(stdout(), "{:?}", nfa)?; } Ok(()) } <file_sep>/tests/lib.rs #![cfg_attr(feature = "pattern", feature(pattern))] mod fuzz; mod misc; mod regression; mod regression_fuzz; mod replace; #[cfg(feature = "pattern")] mod searcher; mod suite_bytes; mod suite_bytes_set; mod suite_string; mod suite_string_set; const BLACKLIST: &[&str] = &[ // Nothing to blacklist yet! ]; fn suite() -> anyhow::Result<regex_test::RegexTests> { let _ = env_logger::try_init(); let mut tests = regex_test::RegexTests::new(); macro_rules! 
load { ($name:expr) => {{ const DATA: &[u8] = include_bytes!(concat!("../testdata/", $name, ".toml")); tests.load_slice($name, DATA)?; }}; } load!("anchored"); load!("bytes"); load!("crazy"); load!("crlf"); load!("earliest"); load!("empty"); load!("expensive"); load!("flags"); load!("iter"); load!("leftmost-all"); load!("line-terminator"); load!("misc"); load!("multiline"); load!("no-unicode"); load!("overlapping"); load!("regression"); load!("set"); load!("substring"); load!("unicode"); load!("utf8"); load!("word-boundary"); load!("fowler/basic"); load!("fowler/nullsubexpr"); load!("fowler/repetition"); Ok(tests) } <file_sep>/.github/workflows/ci.yml name: ci on: pull_request: push: branches: - master schedule: - cron: '00 01 * * *' # The section is needed to drop write-all permissions that are granted on # `schedule` event. By specifying any permission explicitly all others are set # to none. By using the principle of least privilege the damage a compromised # workflow can do (because of an injection or compromised third party tool or # action) is restricted. Currently the worklow doesn't need any additional # permission except for pulling the code. Adding labels to issues, commenting # on pull-requests, etc. may need additional permissions: # # Syntax for this section: # https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions # # Reference for how to assign permissions on a job-by-job basis: # https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs # # Reference for available permissions that we can enable if needed: # https://docs.github.com/en/actions/security-guides/automatic-token-authentication#permissions-for-the-github_token permissions: # to fetch code (actions/checkout) contents: read jobs: # This job does our basic build+test for supported platforms. test: env: # For some builds, we use cross to test on 32-bit and big-endian # systems. CARGO: cargo # When CARGO is set to CROSS, TARGET is set to `--target matrix.target`. # Note that we only use cross on Linux, so setting a target on a # different OS will just use normal cargo. TARGET: # Bump this as appropriate. We pin to a version to make sure CI # continues to work as cross releases in the past have broken things # in subtle ways. CROSS_VERSION: v0.2.5 runs-on: ${{ matrix.os }} strategy: fail-fast: false matrix: include: - build: stable os: ubuntu-latest rust: stable - build: stable-32 os: ubuntu-latest rust: stable target: i686-unknown-linux-gnu - build: stable-powerpc64 os: ubuntu-latest rust: stable target: powerpc64-unknown-linux-gnu - build: stable-s390x os: ubuntu-latest rust: stable target: s390x-unknown-linux-gnu - build: beta os: ubuntu-latest rust: beta - build: nightly os: ubuntu-latest rust: nightly - build: macos os: macos-latest rust: stable - build: win-msvc os: windows-latest rust: stable - build: win-gnu os: windows-latest rust: stable-x86_64-gnu steps: - name: Checkout repository uses: actions/checkout@v3 - name: Install Rust uses: dtolnay/rust-toolchain@master with: toolchain: ${{ matrix.rust }} - name: Install and configure Cross if: matrix.os == 'ubuntu-latest' && matrix.target != '' run: | # In the past, new releases of 'cross' have broken CI. So for now, we # pin it. We also use their pre-compiled binary releases because cross # has over 100 dependencies and takes a bit to compile. 
dir="$RUNNER_TEMP/cross-download" mkdir "$dir" echo "$dir" >> $GITHUB_PATH cd "$dir" curl -LO "https://github.com/cross-rs/cross/releases/download/$CROSS_VERSION/cross-x86_64-unknown-linux-musl.tar.gz" tar xf cross-x86_64-unknown-linux-musl.tar.gz echo "CARGO=cross" >> $GITHUB_ENV echo "TARGET=--target ${{ matrix.target }}" >> $GITHUB_ENV - name: Show command used for Cargo run: | echo "cargo command is: $CARGO" echo "target flag is: $TARGET" - name: Show CPU info for debugging if: matrix.os == 'ubuntu-latest' run: lscpu - name: Basic build run: ${{ env.CARGO }} build --verbose $TARGET - name: Build docs run: ${{ env.CARGO }} doc --verbose $TARGET - name: Run subset of tests run: ${{ env.CARGO }} test --verbose --test integration $TARGET - name: Build regex-syntax docs run: ${{ env.CARGO }} doc --verbose --manifest-path regex-syntax/Cargo.toml $TARGET - name: Run subset of regex-syntax tests run: ${{ env.CARGO }} test --verbose --manifest-path regex-syntax/Cargo.toml $TARGET - name: Build regex-automata docs run: ${{ env.CARGO }} doc --verbose --manifest-path regex-automata/Cargo.toml $TARGET - name: Run subset of regex-automata tests if: matrix.build != 'win-gnu' # Just horrifically slow. run: ${{ env.CARGO }} test --verbose --manifest-path regex-automata/Cargo.toml $TARGET - name: Run regex-lite tests run: ${{ env.CARGO }} test --verbose --manifest-path regex-lite/Cargo.toml $TARGET - name: Run regex-cli tests run: ${{ env.CARGO }} test --verbose --manifest-path regex-cli/Cargo.toml $TARGET # This job runs a stripped down version of CI to test the MSRV. The specific # reason for doing this is that the regex crate's dev-dependencies tend to # evolve more quickly. There isn't as tight of a control on them because, # well, they're only used in tests and their MSRV doesn't matter as much. # # It is a bit unfortunate that our MSRV test is basically just "build it" # and pass if that works. But usually MSRV is broken by compilation problems # and not runtime behavior. So this is in practice good enough. msrv: runs-on: ubuntu-latest steps: - name: Checkout repository uses: actions/checkout@v3 - name: Install Rust uses: dtolnay/rust-toolchain@master with: toolchain: 1.60.0 # The memchr 2.6 release purportedly bumped its MSRV to Rust 1.60, but it # turned out that on aarch64, it was using something that wasn't stabilized # until Rust 1.61[1]. (This was an oversight on my part. I had previously # thought everything I needed was on Rust 1.60.) To resolve that, I just # bumped memchr's MSRV to 1.61. Since it was so soon after the memchr 2.6 # release, I treated this as a bugfix. # # But the regex crate's MSRV is at Rust 1.60, and it now depends on at # least memchr 2.6 (to make use of its `alloc` feature). So we can't set # a lower minimal version. And I can't just bump the MSRV in a patch # release as a bug fix because regex 1.9 was released quite some time ago. # I could just release regex 1.10 and bump the MSRV there, but eh, I don't # want to put out another minor version release just for this. # # So... pin memchr to 2.6.2, which at least works on x86-64 on Rust 1.60. # # [1]: https://github.com/BurntSushi/memchr/issues/136 - name: Pin memchr to 2.6.2 run: cargo update -p memchr --precise 2.6.2 - name: Basic build run: cargo build --verbose - name: Build docs run: cargo doc --verbose # This job runs many more tests for the regex crate proper. Basically, # it repeats the same test suite for a bunch of different crate feature # combinations. 
There are so many features that exhaustive testing isn't # really possible, but we cover as much as is feasible. # # If there is a feature combo that should be tested but isn't, you'll want to # add it to the appropriate 'test' script in this repo. testfull-regex: runs-on: ubuntu-latest steps: - name: Checkout repository uses: actions/checkout@v3 - name: Install Rust uses: dtolnay/rust-toolchain@master with: toolchain: stable - name: Run full test suite run: ./test # Same as above, but for regex-automata, which has even more crate features! testfull-regex-automata: runs-on: ubuntu-latest steps: - name: Checkout repository uses: actions/checkout@v3 - name: Install Rust uses: dtolnay/rust-toolchain@master with: toolchain: stable - name: Run full test suite run: ./regex-automata/test # Same as above, but for regex-syntax. testfull-regex-syntax: runs-on: ubuntu-latest steps: - name: Checkout repository uses: actions/checkout@v3 - name: Install Rust uses: dtolnay/rust-toolchain@master with: toolchain: stable - name: Run full test suite run: ./regex-syntax/test # Same as above, but for regex-capi. testfull-regex-capi: runs-on: ubuntu-latest steps: - name: Checkout repository uses: actions/checkout@v3 - name: Install Rust uses: dtolnay/rust-toolchain@master with: toolchain: stable - name: Run full test suite run: ./regex-capi/test # Runs miri on regex-automata's test suite. This doesn't quite cover # everything. Many tests are disabled when building with miri because of # how slow miri runs. But it still gives us decent coverage. miri-regex-automata: runs-on: ubuntu-latest steps: - name: Checkout repository uses: actions/checkout@v3 - name: Install Rust uses: dtolnay/rust-toolchain@master with: # We use nightly here so that we can use miri I guess? # It caught me by surprise that miri seems to only be # available on nightly. toolchain: nightly components: miri - name: Run full test suite run: cargo miri test --manifest-path regex-automata/Cargo.toml # Tests that everything is formatted correctly. rustfmt: runs-on: ubuntu-latest steps: - name: Checkout repository uses: actions/checkout@v3 - name: Install Rust uses: dtolnay/rust-toolchain@master with: toolchain: stable components: rustfmt - name: Check formatting run: | cargo fmt --all -- --check <file_sep>/regex-automata/src/nfa/thompson/builder.rs use core::mem; use alloc::{sync::Arc, vec, vec::Vec}; use crate::{ nfa::thompson::{ error::BuildError, nfa::{self, SparseTransitions, Transition, NFA}, }, util::{ look::{Look, LookMatcher}, primitives::{IteratorIndexExt, PatternID, SmallIndex, StateID}, }, }; /// An intermediate NFA state used during construction. /// /// During construction of an NFA, it is often convenient to work with states /// that are amenable to mutation and other carry more information than we /// otherwise need once an NFA has been built. This type represents those /// needs. /// /// Once construction is finished, the builder will convert these states to a /// [`nfa::thompson::State`](crate::nfa::thompson::State). This conversion not /// only results in a simpler representation, but in some cases, entire classes /// of states are completely removed (such as [`State::Empty`]). #[derive(Clone, Debug, Eq, PartialEq)] enum State { /// An empty state whose only purpose is to forward the automaton to /// another state via an unconditional epsilon transition. 
/// /// Unconditional epsilon transitions are quite useful during the /// construction of an NFA, as they permit the insertion of no-op /// placeholders that make it easier to compose NFA sub-graphs. When /// the Thompson NFA builder produces a final NFA, all unconditional /// epsilon transitions are removed, and state identifiers are remapped /// accordingly. Empty { /// The next state that this state should transition to. next: StateID, }, /// A state that only transitions to another state if the current input /// byte is in a particular range of bytes. ByteRange { trans: Transition }, /// A state with possibly many transitions, represented in a sparse /// fashion. Transitions must be ordered lexicographically by input range /// and be non-overlapping. As such, this may only be used when every /// transition has equal priority. (In practice, this is only used for /// encoding large UTF-8 automata.) In contrast, a `Union` state has each /// alternate in order of priority. Priority is used to implement greedy /// matching and also alternations themselves, e.g., `abc|a` where `abc` /// has priority over `a`. /// /// To clarify, it is possible to remove `Sparse` and represent all things /// that `Sparse` is used for via `Union`. But this creates a more bloated /// NFA with more epsilon transitions than is necessary in the special case /// of character classes. Sparse { transitions: Vec<Transition> }, /// A conditional epsilon transition satisfied via some sort of /// look-around. Look { look: Look, next: StateID }, /// An empty state that records the start of a capture location. This is an /// unconditional epsilon transition like `Empty`, except it can be used to /// record position information for a captue group when using the NFA for /// search. CaptureStart { /// The ID of the pattern that this capture was defined. pattern_id: PatternID, /// The capture group index that this capture state corresponds to. /// The capture group index is always relative to its corresponding /// pattern. Therefore, in the presence of multiple patterns, both the /// pattern ID and the capture group index are required to uniquely /// identify a capturing group. group_index: SmallIndex, /// The next state that this state should transition to. next: StateID, }, /// An empty state that records the end of a capture location. This is an /// unconditional epsilon transition like `Empty`, except it can be used to /// record position information for a captue group when using the NFA for /// search. CaptureEnd { /// The ID of the pattern that this capture was defined. pattern_id: PatternID, /// The capture group index that this capture state corresponds to. /// The capture group index is always relative to its corresponding /// pattern. Therefore, in the presence of multiple patterns, both the /// pattern ID and the capture group index are required to uniquely /// identify a capturing group. group_index: SmallIndex, /// The next state that this state should transition to. next: StateID, }, /// An alternation such that there exists an epsilon transition to all /// states in `alternates`, where matches found via earlier transitions /// are preferred over later transitions. Union { alternates: Vec<StateID> }, /// An alternation such that there exists an epsilon transition to all /// states in `alternates`, where matches found via later transitions are /// preferred over earlier transitions. 
/// /// This "reverse" state exists for convenience during compilation that /// permits easy construction of non-greedy combinations of NFA states. At /// the end of compilation, Union and UnionReverse states are merged into /// one Union type of state, where the latter has its epsilon transitions /// reversed to reflect the priority inversion. /// /// The "convenience" here arises from the fact that as new states are /// added to the list of `alternates`, we would like that add operation /// to be amortized constant time. But if we used a `Union`, we'd need to /// prepend the state, which takes O(n) time. There are other approaches we /// could use to solve this, but this seems simple enough. UnionReverse { alternates: Vec<StateID> }, /// A state that cannot be transitioned out of. This is useful for cases /// where you want to prevent matching from occurring. For example, if your /// regex parser permits empty character classes, then one could choose a /// `Fail` state to represent it. Fail, /// A match state. There is at most one such occurrence of this state in /// an NFA for each pattern compiled into the NFA. At time of writing, a /// match state is always produced for every pattern given, but in theory, /// if a pattern can never lead to a match, then the match state could be /// omitted. /// /// `pattern_id` refers to the ID of the pattern itself, which corresponds /// to the pattern's index (starting at 0). Match { pattern_id: PatternID }, } impl State { /// If this state is an unconditional espilon transition, then this returns /// the target of the transition. fn goto(&self) -> Option<StateID> { match *self { State::Empty { next } => Some(next), State::Union { ref alternates } if alternates.len() == 1 => { Some(alternates[0]) } State::UnionReverse { ref alternates } if alternates.len() == 1 => { Some(alternates[0]) } _ => None, } } /// Returns the heap memory usage, in bytes, of this state. fn memory_usage(&self) -> usize { match *self { State::Empty { .. } | State::ByteRange { .. } | State::Look { .. } | State::CaptureStart { .. } | State::CaptureEnd { .. } | State::Fail | State::Match { .. } => 0, State::Sparse { ref transitions } => { transitions.len() * mem::size_of::<Transition>() } State::Union { ref alternates } => { alternates.len() * mem::size_of::<StateID>() } State::UnionReverse { ref alternates } => { alternates.len() * mem::size_of::<StateID>() } } } } /// An abstraction for building Thompson NFAs by hand. /// /// A builder is what a [`thompson::Compiler`](crate::nfa::thompson::Compiler) /// uses internally to translate a regex's high-level intermediate /// representation into an [`NFA`]. /// /// The primary function of this builder is to abstract away the internal /// representation of an NFA and make it difficult to produce NFAs are that /// internally invalid or inconsistent. This builder also provides a way to /// add "empty" states (which can be thought of as unconditional epsilon /// transitions), despite the fact that [`thompson::State`](nfa::State) does /// not have any "empty" representation. The advantage of "empty" states is /// that they make the code for constructing a Thompson NFA logically simpler. /// /// Many of the routines on this builder may panic or return errors. Generally /// speaking, panics occur when an invalid sequence of method calls were made, /// where as an error occurs if things get too big. 
(Where "too big" might mean /// exhausting identifier space or using up too much heap memory in accordance /// with the configured [`size_limit`](Builder::set_size_limit).) /// /// # Overview /// /// ## Adding multiple patterns /// /// Each pattern you add to an NFA should correspond to a pair of /// [`Builder::start_pattern`] and [`Builder::finish_pattern`] calls, with /// calls inbetween that add NFA states for that pattern. NFA states may be /// added without first calling `start_pattern`, with the exception of adding /// capturing states. /// /// ## Adding NFA states /// /// Here is a very brief overview of each of the methods that add NFA states. /// Every method adds a single state. /// /// * [`add_empty`](Builder::add_empty): Add a state with a single /// unconditional epsilon transition to another state. /// * [`add_union`](Builder::add_union): Adds a state with unconditional /// epsilon transitions to two or more states, with earlier transitions /// preferred over later ones. /// * [`add_union_reverse`](Builder::add_union_reverse): Adds a state with /// unconditional epsilon transitions to two or more states, with later /// transitions preferred over earlier ones. /// * [`add_range`](Builder::add_range): Adds a state with a single transition /// to another state that can only be followed if the current input byte is /// within the range given. /// * [`add_sparse`](Builder::add_sparse): Adds a state with two or more /// range transitions to other states, where a transition is only followed /// if the current input byte is within one of the ranges. All transitions /// in this state have equal priority, and the corresponding ranges must be /// non-overlapping. /// * [`add_look`](Builder::add_look): Adds a state with a single *conditional* /// epsilon transition to another state, where the condition depends on a /// limited look-around property. /// * [`add_capture_start`](Builder::add_capture_start): Adds a state with /// a single unconditional epsilon transition that also instructs an NFA /// simulation to record the current input position to a specific location in /// memory. This is intended to represent the starting location of a capturing /// group. /// * [`add_capture_end`](Builder::add_capture_end): Adds a state with /// a single unconditional epsilon transition that also instructs an NFA /// simulation to record the current input position to a specific location in /// memory. This is intended to represent the ending location of a capturing /// group. /// * [`add_fail`](Builder::add_fail): Adds a state that never transitions to /// another state. /// * [`add_match`](Builder::add_match): Add a state that indicates a match has /// been found for a particular pattern. A match state is a final state with /// no outgoing transitions. /// /// ## Setting transitions between NFA states /// /// The [`Builder::patch`] method creates a transition from one state to the /// next. If the `from` state corresponds to a state that supports multiple /// outgoing transitions (such as "union"), then this adds the corresponding /// transition. Otherwise, it sets the single transition. (This routine panics /// if `from` corresponds to a state added by `add_sparse`, since sparse states /// need more specialized handling.) /// /// # Example /// /// This annotated example shows how to hand construct the regex `[a-z]+` /// (without an unanchored prefix). 
/// /// ``` /// use regex_automata::{ /// nfa::thompson::{pikevm::PikeVM, Builder, Transition}, /// util::primitives::StateID, /// Match, /// }; /// /// let mut builder = Builder::new(); /// // Before adding NFA states for our pattern, we need to tell the builder /// // that we are starting the pattern. /// builder.start_pattern()?; /// // Since we use the Pike VM below for searching, we need to add capturing /// // states. If you're just going to build a DFA from the NFA, then capturing /// // states do not need to be added. /// let start = builder.add_capture_start(StateID::ZERO, 0, None)?; /// let range = builder.add_range(Transition { /// // We don't know the state ID of the 'next' state yet, so we just fill /// // in a dummy 'ZERO' value. /// start: b'a', end: b'z', next: StateID::ZERO, /// })?; /// // This state will point back to 'range', but also enable us to move ahead. /// // That is, this implements the '+' repetition operator. We add 'range' and /// // then 'end' below to this alternation. /// let alt = builder.add_union(vec![])?; /// // The final state before the match state, which serves to capture the /// // end location of the match. /// let end = builder.add_capture_end(StateID::ZERO, 0)?; /// // The match state for our pattern. /// let mat = builder.add_match()?; /// // Now we fill in the transitions between states. /// builder.patch(start, range)?; /// builder.patch(range, alt)?; /// // If we added 'end' before 'range', then we'd implement non-greedy /// // matching, i.e., '+?'. /// builder.patch(alt, range)?; /// builder.patch(alt, end)?; /// builder.patch(end, mat)?; /// // We must explicitly finish pattern and provide the starting state ID for /// // this particular pattern. /// builder.finish_pattern(start)?; /// // Finally, when we build the NFA, we provide the anchored and unanchored /// // starting state IDs. Since we didn't bother with an unanchored prefix /// // here, we only support anchored searching. Thus, both starting states are /// // the same. /// let nfa = builder.build(start, start)?; /// /// // Now build a Pike VM from our NFA, and use it for searching. This shows /// // how we can use a regex engine without ever worrying about syntax! /// let re = PikeVM::new_from_nfa(nfa)?; /// let mut cache = re.create_cache(); /// let mut caps = re.create_captures(); /// let expected = Some(Match::must(0, 0..3)); /// re.captures(&mut cache, "foo0", &mut caps); /// assert_eq!(expected, caps.get_match()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[derive(Clone, Debug, Default)] pub struct Builder { /// The ID of the pattern that we're currently building. /// /// Callers are required to set (and unset) this by calling /// {start,finish}_pattern. Otherwise, most methods will panic. pattern_id: Option<PatternID>, /// A sequence of intermediate NFA states. Once a state is added to this /// sequence, it is assigned a state ID equivalent to its index. Once a /// state is added, it is still expected to be mutated, e.g., to set its /// transition to a state that didn't exist at the time it was added. states: Vec<State>, /// The starting states for each individual pattern. Starting at any /// of these states will result in only an anchored search for the /// corresponding pattern. The vec is indexed by pattern ID. When the NFA /// contains a single regex, then `start_pattern[0]` and `start_anchored` /// are always equivalent. start_pattern: Vec<StateID>, /// A map from pattern ID to capture group index to name. 
(If no name /// exists, then a None entry is present. Thus, all capturing groups are /// present in this mapping.) /// /// The outer vec is indexed by pattern ID, while the inner vec is indexed /// by capture index offset for the corresponding pattern. /// /// The first capture group for each pattern is always unnamed and is thus /// always None. captures: Vec<Vec<Option<Arc<str>>>>, /// The combined memory used by each of the 'State's in 'states'. This /// only includes heap usage by each state, and not the size of the state /// itself. In other words, this tracks heap memory used that isn't /// captured via `size_of::<State>() * states.len()`. memory_states: usize, /// Whether this NFA only matches UTF-8 and whether regex engines using /// this NFA for searching should report empty matches that split a /// codepoint. utf8: bool, /// Whether this NFA should be matched in reverse or not. reverse: bool, /// The matcher to use for look-around assertions. look_matcher: LookMatcher, /// A size limit to respect when building an NFA. If the total heap memory /// of the intermediate NFA states exceeds (or would exceed) this amount, /// then an error is returned. size_limit: Option<usize>, } impl Builder { /// Create a new builder for hand-assembling NFAs. pub fn new() -> Builder { Builder::default() } /// Clear this builder. /// /// Clearing removes all state associated with building an NFA, but does /// not reset configuration (such as size limits and whether the NFA /// should only match UTF-8). After clearing, the builder can be reused to /// assemble an entirely new NFA. pub fn clear(&mut self) { self.pattern_id = None; self.states.clear(); self.start_pattern.clear(); self.captures.clear(); self.memory_states = 0; } /// Assemble a [`NFA`] from the states added so far. /// /// After building an NFA, more states may be added and `build` may be /// called again. To reuse a builder to produce an entirely new NFA from /// scratch, call the [`clear`](Builder::clear) method first. /// /// `start_anchored` refers to the ID of the starting state that anchored /// searches should use. That is, searches who matches are limited to the /// starting position of the search. /// /// `start_unanchored` refers to the ID of the starting state that /// unanchored searches should use. This permits searches to report matches /// that start after the beginning of the search. In cases where unanchored /// searches are not supported, the unanchored starting state ID must be /// the same as the anchored starting state ID. /// /// # Errors /// /// This returns an error if there was a problem producing the final NFA. /// In particular, this might include an error if the capturing groups /// added to this builder violate any of the invariants documented on /// [`GroupInfo`](crate::util::captures::GroupInfo). /// /// # Panics /// /// If `start_pattern` was called, then `finish_pattern` must be called /// before `build`, otherwise this panics. /// /// This may panic for other invalid uses of a builder. For example, if /// a "start capture" state was added without a corresponding "end capture" /// state. 
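    ///
    /// # Example
    ///
    /// This is a minimal sketch of calling `build`: a single pattern made up
    /// of nothing but a match state, so the resulting NFA matches only the
    /// empty string. (The type-level documentation shows a more complete
    /// example that also adds capturing states and transitions.)
    ///
    /// ```
    /// use regex_automata::nfa::thompson::Builder;
    ///
    /// let mut builder = Builder::new();
    /// builder.start_pattern()?;
    /// // A match state with no transitions out of it.
    /// let mat = builder.add_match()?;
    /// builder.finish_pattern(mat)?;
    /// // No unanchored prefix was added, so the anchored and unanchored
    /// // starting states are the same.
    /// let nfa = builder.build(mat, mat)?;
    /// assert_eq!(1, nfa.pattern_len());
    ///
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```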
pub fn build( &self, start_anchored: StateID, start_unanchored: StateID, ) -> Result<NFA, BuildError> { assert!(self.pattern_id.is_none(), "must call 'finish_pattern' first"); debug!( "intermediate NFA compilation via builder is complete, \ intermediate NFA size: {} states, {} bytes on heap", self.states.len(), self.memory_usage(), ); let mut nfa = nfa::Inner::default(); nfa.set_utf8(self.utf8); nfa.set_reverse(self.reverse); nfa.set_look_matcher(self.look_matcher.clone()); // A set of compiler internal state IDs that correspond to states // that are exclusively epsilon transitions, i.e., goto instructions, // combined with the state that they point to. This is used to // record said states while transforming the compiler's internal NFA // representation to the external form. let mut empties = vec![]; // A map used to re-map state IDs when translating this builder's // internal NFA state representation to the final NFA representation. let mut remap = vec![]; remap.resize(self.states.len(), StateID::ZERO); nfa.set_starts(start_anchored, start_unanchored, &self.start_pattern); nfa.set_captures(&self.captures).map_err(BuildError::captures)?; // The idea here is to convert our intermediate states to their final // form. The only real complexity here is the process of converting // transitions, which are expressed in terms of state IDs. The new // set of states will be smaller because of partial epsilon removal, // so the state IDs will not be the same. for (sid, state) in self.states.iter().with_state_ids() { match *state { State::Empty { next } => { // Since we're removing empty states, we need to handle // them later since we don't yet know which new state this // empty state will be mapped to. empties.push((sid, next)); } State::ByteRange { trans } => { remap[sid] = nfa.add(nfa::State::ByteRange { trans }); } State::Sparse { ref transitions } => { remap[sid] = match transitions.len() { 0 => nfa.add(nfa::State::Fail), 1 => nfa.add(nfa::State::ByteRange { trans: transitions[0], }), _ => { let transitions = transitions.to_vec().into_boxed_slice(); let sparse = SparseTransitions { transitions }; nfa.add(nfa::State::Sparse(sparse)) } } } State::Look { look, next } => { remap[sid] = nfa.add(nfa::State::Look { look, next }); } State::CaptureStart { pattern_id, group_index, next } => { // We can't remove this empty state because of the side // effect of capturing an offset for this capture slot. let slot = nfa .group_info() .slot(pattern_id, group_index.as_usize()) .expect("invalid capture index"); let slot = SmallIndex::new(slot).expect("a small enough slot"); remap[sid] = nfa.add(nfa::State::Capture { next, pattern_id, group_index, slot, }); } State::CaptureEnd { pattern_id, group_index, next } => { // We can't remove this empty state because of the side // effect of capturing an offset for this capture slot. // Also, this always succeeds because we check that all // slot indices are valid for all capture indices when they // are initially added. 
let slot = nfa .group_info() .slot(pattern_id, group_index.as_usize()) .expect("invalid capture index") .checked_add(1) .unwrap(); let slot = SmallIndex::new(slot).expect("a small enough slot"); remap[sid] = nfa.add(nfa::State::Capture { next, pattern_id, group_index, slot, }); } State::Union { ref alternates } => { if alternates.is_empty() { remap[sid] = nfa.add(nfa::State::Fail); } else if alternates.len() == 1 { empties.push((sid, alternates[0])); remap[sid] = alternates[0]; } else if alternates.len() == 2 { remap[sid] = nfa.add(nfa::State::BinaryUnion { alt1: alternates[0], alt2: alternates[1], }); } else { let alternates = alternates.to_vec().into_boxed_slice(); remap[sid] = nfa.add(nfa::State::Union { alternates }); } } State::UnionReverse { ref alternates } => { if alternates.is_empty() { remap[sid] = nfa.add(nfa::State::Fail); } else if alternates.len() == 1 { empties.push((sid, alternates[0])); remap[sid] = alternates[0]; } else if alternates.len() == 2 { remap[sid] = nfa.add(nfa::State::BinaryUnion { alt1: alternates[1], alt2: alternates[0], }); } else { let mut alternates = alternates.to_vec().into_boxed_slice(); alternates.reverse(); remap[sid] = nfa.add(nfa::State::Union { alternates }); } } State::Fail => { remap[sid] = nfa.add(nfa::State::Fail); } State::Match { pattern_id } => { remap[sid] = nfa.add(nfa::State::Match { pattern_id }); } } } // Some of the new states still point to empty state IDs, so we need to // follow each of them and remap the empty state IDs to their non-empty // state IDs. // // We also keep track of which states we've already mapped. This helps // avoid quadratic behavior in a long chain of empty states. For // example, in 'a{0}{50000}'. let mut remapped = vec![false; self.states.len()]; for &(empty_id, empty_next) in empties.iter() { if remapped[empty_id] { continue; } // empty states can point to other empty states, forming a chain. // So we must follow the chain until the end, which must end at // a non-empty state, and therefore, a state that is correctly // remapped. We are guaranteed to terminate because our compiler // never builds a loop among only empty states. let mut new_next = empty_next; while let Some(next) = self.states[new_next].goto() { new_next = next; } remap[empty_id] = remap[new_next]; remapped[empty_id] = true; // Now that we've remapped the main 'empty_id' above, we re-follow // the chain from above and remap every empty state we found along // the way to our ultimate non-empty target. We are careful to set // 'remapped' to true for each such state. We thus will not need // to re-compute this chain for any subsequent empty states in // 'empties' that are part of this chain. let mut next2 = empty_next; while let Some(next) = self.states[next2].goto() { remap[next2] = remap[new_next]; remapped[next2] = true; next2 = next; } } // Finally remap all of the state IDs. nfa.remap(&remap); let final_nfa = nfa.into_nfa(); debug!( "NFA compilation via builder complete, \ final NFA size: {} states, {} bytes on heap, \ has empty? {:?}, utf8? {:?}", final_nfa.states().len(), final_nfa.memory_usage(), final_nfa.has_empty(), final_nfa.is_utf8(), ); Ok(final_nfa) } /// Start the assembly of a pattern in this NFA. /// /// Upon success, this returns the identifier for the new pattern. /// Identifiers start at `0` and are incremented by 1 for each new pattern. /// /// It is necessary to call this routine before adding capturing states. /// Otherwise, any other NFA state may be added before starting a pattern. 
/// /// # Errors /// /// If the pattern identifier space is exhausted, then this returns an /// error. /// /// # Panics /// /// If this is called while assembling another pattern (i.e., before /// `finish_pattern` is called), then this panics. pub fn start_pattern(&mut self) -> Result<PatternID, BuildError> { assert!(self.pattern_id.is_none(), "must call 'finish_pattern' first"); let proposed = self.start_pattern.len(); let pid = PatternID::new(proposed) .map_err(|_| BuildError::too_many_patterns(proposed))?; self.pattern_id = Some(pid); // This gets filled in when 'finish_pattern' is called. self.start_pattern.push(StateID::ZERO); Ok(pid) } /// Finish the assembly of a pattern in this NFA. /// /// Upon success, this returns the identifier for the new pattern. /// Identifiers start at `0` and are incremented by 1 for each new /// pattern. This is the same identifier returned by the corresponding /// `start_pattern` call. /// /// Note that `start_pattern` and `finish_pattern` pairs cannot be /// interleaved or nested. A correct `finish_pattern` call _always_ /// corresponds to the most recently called `start_pattern` routine. /// /// # Errors /// /// This currently never returns an error, but this is subject to change. /// /// # Panics /// /// If this is called without a corresponding `start_pattern` call, then /// this panics. pub fn finish_pattern( &mut self, start_id: StateID, ) -> Result<PatternID, BuildError> { let pid = self.current_pattern_id(); self.start_pattern[pid] = start_id; self.pattern_id = None; Ok(pid) } /// Returns the pattern identifier of the current pattern. /// /// # Panics /// /// If this doesn't occur after a `start_pattern` call and before the /// corresponding `finish_pattern` call, then this panics. pub fn current_pattern_id(&self) -> PatternID { self.pattern_id.expect("must call 'start_pattern' first") } /// Returns the number of patterns added to this builder so far. /// /// This only includes patterns that have had `finish_pattern` called /// for them. pub fn pattern_len(&self) -> usize { self.start_pattern.len() } /// Add an "empty" NFA state. /// /// An "empty" NFA state is a state with a single unconditional epsilon /// transition to another NFA state. Such empty states are removed before /// building the final [`NFA`] (which has no such "empty" states), but they /// can be quite useful in the construction process of an NFA. /// /// # Errors /// /// This returns an error if the state identifier space is exhausted, or if /// the configured heap size limit has been exceeded. pub fn add_empty(&mut self) -> Result<StateID, BuildError> { self.add(State::Empty { next: StateID::ZERO }) } /// Add a "union" NFA state. /// /// A "union" NFA state that contains zero or more unconditional epsilon /// transitions to other NFA states. The order of these transitions /// reflects a priority order where earlier transitions are preferred over /// later transitions. /// /// Callers may provide an empty set of alternates to this method call, and /// then later add transitions via `patch`. At final build time, a "union" /// state with no alternates is converted to a "fail" state, and a "union" /// state with exactly one alternate is treated as if it were an "empty" /// state. /// /// # Errors /// /// This returns an error if the state identifier space is exhausted, or if /// the configured heap size limit has been exceeded. 
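    ///
    /// # Example
    ///
    /// The following is a small sketch (the literal bytes and the use of the
    /// Pike VM here are illustrative only) of hand-assembling the alternation
    /// `a|b`, starting from a "union" state with no alternates and patching
    /// the branches in afterwards:
    ///
    /// ```
    /// use regex_automata::{
    ///     nfa::thompson::{pikevm::PikeVM, Builder, Transition},
    ///     util::primitives::StateID,
    ///     Match,
    /// };
    ///
    /// let mut builder = Builder::new();
    /// builder.start_pattern()?;
    /// let start = builder.add_capture_start(StateID::ZERO, 0, None)?;
    /// // An empty union. Its alternates are added below via 'patch', in
    /// // priority order.
    /// let alt = builder.add_union(vec![])?;
    /// let branch_a = builder.add_range(Transition {
    ///     start: b'a', end: b'a', next: StateID::ZERO,
    /// })?;
    /// let branch_b = builder.add_range(Transition {
    ///     start: b'b', end: b'b', next: StateID::ZERO,
    /// })?;
    /// let end = builder.add_capture_end(StateID::ZERO, 0)?;
    /// let mat = builder.add_match()?;
    /// builder.patch(start, alt)?;
    /// builder.patch(alt, branch_a)?;
    /// builder.patch(alt, branch_b)?;
    /// builder.patch(branch_a, end)?;
    /// builder.patch(branch_b, end)?;
    /// builder.patch(end, mat)?;
    /// builder.finish_pattern(start)?;
    /// // Only anchored searching is supported, since no unanchored prefix
    /// // was added.
    /// let nfa = builder.build(start, start)?;
    ///
    /// let re = PikeVM::new_from_nfa(nfa)?;
    /// let mut cache = re.create_cache();
    /// let mut caps = re.create_captures();
    /// re.captures(&mut cache, "b", &mut caps);
    /// assert_eq!(Some(Match::must(0, 0..1)), caps.get_match());
    ///
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```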
pub fn add_union( &mut self, alternates: Vec<StateID>, ) -> Result<StateID, BuildError> { self.add(State::Union { alternates }) } /// Add a "reverse union" NFA state. /// /// A "reverse union" NFA state contains zero or more unconditional epsilon /// transitions to other NFA states. The order of these transitions /// reflects a priority order where later transitions are preferred /// over earlier transitions. This is an inverted priority order when /// compared to `add_union`. This is useful, for example, for implementing /// non-greedy repetition operators. /// /// Callers may provide an empty set of alternates to this method call, and /// then later add transitions via `patch`. At final build time, a "reverse /// union" state with no alternates is converted to a "fail" state, and a /// "reverse union" state with exactly one alternate is treated as if it /// were an "empty" state. /// /// # Errors /// /// This returns an error if the state identifier space is exhausted, or if /// the configured heap size limit has been exceeded. pub fn add_union_reverse( &mut self, alternates: Vec<StateID>, ) -> Result<StateID, BuildError> { self.add(State::UnionReverse { alternates }) } /// Add a "range" NFA state. /// /// A "range" NFA state is a state with one outgoing transition to another /// state, where that transition may only be followed if the current input /// byte falls between a range of bytes given. /// /// # Errors /// /// This returns an error if the state identifier space is exhausted, or if /// the configured heap size limit has been exceeded. pub fn add_range( &mut self, trans: Transition, ) -> Result<StateID, BuildError> { self.add(State::ByteRange { trans }) } /// Add a "sparse" NFA state. /// /// A "sparse" NFA state contains zero or more outgoing transitions, where /// the transition to be followed (if any) is chosen based on whether the /// current input byte falls in the range of one such transition. The /// transitions given *must* be non-overlapping and in ascending order. (A /// "sparse" state with no transitions is equivalent to a "fail" state.) /// /// A "sparse" state is like adding a "union" state and pointing it at a /// bunch of "range" states, except that the different alternates have /// equal priority. /// /// Note that a "sparse" state is the only state that cannot be patched. /// This is because a "sparse" state has many transitions, each of which /// may point to a different NFA state. Moreover, adding more such /// transitions requires more than just an NFA state ID to point to. It /// also requires a byte range. The `patch` routine does not support the /// additional information required. Therefore, callers must ensure that /// all outgoing transitions for this state are included when `add_sparse` /// is called. There is no way to add more later. /// /// # Errors /// /// This returns an error if the state identifier space is exhausted, or if /// the configured heap size limit has been exceeded. /// /// # Panics /// /// This routine _may_ panic if the transitions given overlap or are not /// in ascending order. pub fn add_sparse( &mut self, transitions: Vec<Transition>, ) -> Result<StateID, BuildError> { self.add(State::Sparse { transitions }) } /// Add a "look" NFA state. /// /// A "look" NFA state corresponds to a state with exactly one /// *conditional* epsilon transition to another NFA state. Namely, it /// represents one of a small set of simplistic look-around operators. 
/// /// Callers may provide a "dummy" state ID (typically [`StateID::ZERO`]), /// and then change it later with [`patch`](Builder::patch). /// /// # Errors /// /// This returns an error if the state identifier space is exhausted, or if /// the configured heap size limit has been exceeded. pub fn add_look( &mut self, next: StateID, look: Look, ) -> Result<StateID, BuildError> { self.add(State::Look { look, next }) } /// Add a "start capture" NFA state. /// /// A "start capture" NFA state corresponds to a state with exactly one /// outgoing unconditional epsilon transition to another state. Unlike /// "empty" states, a "start capture" state also carries with it an /// instruction for saving the current position of input to a particular /// location in memory. NFA simulations, like the Pike VM, may use this /// information to report the match locations of capturing groups in a /// regex pattern. /// /// If the corresponding capturing group has a name, then callers should /// include it here. /// /// Callers may provide a "dummy" state ID (typically [`StateID::ZERO`]), /// and then change it later with [`patch`](Builder::patch). /// /// Note that unlike `start_pattern`/`finish_pattern`, capturing start and /// end states may be interleaved. Indeed, it is typical for many "start /// capture" NFA states to appear before the first "end capture" state. /// /// # Errors /// /// This returns an error if the state identifier space is exhausted, or if /// the configured heap size limit has been exceeded or if the given /// capture index overflows `usize`. /// /// While the above are the only conditions in which this routine can /// currently return an error, it is possible to call this method with an /// inputs that results in the final `build()` step failing to produce an /// NFA. For example, if one adds two distinct capturing groups with the /// same name, then that will result in `build()` failing with an error. /// /// See the [`GroupInfo`](crate::util::captures::GroupInfo) type for /// more information on what qualifies as valid capturing groups. /// /// # Example /// /// This example shows that an error occurs when one tries to add multiple /// capturing groups with the same name to the same pattern. /// /// ``` /// use regex_automata::{ /// nfa::thompson::Builder, /// util::primitives::StateID, /// }; /// /// let name = Some(std::sync::Arc::from("foo")); /// let mut builder = Builder::new(); /// builder.start_pattern()?; /// // 0th capture group should always be unnamed. /// let start = builder.add_capture_start(StateID::ZERO, 0, None)?; /// // OK /// builder.add_capture_start(StateID::ZERO, 1, name.clone())?; /// // This is not OK, but 'add_capture_start' still succeeds. We don't /// // get an error until we call 'build' below. Without this call, the /// // call to 'build' below would succeed. /// builder.add_capture_start(StateID::ZERO, 2, name.clone())?; /// // Finish our pattern so we can try to build the NFA. /// builder.finish_pattern(start)?; /// let result = builder.build(start, start); /// assert!(result.is_err()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// However, adding multiple capturing groups with the same name to /// distinct patterns is okay: /// /// ``` /// use std::sync::Arc; /// /// use regex_automata::{ /// nfa::thompson::{pikevm::PikeVM, Builder, Transition}, /// util::{ /// captures::Captures, /// primitives::{PatternID, StateID}, /// }, /// Span, /// }; /// /// // Hand-compile the patterns '(?P<foo>[a-z])' and '(?P<foo>[A-Z])'. 
    /// let mut builder = Builder::new();
    /// // We compile them to support an unanchored search, which requires
    /// // adding an implicit '(?s-u:.)*?' prefix before adding either pattern.
    /// let unanchored_prefix = builder.add_union_reverse(vec![])?;
    /// let any = builder.add_range(Transition {
    ///     start: b'\x00', end: b'\xFF', next: StateID::ZERO,
    /// })?;
    /// builder.patch(unanchored_prefix, any)?;
    /// builder.patch(any, unanchored_prefix)?;
    ///
    /// // Compile an alternation that permits matching multiple patterns.
    /// let alt = builder.add_union(vec![])?;
    /// builder.patch(unanchored_prefix, alt)?;
    ///
    /// // Compile '(?P<foo>[a-z])'.
    /// builder.start_pattern()?;
    /// let start0 = builder.add_capture_start(StateID::ZERO, 0, None)?;
    /// // N.B. 0th capture group must always be unnamed.
    /// let foo_start0 = builder.add_capture_start(
    ///     StateID::ZERO, 1, Some(Arc::from("foo")),
    /// )?;
    /// let lowercase = builder.add_range(Transition {
    ///     start: b'a', end: b'z', next: StateID::ZERO,
    /// })?;
    /// let foo_end0 = builder.add_capture_end(StateID::ZERO, 1)?;
    /// let end0 = builder.add_capture_end(StateID::ZERO, 0)?;
    /// let match0 = builder.add_match()?;
    /// builder.patch(start0, foo_start0)?;
    /// builder.patch(foo_start0, lowercase)?;
    /// builder.patch(lowercase, foo_end0)?;
    /// builder.patch(foo_end0, end0)?;
    /// builder.patch(end0, match0)?;
    /// builder.finish_pattern(start0)?;
    ///
    /// // Compile '(?P<foo>[A-Z])'.
    /// builder.start_pattern()?;
    /// let start1 = builder.add_capture_start(StateID::ZERO, 0, None)?;
    /// // N.B. 0th capture group must always be unnamed.
    /// let foo_start1 = builder.add_capture_start(
    ///     StateID::ZERO, 1, Some(Arc::from("foo")),
    /// )?;
    /// let uppercase = builder.add_range(Transition {
    ///     start: b'A', end: b'Z', next: StateID::ZERO,
    /// })?;
    /// let foo_end1 = builder.add_capture_end(StateID::ZERO, 1)?;
    /// let end1 = builder.add_capture_end(StateID::ZERO, 0)?;
    /// let match1 = builder.add_match()?;
    /// builder.patch(start1, foo_start1)?;
    /// builder.patch(foo_start1, uppercase)?;
    /// builder.patch(uppercase, foo_end1)?;
    /// builder.patch(foo_end1, end1)?;
    /// builder.patch(end1, match1)?;
    /// builder.finish_pattern(start1)?;
    ///
    /// // Now add the patterns to our alternation that we started above.
    /// builder.patch(alt, start0)?;
    /// builder.patch(alt, start1)?;
    ///
    /// // Finally build the NFA. The first argument is the anchored starting
    /// // state (the pattern alternation) whereas the second is the
    /// // unanchored starting state (the unanchored prefix).
    /// let nfa = builder.build(alt, unanchored_prefix)?;
    ///
    /// // Now build a Pike VM from our NFA and access the 'foo' capture
    /// // group regardless of which pattern matched, since it is defined
    /// // for both patterns.
    /// let vm = PikeVM::new_from_nfa(nfa)?;
    /// let mut cache = vm.create_cache();
    /// let caps: Vec<Captures> =
    ///     vm.captures_iter(&mut cache, "0123aAaAA").collect();
    /// assert_eq!(5, caps.len());
    ///
    /// assert_eq!(Some(PatternID::must(0)), caps[0].pattern());
    /// assert_eq!(Some(Span::from(4..5)), caps[0].get_group_by_name("foo"));
    ///
    /// assert_eq!(Some(PatternID::must(1)), caps[1].pattern());
    /// assert_eq!(Some(Span::from(5..6)), caps[1].get_group_by_name("foo"));
    ///
    /// assert_eq!(Some(PatternID::must(0)), caps[2].pattern());
    /// assert_eq!(Some(Span::from(6..7)), caps[2].get_group_by_name("foo"));
    ///
    /// assert_eq!(Some(PatternID::must(1)), caps[3].pattern());
    /// assert_eq!(Some(Span::from(7..8)), caps[3].get_group_by_name("foo"));
    ///
    /// assert_eq!(Some(PatternID::must(1)), caps[4].pattern());
    /// assert_eq!(Some(Span::from(8..9)), caps[4].get_group_by_name("foo"));
    ///
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```
    pub fn add_capture_start(
        &mut self,
        next: StateID,
        group_index: u32,
        name: Option<Arc<str>>,
    ) -> Result<StateID, BuildError> {
        let pid = self.current_pattern_id();
        let group_index = match SmallIndex::try_from(group_index) {
            Err(_) => {
                return Err(BuildError::invalid_capture_index(group_index))
            }
            Ok(group_index) => group_index,
        };
        // Make sure we have space to insert our (pid,index)|-->name mapping.
        if pid.as_usize() >= self.captures.len() {
            for _ in 0..=(pid.as_usize() - self.captures.len()) {
                self.captures.push(vec![]);
            }
        }
        // In the case where 'group_index < self.captures[pid].len()', it means
        // that we are adding a duplicate capture group. This is somewhat
        // weird, but permissible because the capture group itself can be
        // repeated in the syntax. For example, '([a-z]){4}' will produce 4
        // capture groups. In practice, only the last will be set at search
        // time when a match occurs. For duplicates, we don't need to push
        // anything other than a CaptureStart NFA state.
        if group_index.as_usize() >= self.captures[pid].len() {
            // For discontiguous indices, push placeholders for earlier capture
            // groups that weren't explicitly added.
            for _ in 0..(group_index.as_usize() - self.captures[pid].len()) {
                self.captures[pid].push(None);
            }
            self.captures[pid].push(name);
        }
        self.add(State::CaptureStart { pattern_id: pid, group_index, next })
    }

    /// Add an "end capture" NFA state.
    ///
    /// An "end capture" NFA state corresponds to a state with exactly one
    /// outgoing unconditional epsilon transition to another state. Unlike
    /// "empty" states, an "end capture" state also carries with it an
    /// instruction for saving the current position of input to a particular
    /// location in memory. NFA simulations, like the Pike VM, may use this
    /// information to report the match locations of capturing groups in a
    /// regex pattern.
    ///
    /// Callers may provide a "dummy" state ID (typically [`StateID::ZERO`]),
    /// and then change it later with [`patch`](Builder::patch).
    ///
    /// Note that unlike `start_pattern`/`finish_pattern`, capturing start and
    /// end states may be interleaved. Indeed, it is typical for many "start
    /// capture" NFA states to appear before the first "end capture" state.
    ///
    /// # Errors
    ///
    /// This returns an error if the state identifier space is exhausted, or if
    /// the configured heap size limit has been exceeded or if the given
    /// capture index overflows `usize`.
/// /// While the above are the only conditions in which this routine can /// currently return an error, it is possible to call this method with an /// inputs that results in the final `build()` step failing to produce an /// NFA. For example, if one adds two distinct capturing groups with the /// same name, then that will result in `build()` failing with an error. /// /// See the [`GroupInfo`](crate::util::captures::GroupInfo) type for /// more information on what qualifies as valid capturing groups. pub fn add_capture_end( &mut self, next: StateID, group_index: u32, ) -> Result<StateID, BuildError> { let pid = self.current_pattern_id(); let group_index = match SmallIndex::try_from(group_index) { Err(_) => { return Err(BuildError::invalid_capture_index(group_index)) } Ok(group_index) => group_index, }; self.add(State::CaptureEnd { pattern_id: pid, group_index, next }) } /// Adds a "fail" NFA state. /// /// A "fail" state is simply a state that has no outgoing transitions. It /// acts as a way to cause a search to stop without reporting a match. /// For example, one way to represent an NFA with zero patterns is with a /// single "fail" state. /// /// # Errors /// /// This returns an error if the state identifier space is exhausted, or if /// the configured heap size limit has been exceeded. pub fn add_fail(&mut self) -> Result<StateID, BuildError> { self.add(State::Fail) } /// Adds a "match" NFA state. /// /// A "match" state has no outgoing transitions (just like a "fail" /// state), but it has special significance in that if a search enters /// this state, then a match has been found. The match state that is added /// automatically has the current pattern ID associated with it. This is /// used to report the matching pattern ID at search time. /// /// # Errors /// /// This returns an error if the state identifier space is exhausted, or if /// the configured heap size limit has been exceeded. /// /// # Panics /// /// This must be called after a `start_pattern` call but before the /// corresponding `finish_pattern` call. Otherwise, it panics. pub fn add_match(&mut self) -> Result<StateID, BuildError> { let pattern_id = self.current_pattern_id(); let sid = self.add(State::Match { pattern_id })?; Ok(sid) } /// The common implementation of "add a state." It handles the common /// error cases of state ID exhausting (by owning state ID allocation) and /// whether the size limit has been exceeded. fn add(&mut self, state: State) -> Result<StateID, BuildError> { let id = StateID::new(self.states.len()) .map_err(|_| BuildError::too_many_states(self.states.len()))?; self.memory_states += state.memory_usage(); self.states.push(state); self.check_size_limit()?; Ok(id) } /// Add a transition from one state to another. /// /// This routine is called "patch" since it is very common to add the /// states you want, typically with "dummy" state ID transitions, and then /// "patch" in the real state IDs later. This is because you don't always /// know all of the necessary state IDs to add because they might not /// exist yet. /// /// # Errors /// /// This may error if patching leads to an increase in heap usage beyond /// the configured size limit. Heap usage only grows when patching adds a /// new transition (as in the case of a "union" state). /// /// # Panics /// /// This panics if `from` corresponds to a "sparse" state. When "sparse" /// states are added, there is no way to patch them after-the-fact. (If you /// have a use case where this would be helpful, please file an issue. 
It /// will likely require a new API.) pub fn patch( &mut self, from: StateID, to: StateID, ) -> Result<(), BuildError> { let old_memory_states = self.memory_states; match self.states[from] { State::Empty { ref mut next } => { *next = to; } State::ByteRange { ref mut trans } => { trans.next = to; } State::Sparse { .. } => { panic!("cannot patch from a sparse NFA state") } State::Look { ref mut next, .. } => { *next = to; } State::Union { ref mut alternates } => { alternates.push(to); self.memory_states += mem::size_of::<StateID>(); } State::UnionReverse { ref mut alternates } => { alternates.push(to); self.memory_states += mem::size_of::<StateID>(); } State::CaptureStart { ref mut next, .. } => { *next = to; } State::CaptureEnd { ref mut next, .. } => { *next = to; } State::Fail => {} State::Match { .. } => {} } if old_memory_states != self.memory_states { self.check_size_limit()?; } Ok(()) } /// Set whether the NFA produced by this builder should only match UTF-8. /// /// This should be set when both of the following are true: /// /// 1. The caller guarantees that the NFA created by this build will only /// report non-empty matches with spans that are valid UTF-8. /// 2. The caller desires regex engines using this NFA to avoid reporting /// empty matches with a span that splits a valid UTF-8 encoded codepoint. /// /// Property (1) is not checked. Instead, this requires the caller to /// promise that it is true. Property (2) corresponds to the behavior of /// regex engines using the NFA created by this builder. Namely, there /// is no way in the NFA's graph itself to say that empty matches found /// by, for example, the regex `a*` will fall on valid UTF-8 boundaries. /// Instead, this option is used to communicate the UTF-8 semantic to regex /// engines that will typically implement it as a post-processing step by /// filtering out empty matches that don't fall on UTF-8 boundaries. /// /// If you're building an NFA from an HIR (and not using a /// [`thompson::Compiler`](crate::nfa::thompson::Compiler)), then you can /// use the [`syntax::Config::utf8`](crate::util::syntax::Config::utf8) /// option to guarantee that if the HIR detects a non-empty match, then it /// is guaranteed to be valid UTF-8. /// /// Note that property (2) does *not* specify the behavior of executing /// a search on a haystack that is not valid UTF-8. Therefore, if you're /// *not* running this NFA on strings that are guaranteed to be valid /// UTF-8, you almost certainly do not want to enable this option. /// Similarly, if you are running the NFA on strings that *are* guaranteed /// to be valid UTF-8, then you almost certainly want to enable this option /// unless you can guarantee that your NFA will never produce a zero-width /// match. /// /// It is disabled by default. pub fn set_utf8(&mut self, yes: bool) { self.utf8 = yes; } /// Returns whether UTF-8 mode is enabled for this builder. /// /// See [`Builder::set_utf8`] for more details about what "UTF-8 mode" is. pub fn get_utf8(&self) -> bool { self.utf8 } /// Sets whether the NFA produced by this builder should be matched in /// reverse or not. Generally speaking, when enabled, the NFA produced /// should be matched by moving backwards through a haystack, from a higher /// memory address to a lower memory address. /// /// See also [`NFA::is_reverse`] for more details. /// /// This is disabled by default, which means NFAs are by default matched /// in the forward direction. 
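    ///
    /// # Example
    ///
    /// A small sketch (the states added here are only what is needed to
    /// produce a buildable NFA) showing that the flag set on the builder is
    /// carried through to the built NFA:
    ///
    /// ```
    /// use regex_automata::{nfa::thompson::Builder, util::primitives::StateID};
    ///
    /// let mut builder = Builder::new();
    /// builder.set_reverse(true);
    /// assert!(builder.get_reverse());
    ///
    /// builder.start_pattern()?;
    /// let start = builder.add_capture_start(StateID::ZERO, 0, None)?;
    /// let end = builder.add_capture_end(StateID::ZERO, 0)?;
    /// let mat = builder.add_match()?;
    /// builder.patch(start, end)?;
    /// builder.patch(end, mat)?;
    /// builder.finish_pattern(start)?;
    /// let nfa = builder.build(start, start)?;
    /// assert!(nfa.is_reverse());
    ///
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```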
pub fn set_reverse(&mut self, yes: bool) { self.reverse = yes; } /// Returns whether reverse mode is enabled for this builder. /// /// See [`Builder::set_reverse`] for more details about what "reverse mode" /// is. pub fn get_reverse(&self) -> bool { self.reverse } /// Sets the look-around matcher that should be used for the resulting NFA. /// /// A look-around matcher can be used to configure how look-around /// assertions are matched. For example, a matcher might carry /// configuration that changes the line terminator used for `(?m:^)` and /// `(?m:$)` assertions. pub fn set_look_matcher(&mut self, m: LookMatcher) { self.look_matcher = m; } /// Returns the look-around matcher used for this builder. /// /// If a matcher was not explicitly set, then `LookMatcher::default()` is /// returned. pub fn get_look_matcher(&self) -> &LookMatcher { &self.look_matcher } /// Set the size limit on this builder. /// /// Setting the size limit will also check whether the NFA built so far /// fits within the given size limit. If it doesn't, then an error is /// returned. /// /// By default, there is no configured size limit. pub fn set_size_limit( &mut self, limit: Option<usize>, ) -> Result<(), BuildError> { self.size_limit = limit; self.check_size_limit() } /// Return the currently configured size limit. /// /// By default, this returns `None`, which corresponds to no configured /// size limit. pub fn get_size_limit(&self) -> Option<usize> { self.size_limit } /// Returns the heap memory usage, in bytes, used by the NFA states added /// so far. /// /// Note that this is an approximation of how big the final NFA will be. /// In practice, the final NFA will likely be a bit smaller because of /// its simpler state representation. (For example, using things like /// `Box<[StateID]>` instead of `Vec<StateID>`.) pub fn memory_usage(&self) -> usize { self.states.len() * mem::size_of::<State>() + self.memory_states } fn check_size_limit(&self) -> Result<(), BuildError> { if let Some(limit) = self.size_limit { if self.memory_usage() > limit { return Err(BuildError::exceeded_size_limit(limit)); } } Ok(()) } } #[cfg(test)] mod tests { use super::*; // This asserts that a builder state doesn't have its size changed. It is // *really* easy to accidentally increase the size, and thus potentially // dramatically increase the memory usage of NFA builder. // // This assert doesn't mean we absolutely cannot increase the size of a // builder state. We can. It's just here to make sure we do it knowingly // and intentionally. // // A builder state is unfortunately a little bigger than an NFA state, // since we really want to support adding things to a pre-existing state. // i.e., We use Vec<thing> instead of Box<[thing]>. So we end up using an // extra 8 bytes per state. Sad, but at least it gets freed once the NFA // is built. #[test] fn state_has_small_size() { #[cfg(target_pointer_width = "64")] assert_eq!(32, core::mem::size_of::<State>()); #[cfg(target_pointer_width = "32")] assert_eq!(16, core::mem::size_of::<State>()); } } <file_sep>/regex-automata/test #!/bin/bash # This is a script that attempts to *approximately* exhaustively run the test # suite for regex-automata. The main reason for why 'cargo test' isn't enough # is because of crate features. regex-automata has a ton of them. This script # tests many of those feature combinations (although not all) to try to get # decent coverage in a finite amount of time. 
set -e # cd to the directory containing this crate's Cargo.toml so that we don't need # to pass --manifest-path to every `cargo` command. cd "$(dirname "$0")" echo "===== ALL FEATURES TEST ===" cargo test --all-features # Man I don't *want* to have this many crate features, but... I really want # folks to be able to slim the crate down to just the things they want. But # the main downside is that I just can't feasibly test every combination of # features because there are too many of them. Sad, but I'm not sure if there # is a better alternative. features=( "" "unicode-word-boundary" "unicode-word-boundary,syntax,unicode-perl" "unicode-word-boundary,syntax,dfa-build" "nfa" "dfa" "hybrid" "nfa,dfa" "nfa,hybrid" "dfa,hybrid" "dfa-onepass" "nfa-pikevm" "nfa-backtrack" "std" "alloc" "syntax" "syntax,nfa-pikevm" "syntax,hybrid" "perf-literal-substring" "perf-literal-multisubstring" "meta" "meta,nfa-backtrack" "meta,hybrid" "meta,dfa-build" "meta,dfa-onepass" "meta,nfa,dfa,hybrid,nfa-backtrack" "meta,nfa,dfa,hybrid,nfa-backtrack,perf-literal-substring" "meta,nfa,dfa,hybrid,nfa-backtrack,perf-literal-multisubstring" ) for f in "${features[@]}"; do echo "===== LIB FEATURES: $f ===" # It's actually important to do a standard 'cargo build' in addition to a # 'cargo test'. In particular, in the latter case, the dev-dependencies may # wind up enabling features in dependencies (like memchr) that make it look # like everything is well, but actually isn't. For example, the 'regex-test' # dev-dependency uses 'bstr' and enables its 'std' feature, which in turn # unconditionally enables 'memchr's 'std' feature. Since we're specifically # looking to test that certain feature combinations work as expected, this # can lead to things testing okay, but would actually fail to build. Yikes. cargo build --no-default-features --lib --features "$f" cargo test --no-default-features --lib --features "$f" done # We can also run the integration test suite on stripped down features too. # But the test suite doesn't do well with things like 'std' and 'unicode' # disabled, so we always enable them. features=( "std,unicode,syntax,nfa-pikevm" "std,unicode,syntax,nfa-backtrack" "std,unicode,syntax,hybrid" "std,unicode,syntax,dfa-onepass" "std,unicode,syntax,dfa-search" "std,unicode,syntax,dfa-build" "std,unicode,meta" # This one is a little tricky because it causes the backtracker to get used # in more instances and results in failing tests for the 'earliest' tests. # The actual results are semantically consistent with the API guarantee # (the backtracker tends to report greater offsets because it isn't an FSM), # but our tests are less flexible than the API guarantee and demand offsets # reported by FSM regex engines. (Which is... all of them except for the # backtracker.) # "std,unicode,meta,nfa-backtrack" "std,unicode,meta,hybrid" "std,unicode,meta,dfa-onepass" "std,unicode,meta,dfa-build" "std,unicode,meta,nfa,dfa-onepass,hybrid" ) for f in "${features[@]}"; do echo "===== INTEGRATION FEATURES: $f ===" cargo build --no-default-features --lib --features "$f" cargo test --no-default-features --test integration --features "$f" done <file_sep>/src/builders.rs #![allow(warnings)] // This module defines an internal builder that encapsulates all interaction // with meta::Regex construction, and then 4 public API builders that wrap // around it. The docs are essentially repeated on each of the 4 public // builders, with tweaks to the examples as needed. 
// // The reason why there are so many builders is partially because of a misstep // in the initial API design: the builder constructor takes in the pattern // strings instead of using the `build` method to accept the pattern strings. // This means `new` has a different signature for each builder. It probably // would have been nicer to to use one builder with `fn new()`, and then add // `build(pat)` and `build_many(pats)` constructors. // // The other reason is because I think the `bytes` module should probably // have its own builder type. That way, it is completely isolated from the // top-level API. // // If I could do it again, I'd probably have a `regex::Builder` and a // `regex::bytes::Builder`. Each would have `build` and `build_set` (or // `build_many`) methods for constructing a single pattern `Regex` and a // multi-pattern `RegexSet`, respectively. use alloc::{ string::{String, ToString}, sync::Arc, vec, vec::Vec, }; use regex_automata::{ meta, nfa::thompson::WhichCaptures, util::syntax, MatchKind, }; use crate::error::Error; /// A builder for constructing a `Regex`, `bytes::Regex`, `RegexSet` or a /// `bytes::RegexSet`. /// /// This is essentially the implementation of the four different builder types /// in the public API: `RegexBuilder`, `bytes::RegexBuilder`, `RegexSetBuilder` /// and `bytes::RegexSetBuilder`. #[derive(Clone, Debug)] struct Builder { pats: Vec<String>, metac: meta::Config, syntaxc: syntax::Config, } impl Default for Builder { fn default() -> Builder { let metac = meta::Config::new() .nfa_size_limit(Some(10 * (1 << 20))) .hybrid_cache_capacity(2 * (1 << 20)); Builder { pats: vec![], metac, syntaxc: syntax::Config::default() } } } impl Builder { fn new<I, S>(patterns: I) -> Builder where S: AsRef<str>, I: IntoIterator<Item = S>, { let mut b = Builder::default(); b.pats.extend(patterns.into_iter().map(|p| p.as_ref().to_string())); b } fn build_one_string(&self) -> Result<crate::Regex, Error> { assert_eq!(1, self.pats.len()); let metac = self .metac .clone() .match_kind(MatchKind::LeftmostFirst) .utf8_empty(true); let syntaxc = self.syntaxc.clone().utf8(true); let pattern = Arc::from(self.pats[0].as_str()); meta::Builder::new() .configure(metac) .syntax(syntaxc) .build(&pattern) .map(|meta| crate::Regex { meta, pattern }) .map_err(Error::from_meta_build_error) } fn build_one_bytes(&self) -> Result<crate::bytes::Regex, Error> { assert_eq!(1, self.pats.len()); let metac = self .metac .clone() .match_kind(MatchKind::LeftmostFirst) .utf8_empty(false); let syntaxc = self.syntaxc.clone().utf8(false); let pattern = Arc::from(self.pats[0].as_str()); meta::Builder::new() .configure(metac) .syntax(syntaxc) .build(&pattern) .map(|meta| crate::bytes::Regex { meta, pattern }) .map_err(Error::from_meta_build_error) } fn build_many_string(&self) -> Result<crate::RegexSet, Error> { let metac = self .metac .clone() .match_kind(MatchKind::All) .utf8_empty(true) .which_captures(WhichCaptures::None); let syntaxc = self.syntaxc.clone().utf8(true); let patterns = Arc::from(self.pats.as_slice()); meta::Builder::new() .configure(metac) .syntax(syntaxc) .build_many(&patterns) .map(|meta| crate::RegexSet { meta, patterns }) .map_err(Error::from_meta_build_error) } fn build_many_bytes(&self) -> Result<crate::bytes::RegexSet, Error> { let metac = self .metac .clone() .match_kind(MatchKind::All) .utf8_empty(false) .which_captures(WhichCaptures::None); let syntaxc = self.syntaxc.clone().utf8(false); let patterns = Arc::from(self.pats.as_slice()); meta::Builder::new() .configure(metac) 
.syntax(syntaxc) .build_many(&patterns) .map(|meta| crate::bytes::RegexSet { meta, patterns }) .map_err(Error::from_meta_build_error) } fn case_insensitive(&mut self, yes: bool) -> &mut Builder { self.syntaxc = self.syntaxc.case_insensitive(yes); self } fn multi_line(&mut self, yes: bool) -> &mut Builder { self.syntaxc = self.syntaxc.multi_line(yes); self } fn dot_matches_new_line(&mut self, yes: bool) -> &mut Builder { self.syntaxc = self.syntaxc.dot_matches_new_line(yes); self } fn crlf(&mut self, yes: bool) -> &mut Builder { self.syntaxc = self.syntaxc.crlf(yes); self } fn line_terminator(&mut self, byte: u8) -> &mut Builder { self.metac = self.metac.clone().line_terminator(byte); self.syntaxc = self.syntaxc.line_terminator(byte); self } fn swap_greed(&mut self, yes: bool) -> &mut Builder { self.syntaxc = self.syntaxc.swap_greed(yes); self } fn ignore_whitespace(&mut self, yes: bool) -> &mut Builder { self.syntaxc = self.syntaxc.ignore_whitespace(yes); self } fn unicode(&mut self, yes: bool) -> &mut Builder { self.syntaxc = self.syntaxc.unicode(yes); self } fn octal(&mut self, yes: bool) -> &mut Builder { self.syntaxc = self.syntaxc.octal(yes); self } fn size_limit(&mut self, limit: usize) -> &mut Builder { self.metac = self.metac.clone().nfa_size_limit(Some(limit)); self } fn dfa_size_limit(&mut self, limit: usize) -> &mut Builder { self.metac = self.metac.clone().hybrid_cache_capacity(limit); self } fn nest_limit(&mut self, limit: u32) -> &mut Builder { self.syntaxc = self.syntaxc.nest_limit(limit); self } } pub(crate) mod string { use crate::{error::Error, Regex, RegexSet}; use super::Builder; /// A configurable builder for a [`Regex`]. /// /// This builder can be used to programmatically set flags such as `i` /// (case insensitive) and `x` (for verbose mode). This builder can also be /// used to configure things like the line terminator and a size limit on /// the compiled regular expression. #[derive(Clone, Debug)] pub struct RegexBuilder { builder: Builder, } impl RegexBuilder { /// Create a new builder with a default configuration for the given /// pattern. /// /// If the pattern is invalid or exceeds the configured size limits, /// then an error will be returned when [`RegexBuilder::build`] is /// called. pub fn new(pattern: &str) -> RegexBuilder { RegexBuilder { builder: Builder::new([pattern]) } } /// Compiles the pattern given to `RegexBuilder::new` with the /// configuration set on this builder. /// /// If the pattern isn't a valid regex or if a configured size limit /// was exceeded, then an error is returned. pub fn build(&self) -> Result<Regex, Error> { self.builder.build_one_string() } /// This configures Unicode mode for the entire pattern. /// /// Enabling Unicode mode does a number of things: /// /// * Most fundamentally, it causes the fundamental atom of matching /// to be a single codepoint. When Unicode mode is disabled, it's a /// single byte. For example, when Unicode mode is enabled, `.` will /// match `💩` once, where as it will match 4 times when Unicode mode /// is disabled. (Since the UTF-8 encoding of `💩` is 4 bytes long.) /// * Case insensitive matching uses Unicode simple case folding rules. /// * Unicode character classes like `\p{Letter}` and `\p{Greek}` are /// available. /// * Perl character classes are Unicode aware. That is, `\w`, `\s` and /// `\d`. /// * The word boundary assertions, `\b` and `\B`, use the Unicode /// definition of a word character. 
/// /// Note that if Unicode mode is disabled, then the regex will fail to /// compile if it could match invalid UTF-8. For example, when Unicode /// mode is disabled, then since `.` matches any byte (except for /// `\n`), then it can match invalid UTF-8 and thus building a regex /// from it will fail. Another example is `\w` and `\W`. Since `\w` can /// only match ASCII bytes when Unicode mode is disabled, it's allowed. /// But `\W` can match more than ASCII bytes, including invalid UTF-8, /// and so it is not allowed. This restriction can be lifted only by /// using a [`bytes::Regex`](crate::bytes::Regex). /// /// For more details on the Unicode support in this crate, see the /// [Unicode section](crate#unicode) in this crate's top-level /// documentation. /// /// The default for this is `true`. /// /// # Example /// /// ``` /// use regex::RegexBuilder; /// /// let re = RegexBuilder::new(r"\w") /// .unicode(false) /// .build() /// .unwrap(); /// // Normally greek letters would be included in \w, but since /// // Unicode mode is disabled, it only matches ASCII letters. /// assert!(!re.is_match("δ")); /// /// let re = RegexBuilder::new(r"s") /// .case_insensitive(true) /// .unicode(false) /// .build() /// .unwrap(); /// // Normally 'ſ' is included when searching for 's' case /// // insensitively due to Unicode's simple case folding rules. But /// // when Unicode mode is disabled, only ASCII case insensitive rules /// // are used. /// assert!(!re.is_match("ſ")); /// ``` pub fn unicode(&mut self, yes: bool) -> &mut RegexBuilder { self.builder.unicode(yes); self } /// This configures whether to enable case insensitive matching for the /// entire pattern. /// /// This setting can also be configured using the inline flag `i` /// in the pattern. For example, `(?i:foo)` matches `foo` case /// insensitively while `(?-i:foo)` matches `foo` case sensitively. /// /// The default for this is `false`. /// /// # Example /// /// ``` /// use regex::RegexBuilder; /// /// let re = RegexBuilder::new(r"foo(?-i:bar)quux") /// .case_insensitive(true) /// .build() /// .unwrap(); /// assert!(re.is_match("FoObarQuUx")); /// // Even though case insensitive matching is enabled in the builder, /// // it can be locally disabled within the pattern. In this case, /// // `bar` is matched case sensitively. /// assert!(!re.is_match("fooBARquux")); /// ``` pub fn case_insensitive(&mut self, yes: bool) -> &mut RegexBuilder { self.builder.case_insensitive(yes); self } /// This configures multi-line mode for the entire pattern. /// /// Enabling multi-line mode changes the behavior of the `^` and `$` /// anchor assertions. Instead of only matching at the beginning and /// end of a haystack, respectively, multi-line mode causes them to /// match at the beginning and end of a line *in addition* to the /// beginning and end of a haystack. More precisely, `^` will match at /// the position immediately following a `\n` and `$` will match at the /// position immediately preceding a `\n`. /// /// The behavior of this option can be impacted by other settings too: /// /// * The [`RegexBuilder::line_terminator`] option changes `\n` above /// to any ASCII byte. /// * The [`RegexBuilder::crlf`] option changes the line terminator to /// be either `\r` or `\n`, but never at the position between a `\r` /// and `\n`. /// /// This setting can also be configured using the inline flag `m` in /// the pattern. /// /// The default for this is `false`. 
/// /// # Example /// /// ``` /// use regex::RegexBuilder; /// /// let re = RegexBuilder::new(r"^foo$") /// .multi_line(true) /// .build() /// .unwrap(); /// assert_eq!(Some(1..4), re.find("\nfoo\n").map(|m| m.range())); /// ``` pub fn multi_line(&mut self, yes: bool) -> &mut RegexBuilder { self.builder.multi_line(yes); self } /// This configures dot-matches-new-line mode for the entire pattern. /// /// Perhaps surprisingly, the default behavior for `.` is not to match /// any character, but rather, to match any character except for the /// line terminator (which is `\n` by default). When this mode is /// enabled, the behavior changes such that `.` truly matches any /// character. /// /// This setting can also be configured using the inline flag `s` in /// the pattern. For example, `(?s:.)` and `\p{any}` are equivalent /// regexes. /// /// The default for this is `false`. /// /// # Example /// /// ``` /// use regex::RegexBuilder; /// /// let re = RegexBuilder::new(r"foo.bar") /// .dot_matches_new_line(true) /// .build() /// .unwrap(); /// let hay = "foo\nbar"; /// assert_eq!(Some("foo\nbar"), re.find(hay).map(|m| m.as_str())); /// ``` pub fn dot_matches_new_line( &mut self, yes: bool, ) -> &mut RegexBuilder { self.builder.dot_matches_new_line(yes); self } /// This configures CRLF mode for the entire pattern. /// /// When CRLF mode is enabled, both `\r` ("carriage return" or CR for /// short) and `\n` ("line feed" or LF for short) are treated as line /// terminators. This results in the following: /// /// * Unless dot-matches-new-line mode is enabled, `.` will now match /// any character except for `\n` and `\r`. /// * When multi-line mode is enabled, `^` will match immediately /// following a `\n` or a `\r`. Similarly, `$` will match immediately /// preceding a `\n` or a `\r`. Neither `^` nor `$` will ever match /// between `\r` and `\n`. /// /// This setting can also be configured using the inline flag `R` in /// the pattern. /// /// The default for this is `false`. /// /// # Example /// /// ``` /// use regex::RegexBuilder; /// /// let re = RegexBuilder::new(r"^foo$") /// .multi_line(true) /// .crlf(true) /// .build() /// .unwrap(); /// let hay = "\r\nfoo\r\n"; /// // If CRLF mode weren't enabled here, then '$' wouldn't match /// // immediately after 'foo', and thus no match would be found. /// assert_eq!(Some("foo"), re.find(hay).map(|m| m.as_str())); /// ``` /// /// This example demonstrates that `^` will never match at a position /// between `\r` and `\n`. (`$` will similarly not match between a `\r` /// and a `\n`.) /// /// ``` /// use regex::RegexBuilder; /// /// let re = RegexBuilder::new(r"^") /// .multi_line(true) /// .crlf(true) /// .build() /// .unwrap(); /// let hay = "\r\n\r\n"; /// let ranges: Vec<_> = re.find_iter(hay).map(|m| m.range()).collect(); /// assert_eq!(ranges, vec![0..0, 2..2, 4..4]); /// ``` pub fn crlf(&mut self, yes: bool) -> &mut RegexBuilder { self.builder.crlf(yes); self } /// Configures the line terminator to be used by the regex. /// /// The line terminator is relevant in two ways for a particular regex: /// /// * When dot-matches-new-line mode is *not* enabled (the default), /// then `.` will match any character except for the configured line /// terminator. /// * When multi-line mode is enabled (not the default), then `^` and /// `$` will match immediately after and before, respectively, a line /// terminator. /// /// In both cases, if CRLF mode is enabled in a particular context, /// then it takes precedence over any configured line terminator. 
/// /// This option cannot be configured from within the pattern. /// /// The default line terminator is `\n`. /// /// # Example /// /// This shows how to treat the NUL byte as a line terminator. This can /// be a useful heuristic when searching binary data. /// /// ``` /// use regex::RegexBuilder; /// /// let re = RegexBuilder::new(r"^foo$") /// .multi_line(true) /// .line_terminator(b'\x00') /// .build() /// .unwrap(); /// let hay = "\x00foo\x00"; /// assert_eq!(Some(1..4), re.find(hay).map(|m| m.range())); /// ``` /// /// This example shows that the behavior of `.` is impacted by this /// setting as well: /// /// ``` /// use regex::RegexBuilder; /// /// let re = RegexBuilder::new(r".") /// .line_terminator(b'\x00') /// .build() /// .unwrap(); /// assert!(re.is_match("\n")); /// assert!(!re.is_match("\x00")); /// ``` /// /// This shows that building a regex will fail if the byte given /// is not ASCII and the pattern could result in matching invalid /// UTF-8. This is because any singular non-ASCII byte is not valid /// UTF-8, and it is not permitted for a [`Regex`] to match invalid /// UTF-8. (It is permissible to use a non-ASCII byte when building a /// [`bytes::Regex`](crate::bytes::Regex).) /// /// ``` /// use regex::RegexBuilder; /// /// assert!(RegexBuilder::new(r".").line_terminator(0x80).build().is_err()); /// // Note that using a non-ASCII byte isn't enough on its own to /// // cause regex compilation to fail. You actually have to make use /// // of it in the regex in a way that leads to matching invalid /// // UTF-8. If you don't, then regex compilation will succeed! /// assert!(RegexBuilder::new(r"a").line_terminator(0x80).build().is_ok()); /// ``` pub fn line_terminator(&mut self, byte: u8) -> &mut RegexBuilder { self.builder.line_terminator(byte); self } /// This configures swap-greed mode for the entire pattern. /// /// When swap-greed mode is enabled, patterns like `a+` will become /// non-greedy and patterns like `a+?` will become greedy. In other /// words, the meanings of `a+` and `a+?` are switched. /// /// This setting can also be configured using the inline flag `U` in /// the pattern. /// /// The default for this is `false`. /// /// # Example /// /// ``` /// use regex::RegexBuilder; /// /// let re = RegexBuilder::new(r"a+") /// .swap_greed(true) /// .build() /// .unwrap(); /// assert_eq!(Some("a"), re.find("aaa").map(|m| m.as_str())); /// ``` pub fn swap_greed(&mut self, yes: bool) -> &mut RegexBuilder { self.builder.swap_greed(yes); self } /// This configures verbose mode for the entire pattern. /// /// When enabled, whitespace will treated as insignifcant in the /// pattern and `#` can be used to start a comment until the next new /// line. /// /// Normally, in most places in a pattern, whitespace is treated /// literally. For example ` +` will match one or more ASCII whitespace /// characters. /// /// When verbose mode is enabled, `\#` can be used to match a literal /// `#` and `\ ` can be used to match a literal ASCII whitespace /// character. /// /// Verbose mode is useful for permitting regexes to be formatted and /// broken up more nicely. This may make them more easily readable. /// /// This setting can also be configured using the inline flag `x` in /// the pattern. /// /// The default for this is `false`. /// /// # Example /// /// ``` /// use regex::RegexBuilder; /// /// let pat = r" /// \b /// (?<first>\p{Uppercase}\w*) # always start with uppercase letter /// [\s--\n]+ # whitespace should separate names /// (?: # middle name can be an initial! 
        ///         (?:(?<initial>\p{Uppercase})\.|(?<middle>\p{Uppercase}\w*))
        ///         [\s--\n]+
        ///     )?
        ///     (?<last>\p{Uppercase}\w*)
        ///     \b
        /// ";
        /// let re = RegexBuilder::new(pat)
        ///     .ignore_whitespace(true)
        ///     .build()
        ///     .unwrap();
        ///
        /// let caps = re.captures("Harry Potter").unwrap();
        /// assert_eq!("Harry", &caps["first"]);
        /// assert_eq!("Potter", &caps["last"]);
        ///
        /// let caps = re.captures("Harry J. Potter").unwrap();
        /// assert_eq!("Harry", &caps["first"]);
        /// // Since a middle name/initial isn't required for an overall match,
        /// // we can't assume that 'initial' or 'middle' will be populated!
        /// assert_eq!(Some("J"), caps.name("initial").map(|m| m.as_str()));
        /// assert_eq!(None, caps.name("middle").map(|m| m.as_str()));
        /// assert_eq!("Potter", &caps["last"]);
        ///
        /// let caps = re.captures("Harry James Potter").unwrap();
        /// assert_eq!("Harry", &caps["first"]);
        /// // Since a middle name/initial isn't required for an overall match,
        /// // we can't assume that 'initial' or 'middle' will be populated!
        /// assert_eq!(None, caps.name("initial").map(|m| m.as_str()));
        /// assert_eq!(Some("James"), caps.name("middle").map(|m| m.as_str()));
        /// assert_eq!("Potter", &caps["last"]);
        /// ```
        pub fn ignore_whitespace(&mut self, yes: bool) -> &mut RegexBuilder {
            self.builder.ignore_whitespace(yes);
            self
        }

        /// This configures octal mode for the entire pattern.
        ///
        /// Octal syntax is a little-known way of uttering Unicode codepoints
        /// in a pattern. For example, `a`, `\x61`, `\u0061` and `\141` are all
        /// equivalent patterns, where the last example shows octal syntax.
        ///
        /// While supporting octal syntax isn't in and of itself a problem,
        /// it does make good error messages harder. That is, in PCRE based
        /// regex engines, syntax like `\1` invokes a backreference, which is
        /// explicitly unsupported in this library. However, many users expect
        /// backreferences to be supported. Therefore, when octal support
        /// is disabled, the error message will explicitly mention that
        /// backreferences aren't supported.
        ///
        /// The default for this is `false`.
        ///
        /// # Example
        ///
        /// ```
        /// use regex::RegexBuilder;
        ///
        /// // Normally this pattern would not compile, with an error message
        /// // about backreferences not being supported. But with octal mode
        /// // enabled, octal escape sequences work.
        /// let re = RegexBuilder::new(r"\141")
        ///     .octal(true)
        ///     .build()
        ///     .unwrap();
        /// assert!(re.is_match("a"));
        /// ```
        pub fn octal(&mut self, yes: bool) -> &mut RegexBuilder {
            self.builder.octal(yes);
            self
        }

        /// Sets the approximate size limit, in bytes, of the compiled regex.
        ///
        /// This roughly corresponds to the amount of heap memory, in
        /// bytes, occupied by a single regex. If the regex would otherwise
        /// approximately exceed this limit, then compiling that regex will
        /// fail.
        ///
        /// The main utility of a method like this is to avoid compiling
        /// regexes that use an unexpected amount of resources, such as
        /// time and memory. Even if the memory usage of a large regex is
        /// acceptable, its search time may not be. Namely, worst case time
        /// complexity for search is `O(m * n)`, where `m ~ len(pattern)` and
        /// `n ~ len(haystack)`. That is, search time depends, in part, on the
        /// size of the compiled regex. This means that putting a limit on the
        /// size of the regex limits how much a regex can impact search time.
        ///
        /// For more information about regex size limits, see the section on
        /// [untrusted inputs](crate#untrusted-input) in the top-level crate
        /// documentation.
        ///
        /// The default for this is some reasonable number that permits most
        /// patterns to compile successfully.
        ///
        /// # Example
        ///
        /// ```
        /// use regex::RegexBuilder;
        ///
        /// // It may surprise you how big some seemingly small patterns can
        /// // be! Since \w is Unicode aware, this generates a regex that can
        /// // match approximately 140,000 distinct codepoints.
        /// assert!(RegexBuilder::new(r"\w").size_limit(45_000).build().is_err());
        /// ```
        pub fn size_limit(&mut self, bytes: usize) -> &mut RegexBuilder {
            self.builder.size_limit(bytes);
            self
        }

        /// Set the approximate capacity, in bytes, of the cache of transitions
        /// used by the lazy DFA.
        ///
        /// While the lazy DFA isn't always used, it tends to be the most
        /// commonly used regex engine in default configurations. It tends to
        /// adopt the performance profile of a fully built DFA, but without the
        /// downside of taking worst case exponential time to build.
        ///
        /// The downside is that it needs to keep a cache of transitions and
        /// states that are built while running a search, and this cache
        /// can fill up. When it fills up, the cache will reset itself. Any
        /// previously generated states and transitions will then need to be
        /// re-generated. If this happens too many times, then this library
        /// will bail out of using the lazy DFA and switch to a different regex
        /// engine.
        ///
        /// If your regex provokes this particular downside of the lazy DFA,
        /// then it may be beneficial to increase its cache capacity. This will
        /// potentially reduce the frequency of cache resetting (ideally to
        /// `0`). While it won't fix all potential performance problems with
        /// the lazy DFA, increasing the cache capacity does fix some.
        ///
        /// There is no easy way to determine, a priori, whether increasing
        /// this cache capacity will help. In general, the larger your regex,
        /// the more cache it's likely to use. But that isn't an ironclad rule.
        /// For example, a regex like `[01]*1[01]{N}` would normally produce a
        /// fully built DFA that is exponential in size with respect to `N`.
        /// The lazy DFA will prevent exponential space blow-up, but its cache
        /// is likely to fill up, even when it's large and even for smallish
        /// values of `N`.
        ///
        /// If you aren't sure whether this helps or not, it is sensible to
        /// set this to some arbitrarily large number in testing, such as
        /// `usize::MAX`. Namely, this represents the amount of capacity that
        /// *may* be used. It's probably not a good idea to use `usize::MAX` in
        /// production though, since it implies there are no controls on heap
        /// memory used by this library during a search. In effect, set it to
        /// whatever you're willing to allocate for a single regex search.
        pub fn dfa_size_limit(&mut self, bytes: usize) -> &mut RegexBuilder {
            self.builder.dfa_size_limit(bytes);
            self
        }

        /// Set the nesting limit for this parser.
        ///
        /// The nesting limit controls how deep the abstract syntax tree is
        /// allowed to be. If the AST exceeds the given limit (e.g., with too
        /// many nested groups), then an error is returned by the parser.
        ///
        /// The purpose of this limit is to act as a heuristic to prevent stack
        /// overflow for consumers that do structural induction on an AST using
        /// explicit recursion. While this crate never does this (instead using
        /// constant stack space and moving the call stack to the heap), other
        /// crates may.
        ///
        /// This limit is not checked until the entire AST is parsed.
/// Therefore, if callers want to put a limit on the amount of heap /// space used, then they should impose a limit on the length, in /// bytes, of the concrete pattern string. In particular, this is /// viable since this parser implementation will limit itself to heap /// space proportional to the length of the pattern string. See also /// the [untrusted inputs](crate#untrusted-input) section in the /// top-level crate documentation for more information about this. /// /// Note that a nest limit of `0` will return a nest limit error for /// most patterns but not all. For example, a nest limit of `0` permits /// `a` but not `ab`, since `ab` requires an explicit concatenation, /// which results in a nest depth of `1`. In general, a nest limit is /// not something that manifests in an obvious way in the concrete /// syntax, therefore, it should not be used in a granular way. /// /// # Example /// /// ``` /// use regex::RegexBuilder; /// /// assert!(RegexBuilder::new(r"a").nest_limit(0).build().is_ok()); /// assert!(RegexBuilder::new(r"ab").nest_limit(0).build().is_err()); /// ``` pub fn nest_limit(&mut self, limit: u32) -> &mut RegexBuilder { self.builder.nest_limit(limit); self } } /// A configurable builder for a [`RegexSet`]. /// /// This builder can be used to programmatically set flags such as /// `i` (case insensitive) and `x` (for verbose mode). This builder /// can also be used to configure things like the line terminator /// and a size limit on the compiled regular expression. #[derive(Clone, Debug)] pub struct RegexSetBuilder { builder: Builder, } impl RegexSetBuilder { /// Create a new builder with a default configuration for the given /// patterns. /// /// If the patterns are invalid or exceed the configured size limits, /// then an error will be returned when [`RegexSetBuilder::build`] is /// called. pub fn new<I, S>(patterns: I) -> RegexSetBuilder where I: IntoIterator<Item = S>, S: AsRef<str>, { RegexSetBuilder { builder: Builder::new(patterns) } } /// Compiles the patterns given to `RegexSetBuilder::new` with the /// configuration set on this builder. /// /// If the patterns aren't valid regexes or if a configured size limit /// was exceeded, then an error is returned. pub fn build(&self) -> Result<RegexSet, Error> { self.builder.build_many_string() } /// This configures Unicode mode for the all of the patterns. /// /// Enabling Unicode mode does a number of things: /// /// * Most fundamentally, it causes the fundamental atom of matching /// to be a single codepoint. When Unicode mode is disabled, it's a /// single byte. For example, when Unicode mode is enabled, `.` will /// match `💩` once, where as it will match 4 times when Unicode mode /// is disabled. (Since the UTF-8 encoding of `💩` is 4 bytes long.) /// * Case insensitive matching uses Unicode simple case folding rules. /// * Unicode character classes like `\p{Letter}` and `\p{Greek}` are /// available. /// * Perl character classes are Unicode aware. That is, `\w`, `\s` and /// `\d`. /// * The word boundary assertions, `\b` and `\B`, use the Unicode /// definition of a word character. /// /// Note that if Unicode mode is disabled, then the regex will fail to /// compile if it could match invalid UTF-8. For example, when Unicode /// mode is disabled, then since `.` matches any byte (except for /// `\n`), then it can match invalid UTF-8 and thus building a regex /// from it will fail. Another example is `\w` and `\W`. Since `\w` can /// only match ASCII bytes when Unicode mode is disabled, it's allowed. 
/// But `\W` can match more than ASCII bytes, including invalid UTF-8, /// and so it is not allowed. This restriction can be lifted only by /// using a [`bytes::RegexSet`](crate::bytes::RegexSet). /// /// For more details on the Unicode support in this crate, see the /// [Unicode section](crate#unicode) in this crate's top-level /// documentation. /// /// The default for this is `true`. /// /// # Example /// /// ``` /// use regex::RegexSetBuilder; /// /// let re = RegexSetBuilder::new([r"\w"]) /// .unicode(false) /// .build() /// .unwrap(); /// // Normally greek letters would be included in \w, but since /// // Unicode mode is disabled, it only matches ASCII letters. /// assert!(!re.is_match("δ")); /// /// let re = RegexSetBuilder::new([r"s"]) /// .case_insensitive(true) /// .unicode(false) /// .build() /// .unwrap(); /// // Normally 'ſ' is included when searching for 's' case /// // insensitively due to Unicode's simple case folding rules. But /// // when Unicode mode is disabled, only ASCII case insensitive rules /// // are used. /// assert!(!re.is_match("ſ")); /// ``` pub fn unicode(&mut self, yes: bool) -> &mut RegexSetBuilder { self.builder.unicode(yes); self } /// This configures whether to enable case insensitive matching for all /// of the patterns. /// /// This setting can also be configured using the inline flag `i` /// in the pattern. For example, `(?i:foo)` matches `foo` case /// insensitively while `(?-i:foo)` matches `foo` case sensitively. /// /// The default for this is `false`. /// /// # Example /// /// ``` /// use regex::RegexSetBuilder; /// /// let re = RegexSetBuilder::new([r"foo(?-i:bar)quux"]) /// .case_insensitive(true) /// .build() /// .unwrap(); /// assert!(re.is_match("FoObarQuUx")); /// // Even though case insensitive matching is enabled in the builder, /// // it can be locally disabled within the pattern. In this case, /// // `bar` is matched case sensitively. /// assert!(!re.is_match("fooBARquux")); /// ``` pub fn case_insensitive(&mut self, yes: bool) -> &mut RegexSetBuilder { self.builder.case_insensitive(yes); self } /// This configures multi-line mode for all of the patterns. /// /// Enabling multi-line mode changes the behavior of the `^` and `$` /// anchor assertions. Instead of only matching at the beginning and /// end of a haystack, respectively, multi-line mode causes them to /// match at the beginning and end of a line *in addition* to the /// beginning and end of a haystack. More precisely, `^` will match at /// the position immediately following a `\n` and `$` will match at the /// position immediately preceding a `\n`. /// /// The behavior of this option can be impacted by other settings too: /// /// * The [`RegexSetBuilder::line_terminator`] option changes `\n` /// above to any ASCII byte. /// * The [`RegexSetBuilder::crlf`] option changes the line terminator /// to be either `\r` or `\n`, but never at the position between a `\r` /// and `\n`. /// /// This setting can also be configured using the inline flag `m` in /// the pattern. /// /// The default for this is `false`. /// /// # Example /// /// ``` /// use regex::RegexSetBuilder; /// /// let re = RegexSetBuilder::new([r"^foo$"]) /// .multi_line(true) /// .build() /// .unwrap(); /// assert!(re.is_match("\nfoo\n")); /// ``` pub fn multi_line(&mut self, yes: bool) -> &mut RegexSetBuilder { self.builder.multi_line(yes); self } /// This configures dot-matches-new-line mode for the entire pattern. 
/// /// Perhaps surprisingly, the default behavior for `.` is not to match /// any character, but rather, to match any character except for the /// line terminator (which is `\n` by default). When this mode is /// enabled, the behavior changes such that `.` truly matches any /// character. /// /// This setting can also be configured using the inline flag `s` in /// the pattern. For example, `(?s:.)` and `\p{any}` are equivalent /// regexes. /// /// The default for this is `false`. /// /// # Example /// /// ``` /// use regex::RegexSetBuilder; /// /// let re = RegexSetBuilder::new([r"foo.bar"]) /// .dot_matches_new_line(true) /// .build() /// .unwrap(); /// let hay = "foo\nbar"; /// assert!(re.is_match(hay)); /// ``` pub fn dot_matches_new_line( &mut self, yes: bool, ) -> &mut RegexSetBuilder { self.builder.dot_matches_new_line(yes); self } /// This configures CRLF mode for all of the patterns. /// /// When CRLF mode is enabled, both `\r` ("carriage return" or CR for /// short) and `\n` ("line feed" or LF for short) are treated as line /// terminators. This results in the following: /// /// * Unless dot-matches-new-line mode is enabled, `.` will now match /// any character except for `\n` and `\r`. /// * When multi-line mode is enabled, `^` will match immediately /// following a `\n` or a `\r`. Similarly, `$` will match immediately /// preceding a `\n` or a `\r`. Neither `^` nor `$` will ever match /// between `\r` and `\n`. /// /// This setting can also be configured using the inline flag `R` in /// the pattern. /// /// The default for this is `false`. /// /// # Example /// /// ``` /// use regex::RegexSetBuilder; /// /// let re = RegexSetBuilder::new([r"^foo$"]) /// .multi_line(true) /// .crlf(true) /// .build() /// .unwrap(); /// let hay = "\r\nfoo\r\n"; /// // If CRLF mode weren't enabled here, then '$' wouldn't match /// // immediately after 'foo', and thus no match would be found. /// assert!(re.is_match(hay)); /// ``` /// /// This example demonstrates that `^` will never match at a position /// between `\r` and `\n`. (`$` will similarly not match between a `\r` /// and a `\n`.) /// /// ``` /// use regex::RegexSetBuilder; /// /// let re = RegexSetBuilder::new([r"^\n"]) /// .multi_line(true) /// .crlf(true) /// .build() /// .unwrap(); /// assert!(!re.is_match("\r\n")); /// ``` pub fn crlf(&mut self, yes: bool) -> &mut RegexSetBuilder { self.builder.crlf(yes); self } /// Configures the line terminator to be used by the regex. /// /// The line terminator is relevant in two ways for a particular regex: /// /// * When dot-matches-new-line mode is *not* enabled (the default), /// then `.` will match any character except for the configured line /// terminator. /// * When multi-line mode is enabled (not the default), then `^` and /// `$` will match immediately after and before, respectively, a line /// terminator. /// /// In both cases, if CRLF mode is enabled in a particular context, /// then it takes precedence over any configured line terminator. /// /// This option cannot be configured from within the pattern. /// /// The default line terminator is `\n`. /// /// # Example /// /// This shows how to treat the NUL byte as a line terminator. This can /// be a useful heuristic when searching binary data. 
/// /// ``` /// use regex::RegexSetBuilder; /// /// let re = RegexSetBuilder::new([r"^foo$"]) /// .multi_line(true) /// .line_terminator(b'\x00') /// .build() /// .unwrap(); /// let hay = "\x00foo\x00"; /// assert!(re.is_match(hay)); /// ``` /// /// This example shows that the behavior of `.` is impacted by this /// setting as well: /// /// ``` /// use regex::RegexSetBuilder; /// /// let re = RegexSetBuilder::new([r"."]) /// .line_terminator(b'\x00') /// .build() /// .unwrap(); /// assert!(re.is_match("\n")); /// assert!(!re.is_match("\x00")); /// ``` /// /// This shows that building a regex will fail if the byte given /// is not ASCII and the pattern could result in matching invalid /// UTF-8. This is because any singular non-ASCII byte is not valid /// UTF-8, and it is not permitted for a [`RegexSet`] to match invalid /// UTF-8. (It is permissible to use a non-ASCII byte when building a /// [`bytes::RegexSet`](crate::bytes::RegexSet).) /// /// ``` /// use regex::RegexSetBuilder; /// /// assert!( /// RegexSetBuilder::new([r"."]) /// .line_terminator(0x80) /// .build() /// .is_err() /// ); /// // Note that using a non-ASCII byte isn't enough on its own to /// // cause regex compilation to fail. You actually have to make use /// // of it in the regex in a way that leads to matching invalid /// // UTF-8. If you don't, then regex compilation will succeed! /// assert!( /// RegexSetBuilder::new([r"a"]) /// .line_terminator(0x80) /// .build() /// .is_ok() /// ); /// ``` pub fn line_terminator(&mut self, byte: u8) -> &mut RegexSetBuilder { self.builder.line_terminator(byte); self } /// This configures swap-greed mode for all of the patterns. /// /// When swap-greed mode is enabled, patterns like `a+` will become /// non-greedy and patterns like `a+?` will become greedy. In other /// words, the meanings of `a+` and `a+?` are switched. /// /// This setting can also be configured using the inline flag `U` in /// the pattern. /// /// Note that this is generally not useful for a `RegexSet` since a /// `RegexSet` can only report whether a pattern matches or not. Since /// greediness never impacts whether a match is found or not (only the /// offsets of the match), it follows that whether parts of a pattern /// are greedy or not doesn't matter for a `RegexSet`. /// /// The default for this is `false`. pub fn swap_greed(&mut self, yes: bool) -> &mut RegexSetBuilder { self.builder.swap_greed(yes); self } /// This configures verbose mode for all of the patterns. /// /// When enabled, whitespace will treated as insignifcant in the /// pattern and `#` can be used to start a comment until the next new /// line. /// /// Normally, in most places in a pattern, whitespace is treated /// literally. For example ` +` will match one or more ASCII whitespace /// characters. /// /// When verbose mode is enabled, `\#` can be used to match a literal /// `#` and `\ ` can be used to match a literal ASCII whitespace /// character. /// /// Verbose mode is useful for permitting regexes to be formatted and /// broken up more nicely. This may make them more easily readable. /// /// This setting can also be configured using the inline flag `x` in /// the pattern. /// /// The default for this is `false`. /// /// # Example /// /// ``` /// use regex::RegexSetBuilder; /// /// let pat = r" /// \b /// (?<first>\p{Uppercase}\w*) # always start with uppercase letter /// [\s--\n]+ # whitespace should separate names /// (?: # middle name can be an initial! 
        ///         (?:(?<initial>\p{Uppercase})\.|(?<middle>\p{Uppercase}\w*))
        ///         [\s--\n]+
        ///     )?
        ///     (?<last>\p{Uppercase}\w*)
        ///     \b
        /// ";
        /// let re = RegexSetBuilder::new([pat])
        ///     .ignore_whitespace(true)
        ///     .build()
        ///     .unwrap();
        /// assert!(re.is_match("Harry Potter"));
        /// assert!(re.is_match("Harry J. Potter"));
        /// assert!(re.is_match("Harry James Potter"));
        /// assert!(!re.is_match("harry james potter"));
        /// ```
        pub fn ignore_whitespace(
            &mut self,
            yes: bool,
        ) -> &mut RegexSetBuilder {
            self.builder.ignore_whitespace(yes);
            self
        }

        /// This configures octal mode for all of the patterns.
        ///
        /// Octal syntax is a little-known way of uttering Unicode codepoints
        /// in a pattern. For example, `a`, `\x61`, `\u0061` and `\141` are all
        /// equivalent patterns, where the last example shows octal syntax.
        ///
        /// While supporting octal syntax isn't in and of itself a problem,
        /// it does make good error messages harder. That is, in PCRE based
        /// regex engines, syntax like `\1` invokes a backreference, which is
        /// explicitly unsupported by this library. However, many users expect
        /// backreferences to be supported. Therefore, when octal support
        /// is disabled, the error message will explicitly mention that
        /// backreferences aren't supported.
        ///
        /// The default for this is `false`.
        ///
        /// # Example
        ///
        /// ```
        /// use regex::RegexSetBuilder;
        ///
        /// // Normally this pattern would not compile, with an error message
        /// // about backreferences not being supported. But with octal mode
        /// // enabled, octal escape sequences work.
        /// let re = RegexSetBuilder::new([r"\141"])
        ///     .octal(true)
        ///     .build()
        ///     .unwrap();
        /// assert!(re.is_match("a"));
        /// ```
        pub fn octal(&mut self, yes: bool) -> &mut RegexSetBuilder {
            self.builder.octal(yes);
            self
        }

        /// Sets the approximate size limit, in bytes, of the compiled regex.
        ///
        /// This roughly corresponds to the amount of heap memory, in
        /// bytes, occupied by a single regex. If the regex would otherwise
        /// approximately exceed this limit, then compiling that regex will
        /// fail.
        ///
        /// The main utility of a method like this is to avoid compiling
        /// regexes that use an unexpected amount of resources, such as
        /// time and memory. Even if the memory usage of a large regex is
        /// acceptable, its search time may not be. Namely, worst case time
        /// complexity for search is `O(m * n)`, where `m ~ len(pattern)` and
        /// `n ~ len(haystack)`. That is, search time depends, in part, on the
        /// size of the compiled regex. This means that putting a limit on the
        /// size of the regex limits how much a regex can impact search time.
        ///
        /// For more information about regex size limits, see the section on
        /// [untrusted inputs](crate#untrusted-input) in the top-level crate
        /// documentation.
        ///
        /// The default for this is some reasonable number that permits most
        /// patterns to compile successfully.
        ///
        /// # Example
        ///
        /// ```
        /// use regex::RegexSetBuilder;
        ///
        /// // It may surprise you how big some seemingly small patterns can
        /// // be! Since \w is Unicode aware, this generates a regex that can
        /// // match approximately 140,000 distinct codepoints.
        /// assert!(
        ///     RegexSetBuilder::new([r"\w"])
        ///         .size_limit(45_000)
        ///         .build()
        ///         .is_err()
        /// );
        /// ```
        pub fn size_limit(&mut self, bytes: usize) -> &mut RegexSetBuilder {
            self.builder.size_limit(bytes);
            self
        }

        /// Set the approximate capacity, in bytes, of the cache of transitions
        /// used by the lazy DFA.
        ///
        /// While the lazy DFA isn't always used, it tends to be the most
        /// commonly used regex engine in default configurations. It tends to
        /// adopt the performance profile of a fully built DFA, but without the
        /// downside of taking worst case exponential time to build.
        ///
        /// The downside is that it needs to keep a cache of transitions and
        /// states that are built while running a search, and this cache
        /// can fill up. When it fills up, the cache will reset itself. Any
        /// previously generated states and transitions will then need to be
        /// re-generated. If this happens too many times, then this library
        /// will bail out of using the lazy DFA and switch to a different regex
        /// engine.
        ///
        /// If your regex provokes this particular downside of the lazy DFA,
        /// then it may be beneficial to increase its cache capacity. This will
        /// potentially reduce the frequency of cache resetting (ideally to
        /// `0`). While it won't fix all potential performance problems with
        /// the lazy DFA, increasing the cache capacity does fix some.
        ///
        /// There is no easy way to determine, a priori, whether increasing
        /// this cache capacity will help. In general, the larger your regex,
        /// the more cache it's likely to use. But that isn't an ironclad rule.
        /// For example, a regex like `[01]*1[01]{N}` would normally produce a
        /// fully built DFA that is exponential in size with respect to `N`.
        /// The lazy DFA will prevent exponential space blow-up, but its cache
        /// is likely to fill up, even when it's large and even for smallish
        /// values of `N`.
        ///
        /// If you aren't sure whether this helps or not, it is sensible to
        /// set this to some arbitrarily large number in testing, such as
        /// `usize::MAX`. Namely, this represents the amount of capacity that
        /// *may* be used. It's probably not a good idea to use `usize::MAX` in
        /// production though, since it implies there are no controls on heap
        /// memory used by this library during a search. In effect, set it to
        /// whatever you're willing to allocate for a single regex search.
        pub fn dfa_size_limit(
            &mut self,
            bytes: usize,
        ) -> &mut RegexSetBuilder {
            self.builder.dfa_size_limit(bytes);
            self
        }

        /// Set the nesting limit for this parser.
        ///
        /// The nesting limit controls how deep the abstract syntax tree is
        /// allowed to be. If the AST exceeds the given limit (e.g., with too
        /// many nested groups), then an error is returned by the parser.
        ///
        /// The purpose of this limit is to act as a heuristic to prevent stack
        /// overflow for consumers that do structural induction on an AST using
        /// explicit recursion. While this crate never does this (instead using
        /// constant stack space and moving the call stack to the heap), other
        /// crates may.
        ///
        /// This limit is not checked until the entire AST is parsed.
        /// Therefore, if callers want to put a limit on the amount of heap
        /// space used, then they should impose a limit on the length, in
        /// bytes, of the concrete pattern string. In particular, this is
        /// viable since this parser implementation will limit itself to heap
        /// space proportional to the length of the pattern string. See also
        /// the [untrusted inputs](crate#untrusted-input) section in the
        /// top-level crate documentation for more information about this.
        ///
        /// Note that a nest limit of `0` will return a nest limit error for
        /// most patterns but not all. For example, a nest limit of `0` permits
        /// `a` but not `ab`, since `ab` requires an explicit concatenation,
        /// which results in a nest depth of `1`. In general, a nest limit is
        /// not something that manifests in an obvious way in the concrete
        /// syntax, therefore, it should not be used in a granular way.
/// /// # Example /// /// ``` /// use regex::RegexSetBuilder; /// /// assert!(RegexSetBuilder::new([r"a"]).nest_limit(0).build().is_ok()); /// assert!(RegexSetBuilder::new([r"ab"]).nest_limit(0).build().is_err()); /// ``` pub fn nest_limit(&mut self, limit: u32) -> &mut RegexSetBuilder { self.builder.nest_limit(limit); self } } } pub(crate) mod bytes { use crate::{ bytes::{Regex, RegexSet}, error::Error, }; use super::Builder; /// A configurable builder for a [`Regex`]. /// /// This builder can be used to programmatically set flags such as `i` /// (case insensitive) and `x` (for verbose mode). This builder can also be /// used to configure things like the line terminator and a size limit on /// the compiled regular expression. #[derive(Clone, Debug)] pub struct RegexBuilder { builder: Builder, } impl RegexBuilder { /// Create a new builder with a default configuration for the given /// pattern. /// /// If the pattern is invalid or exceeds the configured size limits, /// then an error will be returned when [`RegexBuilder::build`] is /// called. pub fn new(pattern: &str) -> RegexBuilder { RegexBuilder { builder: Builder::new([pattern]) } } /// Compiles the pattern given to `RegexBuilder::new` with the /// configuration set on this builder. /// /// If the pattern isn't a valid regex or if a configured size limit /// was exceeded, then an error is returned. pub fn build(&self) -> Result<Regex, Error> { self.builder.build_one_bytes() } /// This configures Unicode mode for the entire pattern. /// /// Enabling Unicode mode does a number of things: /// /// * Most fundamentally, it causes the fundamental atom of matching /// to be a single codepoint. When Unicode mode is disabled, it's a /// single byte. For example, when Unicode mode is enabled, `.` will /// match `💩` once, where as it will match 4 times when Unicode mode /// is disabled. (Since the UTF-8 encoding of `💩` is 4 bytes long.) /// * Case insensitive matching uses Unicode simple case folding rules. /// * Unicode character classes like `\p{Letter}` and `\p{Greek}` are /// available. /// * Perl character classes are Unicode aware. That is, `\w`, `\s` and /// `\d`. /// * The word boundary assertions, `\b` and `\B`, use the Unicode /// definition of a word character. /// /// Note that unlike the top-level `Regex` for searching `&str`, it /// is permitted to disable Unicode mode even if the resulting pattern /// could match invalid UTF-8. For example, `(?-u:.)` is not a valid /// pattern for a top-level `Regex`, but is valid for a `bytes::Regex`. /// /// For more details on the Unicode support in this crate, see the /// [Unicode section](crate#unicode) in this crate's top-level /// documentation. /// /// The default for this is `true`. /// /// # Example /// /// ``` /// use regex::bytes::RegexBuilder; /// /// let re = RegexBuilder::new(r"\w") /// .unicode(false) /// .build() /// .unwrap(); /// // Normally greek letters would be included in \w, but since /// // Unicode mode is disabled, it only matches ASCII letters. /// assert!(!re.is_match("δ".as_bytes())); /// /// let re = RegexBuilder::new(r"s") /// .case_insensitive(true) /// .unicode(false) /// .build() /// .unwrap(); /// // Normally 'ſ' is included when searching for 's' case /// // insensitively due to Unicode's simple case folding rules. But /// // when Unicode mode is disabled, only ASCII case insensitive rules /// // are used. 
/// assert!(!re.is_match("ſ".as_bytes())); /// ``` /// /// Since this builder is for constructing a [`bytes::Regex`](Regex), /// one can disable Unicode mode even if it would match invalid UTF-8: /// /// ``` /// use regex::bytes::RegexBuilder; /// /// let re = RegexBuilder::new(r".") /// .unicode(false) /// .build() /// .unwrap(); /// // Normally greek letters would be included in \w, but since /// // Unicode mode is disabled, it only matches ASCII letters. /// assert!(re.is_match(b"\xFF")); /// ``` pub fn unicode(&mut self, yes: bool) -> &mut RegexBuilder { self.builder.unicode(yes); self } /// This configures whether to enable case insensitive matching for the /// entire pattern. /// /// This setting can also be configured using the inline flag `i` /// in the pattern. For example, `(?i:foo)` matches `foo` case /// insensitively while `(?-i:foo)` matches `foo` case sensitively. /// /// The default for this is `false`. /// /// # Example /// /// ``` /// use regex::bytes::RegexBuilder; /// /// let re = RegexBuilder::new(r"foo(?-i:bar)quux") /// .case_insensitive(true) /// .build() /// .unwrap(); /// assert!(re.is_match(b"FoObarQuUx")); /// // Even though case insensitive matching is enabled in the builder, /// // it can be locally disabled within the pattern. In this case, /// // `bar` is matched case sensitively. /// assert!(!re.is_match(b"fooBARquux")); /// ``` pub fn case_insensitive(&mut self, yes: bool) -> &mut RegexBuilder { self.builder.case_insensitive(yes); self } /// This configures multi-line mode for the entire pattern. /// /// Enabling multi-line mode changes the behavior of the `^` and `$` /// anchor assertions. Instead of only matching at the beginning and /// end of a haystack, respectively, multi-line mode causes them to /// match at the beginning and end of a line *in addition* to the /// beginning and end of a haystack. More precisely, `^` will match at /// the position immediately following a `\n` and `$` will match at the /// position immediately preceding a `\n`. /// /// The behavior of this option can be impacted by other settings too: /// /// * The [`RegexBuilder::line_terminator`] option changes `\n` above /// to any ASCII byte. /// * The [`RegexBuilder::crlf`] option changes the line terminator to /// be either `\r` or `\n`, but never at the position between a `\r` /// and `\n`. /// /// This setting can also be configured using the inline flag `m` in /// the pattern. /// /// The default for this is `false`. /// /// # Example /// /// ``` /// use regex::bytes::RegexBuilder; /// /// let re = RegexBuilder::new(r"^foo$") /// .multi_line(true) /// .build() /// .unwrap(); /// assert_eq!(Some(1..4), re.find(b"\nfoo\n").map(|m| m.range())); /// ``` pub fn multi_line(&mut self, yes: bool) -> &mut RegexBuilder { self.builder.multi_line(yes); self } /// This configures dot-matches-new-line mode for the entire pattern. /// /// Perhaps surprisingly, the default behavior for `.` is not to match /// any character, but rather, to match any character except for the /// line terminator (which is `\n` by default). When this mode is /// enabled, the behavior changes such that `.` truly matches any /// character. /// /// This setting can also be configured using the inline flag `s` in /// the pattern. For example, `(?s:.)` and `\p{any}` are equivalent /// regexes. /// /// The default for this is `false`. 
/// /// # Example /// /// ``` /// use regex::bytes::RegexBuilder; /// /// let re = RegexBuilder::new(r"foo.bar") /// .dot_matches_new_line(true) /// .build() /// .unwrap(); /// let hay = b"foo\nbar"; /// assert_eq!(Some(&b"foo\nbar"[..]), re.find(hay).map(|m| m.as_bytes())); /// ``` pub fn dot_matches_new_line( &mut self, yes: bool, ) -> &mut RegexBuilder { self.builder.dot_matches_new_line(yes); self } /// This configures CRLF mode for the entire pattern. /// /// When CRLF mode is enabled, both `\r` ("carriage return" or CR for /// short) and `\n` ("line feed" or LF for short) are treated as line /// terminators. This results in the following: /// /// * Unless dot-matches-new-line mode is enabled, `.` will now match /// any character except for `\n` and `\r`. /// * When multi-line mode is enabled, `^` will match immediately /// following a `\n` or a `\r`. Similarly, `$` will match immediately /// preceding a `\n` or a `\r`. Neither `^` nor `$` will ever match /// between `\r` and `\n`. /// /// This setting can also be configured using the inline flag `R` in /// the pattern. /// /// The default for this is `false`. /// /// # Example /// /// ``` /// use regex::bytes::RegexBuilder; /// /// let re = RegexBuilder::new(r"^foo$") /// .multi_line(true) /// .crlf(true) /// .build() /// .unwrap(); /// let hay = b"\r\nfoo\r\n"; /// // If CRLF mode weren't enabled here, then '$' wouldn't match /// // immediately after 'foo', and thus no match would be found. /// assert_eq!(Some(&b"foo"[..]), re.find(hay).map(|m| m.as_bytes())); /// ``` /// /// This example demonstrates that `^` will never match at a position /// between `\r` and `\n`. (`$` will similarly not match between a `\r` /// and a `\n`.) /// /// ``` /// use regex::bytes::RegexBuilder; /// /// let re = RegexBuilder::new(r"^") /// .multi_line(true) /// .crlf(true) /// .build() /// .unwrap(); /// let hay = b"\r\n\r\n"; /// let ranges: Vec<_> = re.find_iter(hay).map(|m| m.range()).collect(); /// assert_eq!(ranges, vec![0..0, 2..2, 4..4]); /// ``` pub fn crlf(&mut self, yes: bool) -> &mut RegexBuilder { self.builder.crlf(yes); self } /// Configures the line terminator to be used by the regex. /// /// The line terminator is relevant in two ways for a particular regex: /// /// * When dot-matches-new-line mode is *not* enabled (the default), /// then `.` will match any character except for the configured line /// terminator. /// * When multi-line mode is enabled (not the default), then `^` and /// `$` will match immediately after and before, respectively, a line /// terminator. /// /// In both cases, if CRLF mode is enabled in a particular context, /// then it takes precedence over any configured line terminator. /// /// This option cannot be configured from within the pattern. /// /// The default line terminator is `\n`. /// /// # Example /// /// This shows how to treat the NUL byte as a line terminator. This can /// be a useful heuristic when searching binary data. 
/// /// ``` /// use regex::bytes::RegexBuilder; /// /// let re = RegexBuilder::new(r"^foo$") /// .multi_line(true) /// .line_terminator(b'\x00') /// .build() /// .unwrap(); /// let hay = b"\x00foo\x00"; /// assert_eq!(Some(1..4), re.find(hay).map(|m| m.range())); /// ``` /// /// This example shows that the behavior of `.` is impacted by this /// setting as well: /// /// ``` /// use regex::bytes::RegexBuilder; /// /// let re = RegexBuilder::new(r".") /// .line_terminator(b'\x00') /// .build() /// .unwrap(); /// assert!(re.is_match(b"\n")); /// assert!(!re.is_match(b"\x00")); /// ``` /// /// This shows that building a regex will work even when the byte /// given is not ASCII. This is unlike the top-level `Regex` API where /// matching invalid UTF-8 is not allowed. /// /// Note though that you must disable Unicode mode. This is required /// because Unicode mode requires matching one codepoint at a time, /// and there is no way to match a non-ASCII byte as if it were a /// codepoint. /// /// ``` /// use regex::bytes::RegexBuilder; /// /// assert!( /// RegexBuilder::new(r".") /// .unicode(false) /// .line_terminator(0x80) /// .build() /// .is_ok(), /// ); /// ``` pub fn line_terminator(&mut self, byte: u8) -> &mut RegexBuilder { self.builder.line_terminator(byte); self } /// This configures swap-greed mode for the entire pattern. /// /// When swap-greed mode is enabled, patterns like `a+` will become /// non-greedy and patterns like `a+?` will become greedy. In other /// words, the meanings of `a+` and `a+?` are switched. /// /// This setting can also be configured using the inline flag `U` in /// the pattern. /// /// The default for this is `false`. /// /// # Example /// /// ``` /// use regex::bytes::RegexBuilder; /// /// let re = RegexBuilder::new(r"a+") /// .swap_greed(true) /// .build() /// .unwrap(); /// assert_eq!(Some(&b"a"[..]), re.find(b"aaa").map(|m| m.as_bytes())); /// ``` pub fn swap_greed(&mut self, yes: bool) -> &mut RegexBuilder { self.builder.swap_greed(yes); self } /// This configures verbose mode for the entire pattern. /// /// When enabled, whitespace will treated as insignifcant in the /// pattern and `#` can be used to start a comment until the next new /// line. /// /// Normally, in most places in a pattern, whitespace is treated /// literally. For example ` +` will match one or more ASCII whitespace /// characters. /// /// When verbose mode is enabled, `\#` can be used to match a literal /// `#` and `\ ` can be used to match a literal ASCII whitespace /// character. /// /// Verbose mode is useful for permitting regexes to be formatted and /// broken up more nicely. This may make them more easily readable. /// /// This setting can also be configured using the inline flag `x` in /// the pattern. /// /// The default for this is `false`. /// /// # Example /// /// ``` /// use regex::bytes::RegexBuilder; /// /// let pat = r" /// \b /// (?<first>\p{Uppercase}\w*) # always start with uppercase letter /// [\s--\n]+ # whitespace should separate names /// (?: # middle name can be an initial! /// (?:(?<initial>\p{Uppercase})\.|(?<middle>\p{Uppercase}\w*)) /// [\s--\n]+ /// )? 
        ///     (?<last>\p{Uppercase}\w*)
        ///     \b
        /// ";
        /// let re = RegexBuilder::new(pat)
        ///     .ignore_whitespace(true)
        ///     .build()
        ///     .unwrap();
        ///
        /// let caps = re.captures(b"Harry Potter").unwrap();
        /// assert_eq!(&b"Harry"[..], &caps["first"]);
        /// assert_eq!(&b"Potter"[..], &caps["last"]);
        ///
        /// let caps = re.captures(b"Harry J. Potter").unwrap();
        /// assert_eq!(&b"Harry"[..], &caps["first"]);
        /// // Since a middle name/initial isn't required for an overall match,
        /// // we can't assume that 'initial' or 'middle' will be populated!
        /// assert_eq!(
        ///     Some(&b"J"[..]),
        ///     caps.name("initial").map(|m| m.as_bytes()),
        /// );
        /// assert_eq!(None, caps.name("middle").map(|m| m.as_bytes()));
        /// assert_eq!(&b"Potter"[..], &caps["last"]);
        ///
        /// let caps = re.captures(b"Harry James Potter").unwrap();
        /// assert_eq!(&b"Harry"[..], &caps["first"]);
        /// // Since a middle name/initial isn't required for an overall match,
        /// // we can't assume that 'initial' or 'middle' will be populated!
        /// assert_eq!(None, caps.name("initial").map(|m| m.as_bytes()));
        /// assert_eq!(
        ///     Some(&b"James"[..]),
        ///     caps.name("middle").map(|m| m.as_bytes()),
        /// );
        /// assert_eq!(&b"Potter"[..], &caps["last"]);
        /// ```
        pub fn ignore_whitespace(&mut self, yes: bool) -> &mut RegexBuilder {
            self.builder.ignore_whitespace(yes);
            self
        }

        /// This configures octal mode for the entire pattern.
        ///
        /// Octal syntax is a little-known way of uttering Unicode codepoints
        /// in a pattern. For example, `a`, `\x61`, `\u0061` and `\141` are all
        /// equivalent patterns, where the last example shows octal syntax.
        ///
        /// While supporting octal syntax isn't in and of itself a problem,
        /// it does make good error messages harder. That is, in PCRE based
        /// regex engines, syntax like `\1` invokes a backreference, which is
        /// explicitly unsupported by this library. However, many users expect
        /// backreferences to be supported. Therefore, when octal support
        /// is disabled, the error message will explicitly mention that
        /// backreferences aren't supported.
        ///
        /// The default for this is `false`.
        ///
        /// # Example
        ///
        /// ```
        /// use regex::bytes::RegexBuilder;
        ///
        /// // Normally this pattern would not compile, with an error message
        /// // about backreferences not being supported. But with octal mode
        /// // enabled, octal escape sequences work.
        /// let re = RegexBuilder::new(r"\141")
        ///     .octal(true)
        ///     .build()
        ///     .unwrap();
        /// assert!(re.is_match(b"a"));
        /// ```
        pub fn octal(&mut self, yes: bool) -> &mut RegexBuilder {
            self.builder.octal(yes);
            self
        }

        /// Sets the approximate size limit, in bytes, of the compiled regex.
        ///
        /// This roughly corresponds to the amount of heap memory, in
        /// bytes, occupied by a single regex. If the regex would otherwise
        /// approximately exceed this limit, then compiling that regex will
        /// fail.
        ///
        /// The main utility of a method like this is to avoid compiling
        /// regexes that use an unexpected amount of resources, such as
        /// time and memory. Even if the memory usage of a large regex is
        /// acceptable, its search time may not be. Namely, worst case time
        /// complexity for search is `O(m * n)`, where `m ~ len(pattern)` and
        /// `n ~ len(haystack)`. That is, search time depends, in part, on the
        /// size of the compiled regex. This means that putting a limit on the
        /// size of the regex limits how much a regex can impact search time.
        ///
        /// For more information about regex size limits, see the section on
        /// [untrusted inputs](crate#untrusted-input) in the top-level crate
        /// documentation.
        ///
        /// The default for this is some reasonable number that permits most
        /// patterns to compile successfully.
        ///
        /// # Example
        ///
        /// ```
        /// use regex::bytes::RegexBuilder;
        ///
        /// // It may surprise you how big some seemingly small patterns can
        /// // be! Since \w is Unicode aware, this generates a regex that can
        /// // match approximately 140,000 distinct codepoints.
        /// assert!(RegexBuilder::new(r"\w").size_limit(45_000).build().is_err());
        /// ```
        pub fn size_limit(&mut self, bytes: usize) -> &mut RegexBuilder {
            self.builder.size_limit(bytes);
            self
        }

        /// Set the approximate capacity, in bytes, of the cache of transitions
        /// used by the lazy DFA.
        ///
        /// While the lazy DFA isn't always used, it tends to be the most
        /// commonly used regex engine in default configurations. It tends to
        /// adopt the performance profile of a fully built DFA, but without the
        /// downside of taking worst case exponential time to build.
        ///
        /// The downside is that it needs to keep a cache of transitions and
        /// states that are built while running a search, and this cache
        /// can fill up. When it fills up, the cache will reset itself. Any
        /// previously generated states and transitions will then need to be
        /// re-generated. If this happens too many times, then this library
        /// will bail out of using the lazy DFA and switch to a different regex
        /// engine.
        ///
        /// If your regex provokes this particular downside of the lazy DFA,
        /// then it may be beneficial to increase its cache capacity. This will
        /// potentially reduce the frequency of cache resetting (ideally to
        /// `0`). While it won't fix all potential performance problems with
        /// the lazy DFA, increasing the cache capacity does fix some.
        ///
        /// There is no easy way to determine, a priori, whether increasing
        /// this cache capacity will help. In general, the larger your regex,
        /// the more cache it's likely to use. But that isn't an ironclad rule.
        /// For example, a regex like `[01]*1[01]{N}` would normally produce a
        /// fully built DFA that is exponential in size with respect to `N`.
        /// The lazy DFA will prevent exponential space blow-up, but its cache
        /// is likely to fill up, even when it's large and even for smallish
        /// values of `N`.
        ///
        /// If you aren't sure whether this helps or not, it is sensible to
        /// set this to some arbitrarily large number in testing, such as
        /// `usize::MAX`. Namely, this represents the amount of capacity that
        /// *may* be used. It's probably not a good idea to use `usize::MAX` in
        /// production though, since it implies there are no controls on heap
        /// memory used by this library during a search. In effect, set it to
        /// whatever you're willing to allocate for a single regex search.
        pub fn dfa_size_limit(&mut self, bytes: usize) -> &mut RegexBuilder {
            self.builder.dfa_size_limit(bytes);
            self
        }

        /// Set the nesting limit for this parser.
        ///
        /// The nesting limit controls how deep the abstract syntax tree is
        /// allowed to be. If the AST exceeds the given limit (e.g., with too
        /// many nested groups), then an error is returned by the parser.
        ///
        /// The purpose of this limit is to act as a heuristic to prevent stack
        /// overflow for consumers that do structural induction on an AST using
        /// explicit recursion. While this crate never does this (instead using
        /// constant stack space and moving the call stack to the heap), other
        /// crates may.
        ///
        /// This limit is not checked until the entire AST is parsed.
/// Therefore, if callers want to put a limit on the amount of heap /// space used, then they should impose a limit on the length, in /// bytes, of the concrete pattern string. In particular, this is /// viable since this parser implementation will limit itself to heap /// space proportional to the length of the pattern string. See also /// the [untrusted inputs](crate#untrusted-input) section in the /// top-level crate documentation for more information about this. /// /// Note that a nest limit of `0` will return a nest limit error for /// most patterns but not all. For example, a nest limit of `0` permits /// `a` but not `ab`, since `ab` requires an explicit concatenation, /// which results in a nest depth of `1`. In general, a nest limit is /// not something that manifests in an obvious way in the concrete /// syntax, therefore, it should not be used in a granular way. /// /// # Example /// /// ``` /// use regex::bytes::RegexBuilder; /// /// assert!(RegexBuilder::new(r"a").nest_limit(0).build().is_ok()); /// assert!(RegexBuilder::new(r"ab").nest_limit(0).build().is_err()); /// ``` pub fn nest_limit(&mut self, limit: u32) -> &mut RegexBuilder { self.builder.nest_limit(limit); self } } /// A configurable builder for a [`RegexSet`]. /// /// This builder can be used to programmatically set flags such as `i` /// (case insensitive) and `x` (for verbose mode). This builder can also be /// used to configure things like the line terminator and a size limit on /// the compiled regular expression. #[derive(Clone, Debug)] pub struct RegexSetBuilder { builder: Builder, } impl RegexSetBuilder { /// Create a new builder with a default configuration for the given /// patterns. /// /// If the patterns are invalid or exceed the configured size limits, /// then an error will be returned when [`RegexSetBuilder::build`] is /// called. pub fn new<I, S>(patterns: I) -> RegexSetBuilder where I: IntoIterator<Item = S>, S: AsRef<str>, { RegexSetBuilder { builder: Builder::new(patterns) } } /// Compiles the patterns given to `RegexSetBuilder::new` with the /// configuration set on this builder. /// /// If the patterns aren't valid regexes or if a configured size limit /// was exceeded, then an error is returned. pub fn build(&self) -> Result<RegexSet, Error> { self.builder.build_many_bytes() } /// This configures Unicode mode for the all of the patterns. /// /// Enabling Unicode mode does a number of things: /// /// * Most fundamentally, it causes the fundamental atom of matching /// to be a single codepoint. When Unicode mode is disabled, it's a /// single byte. For example, when Unicode mode is enabled, `.` will /// match `💩` once, where as it will match 4 times when Unicode mode /// is disabled. (Since the UTF-8 encoding of `💩` is 4 bytes long.) /// * Case insensitive matching uses Unicode simple case folding rules. /// * Unicode character classes like `\p{Letter}` and `\p{Greek}` are /// available. /// * Perl character classes are Unicode aware. That is, `\w`, `\s` and /// `\d`. /// * The word boundary assertions, `\b` and `\B`, use the Unicode /// definition of a word character. /// /// Note that unlike the top-level `RegexSet` for searching `&str`, /// it is permitted to disable Unicode mode even if the resulting /// pattern could match invalid UTF-8. For example, `(?-u:.)` is not /// a valid pattern for a top-level `RegexSet`, but is valid for a /// `bytes::RegexSet`. 
/// /// For more details on the Unicode support in this crate, see the /// [Unicode section](crate#unicode) in this crate's top-level /// documentation. /// /// The default for this is `true`. /// /// # Example /// /// ``` /// use regex::bytes::RegexSetBuilder; /// /// let re = RegexSetBuilder::new([r"\w"]) /// .unicode(false) /// .build() /// .unwrap(); /// // Normally greek letters would be included in \w, but since /// // Unicode mode is disabled, it only matches ASCII letters. /// assert!(!re.is_match("δ".as_bytes())); /// /// let re = RegexSetBuilder::new([r"s"]) /// .case_insensitive(true) /// .unicode(false) /// .build() /// .unwrap(); /// // Normally 'ſ' is included when searching for 's' case /// // insensitively due to Unicode's simple case folding rules. But /// // when Unicode mode is disabled, only ASCII case insensitive rules /// // are used. /// assert!(!re.is_match("ſ".as_bytes())); /// ``` /// /// Since this builder is for constructing a /// [`bytes::RegexSet`](RegexSet), one can disable Unicode mode even if /// it would match invalid UTF-8: /// /// ``` /// use regex::bytes::RegexSetBuilder; /// /// let re = RegexSetBuilder::new([r"."]) /// .unicode(false) /// .build() /// .unwrap(); /// // Normally greek letters would be included in \w, but since /// // Unicode mode is disabled, it only matches ASCII letters. /// assert!(re.is_match(b"\xFF")); /// ``` pub fn unicode(&mut self, yes: bool) -> &mut RegexSetBuilder { self.builder.unicode(yes); self } /// This configures whether to enable case insensitive matching for all /// of the patterns. /// /// This setting can also be configured using the inline flag `i` /// in the pattern. For example, `(?i:foo)` matches `foo` case /// insensitively while `(?-i:foo)` matches `foo` case sensitively. /// /// The default for this is `false`. /// /// # Example /// /// ``` /// use regex::bytes::RegexSetBuilder; /// /// let re = RegexSetBuilder::new([r"foo(?-i:bar)quux"]) /// .case_insensitive(true) /// .build() /// .unwrap(); /// assert!(re.is_match(b"FoObarQuUx")); /// // Even though case insensitive matching is enabled in the builder, /// // it can be locally disabled within the pattern. In this case, /// // `bar` is matched case sensitively. /// assert!(!re.is_match(b"fooBARquux")); /// ``` pub fn case_insensitive(&mut self, yes: bool) -> &mut RegexSetBuilder { self.builder.case_insensitive(yes); self } /// This configures multi-line mode for all of the patterns. /// /// Enabling multi-line mode changes the behavior of the `^` and `$` /// anchor assertions. Instead of only matching at the beginning and /// end of a haystack, respectively, multi-line mode causes them to /// match at the beginning and end of a line *in addition* to the /// beginning and end of a haystack. More precisely, `^` will match at /// the position immediately following a `\n` and `$` will match at the /// position immediately preceding a `\n`. /// /// The behavior of this option can be impacted by other settings too: /// /// * The [`RegexSetBuilder::line_terminator`] option changes `\n` /// above to any ASCII byte. /// * The [`RegexSetBuilder::crlf`] option changes the line terminator /// to be either `\r` or `\n`, but never at the position between a `\r` /// and `\n`. /// /// This setting can also be configured using the inline flag `m` in /// the pattern. /// /// The default for this is `false`. 
/// /// # Example /// /// ``` /// use regex::bytes::RegexSetBuilder; /// /// let re = RegexSetBuilder::new([r"^foo$"]) /// .multi_line(true) /// .build() /// .unwrap(); /// assert!(re.is_match(b"\nfoo\n")); /// ``` pub fn multi_line(&mut self, yes: bool) -> &mut RegexSetBuilder { self.builder.multi_line(yes); self } /// This configures dot-matches-new-line mode for the entire pattern. /// /// Perhaps surprisingly, the default behavior for `.` is not to match /// any character, but rather, to match any character except for the /// line terminator (which is `\n` by default). When this mode is /// enabled, the behavior changes such that `.` truly matches any /// character. /// /// This setting can also be configured using the inline flag `s` in /// the pattern. For example, `(?s:.)` and `\p{any}` are equivalent /// regexes. /// /// The default for this is `false`. /// /// # Example /// /// ``` /// use regex::bytes::RegexSetBuilder; /// /// let re = RegexSetBuilder::new([r"foo.bar"]) /// .dot_matches_new_line(true) /// .build() /// .unwrap(); /// let hay = b"foo\nbar"; /// assert!(re.is_match(hay)); /// ``` pub fn dot_matches_new_line( &mut self, yes: bool, ) -> &mut RegexSetBuilder { self.builder.dot_matches_new_line(yes); self } /// This configures CRLF mode for all of the patterns. /// /// When CRLF mode is enabled, both `\r` ("carriage return" or CR for /// short) and `\n` ("line feed" or LF for short) are treated as line /// terminators. This results in the following: /// /// * Unless dot-matches-new-line mode is enabled, `.` will now match /// any character except for `\n` and `\r`. /// * When multi-line mode is enabled, `^` will match immediately /// following a `\n` or a `\r`. Similarly, `$` will match immediately /// preceding a `\n` or a `\r`. Neither `^` nor `$` will ever match /// between `\r` and `\n`. /// /// This setting can also be configured using the inline flag `R` in /// the pattern. /// /// The default for this is `false`. /// /// # Example /// /// ``` /// use regex::bytes::RegexSetBuilder; /// /// let re = RegexSetBuilder::new([r"^foo$"]) /// .multi_line(true) /// .crlf(true) /// .build() /// .unwrap(); /// let hay = b"\r\nfoo\r\n"; /// // If CRLF mode weren't enabled here, then '$' wouldn't match /// // immediately after 'foo', and thus no match would be found. /// assert!(re.is_match(hay)); /// ``` /// /// This example demonstrates that `^` will never match at a position /// between `\r` and `\n`. (`$` will similarly not match between a `\r` /// and a `\n`.) /// /// ``` /// use regex::bytes::RegexSetBuilder; /// /// let re = RegexSetBuilder::new([r"^\n"]) /// .multi_line(true) /// .crlf(true) /// .build() /// .unwrap(); /// assert!(!re.is_match(b"\r\n")); /// ``` pub fn crlf(&mut self, yes: bool) -> &mut RegexSetBuilder { self.builder.crlf(yes); self } /// Configures the line terminator to be used by the regex. /// /// The line terminator is relevant in two ways for a particular regex: /// /// * When dot-matches-new-line mode is *not* enabled (the default), /// then `.` will match any character except for the configured line /// terminator. /// * When multi-line mode is enabled (not the default), then `^` and /// `$` will match immediately after and before, respectively, a line /// terminator. /// /// In both cases, if CRLF mode is enabled in a particular context, /// then it takes precedence over any configured line terminator. /// /// This option cannot be configured from within the pattern. /// /// The default line terminator is `\n`. 
/// /// # Example /// /// This shows how to treat the NUL byte as a line terminator. This can /// be a useful heuristic when searching binary data. /// /// ``` /// use regex::bytes::RegexSetBuilder; /// /// let re = RegexSetBuilder::new([r"^foo$"]) /// .multi_line(true) /// .line_terminator(b'\x00') /// .build() /// .unwrap(); /// let hay = b"\x00foo\x00"; /// assert!(re.is_match(hay)); /// ``` /// /// This example shows that the behavior of `.` is impacted by this /// setting as well: /// /// ``` /// use regex::bytes::RegexSetBuilder; /// /// let re = RegexSetBuilder::new([r"."]) /// .line_terminator(b'\x00') /// .build() /// .unwrap(); /// assert!(re.is_match(b"\n")); /// assert!(!re.is_match(b"\x00")); /// ``` /// /// This shows that building a regex will work even when the byte given /// is not ASCII. This is unlike the top-level `RegexSet` API where /// matching invalid UTF-8 is not allowed. /// /// Note though that you must disable Unicode mode. This is required /// because Unicode mode requires matching one codepoint at a time, /// and there is no way to match a non-ASCII byte as if it were a /// codepoint. /// /// ``` /// use regex::bytes::RegexSetBuilder; /// /// assert!( /// RegexSetBuilder::new([r"."]) /// .unicode(false) /// .line_terminator(0x80) /// .build() /// .is_ok(), /// ); /// ``` pub fn line_terminator(&mut self, byte: u8) -> &mut RegexSetBuilder { self.builder.line_terminator(byte); self } /// This configures swap-greed mode for all of the patterns. /// /// When swap-greed mode is enabled, patterns like `a+` will become /// non-greedy and patterns like `a+?` will become greedy. In other /// words, the meanings of `a+` and `a+?` are switched. /// /// This setting can also be configured using the inline flag `U` in /// the pattern. /// /// Note that this is generally not useful for a `RegexSet` since a /// `RegexSet` can only report whether a pattern matches or not. Since /// greediness never impacts whether a match is found or not (only the /// offsets of the match), it follows that whether parts of a pattern /// are greedy or not doesn't matter for a `RegexSet`. /// /// The default for this is `false`. pub fn swap_greed(&mut self, yes: bool) -> &mut RegexSetBuilder { self.builder.swap_greed(yes); self } /// This configures verbose mode for all of the patterns. /// /// When enabled, whitespace will treated as insignifcant in the /// pattern and `#` can be used to start a comment until the next new /// line. /// /// Normally, in most places in a pattern, whitespace is treated /// literally. For example ` +` will match one or more ASCII whitespace /// characters. /// /// When verbose mode is enabled, `\#` can be used to match a literal /// `#` and `\ ` can be used to match a literal ASCII whitespace /// character. /// /// Verbose mode is useful for permitting regexes to be formatted and /// broken up more nicely. This may make them more easily readable. /// /// This setting can also be configured using the inline flag `x` in /// the pattern. /// /// The default for this is `false`. /// /// # Example /// /// ``` /// use regex::bytes::RegexSetBuilder; /// /// let pat = r" /// \b /// (?<first>\p{Uppercase}\w*) # always start with uppercase letter /// [\s--\n]+ # whitespace should separate names /// (?: # middle name can be an initial! /// (?:(?<initial>\p{Uppercase})\.|(?<middle>\p{Uppercase}\w*)) /// [\s--\n]+ /// )? 
    ///     (?<last>\p{Uppercase}\w*)
    ///     \b
    /// ";
    /// let re = RegexSetBuilder::new([pat])
    ///     .ignore_whitespace(true)
    ///     .build()
    ///     .unwrap();
    /// assert!(re.is_match(b"<NAME>"));
    /// assert!(re.is_match(b"<NAME>"));
    /// assert!(re.is_match(b"<NAME>"));
    /// assert!(!re.is_match(b"<NAME>"));
    /// ```
    pub fn ignore_whitespace(
        &mut self,
        yes: bool,
    ) -> &mut RegexSetBuilder {
        self.builder.ignore_whitespace(yes);
        self
    }

    /// This configures octal mode for all of the patterns.
    ///
    /// Octal syntax is a little-known way of uttering Unicode codepoints
    /// in a pattern. For example, `a`, `\x61`, `\u0061` and `\141` are all
    /// equivalent patterns, where the last example shows octal syntax.
    ///
    /// While supporting octal syntax isn't in and of itself a problem,
    /// it does make good error messages harder. That is, in PCRE based
    /// regex engines, syntax like `\1` invokes a backreference, which is
    /// explicitly unsupported by this library. However, many users expect
    /// backreferences to be supported. Therefore, when octal support
    /// is disabled, the error message will explicitly mention that
    /// backreferences aren't supported.
    ///
    /// The default for this is `false`.
    ///
    /// # Example
    ///
    /// ```
    /// use regex::bytes::RegexSetBuilder;
    ///
    /// // Normally this pattern would not compile, with an error message
    /// // about backreferences not being supported. But with octal mode
    /// // enabled, octal escape sequences work.
    /// let re = RegexSetBuilder::new([r"\141"])
    ///     .octal(true)
    ///     .build()
    ///     .unwrap();
    /// assert!(re.is_match(b"a"));
    /// ```
    pub fn octal(&mut self, yes: bool) -> &mut RegexSetBuilder {
        self.builder.octal(yes);
        self
    }

    /// Sets the approximate size limit, in bytes, of the compiled regex.
    ///
    /// This roughly corresponds to the amount of heap memory, in
    /// bytes, occupied by a single regex. If the regex would otherwise
    /// approximately exceed this limit, then compiling that regex will
    /// fail.
    ///
    /// The main utility of a method like this is to avoid compiling
    /// regexes that use an unexpected amount of resources, such as
    /// time and memory. Even if the memory usage of a large regex is
    /// acceptable, its search time may not be. Namely, worst case time
    /// complexity for search is `O(m * n)`, where `m ~ len(pattern)` and
    /// `n ~ len(haystack)`. That is, search time depends, in part, on the
    /// size of the compiled regex. This means that putting a limit on the
    /// size of the regex limits how much a regex can impact search time.
    ///
    /// For more information about regex size limits, see the section on
    /// [untrusted inputs](crate#untrusted-input) in the top-level crate
    /// documentation.
    ///
    /// The default for this is some reasonable number that permits most
    /// patterns to compile successfully.
    ///
    /// # Example
    ///
    /// ```
    /// use regex::bytes::RegexSetBuilder;
    ///
    /// // It may surprise you how big some seemingly small patterns can
    /// // be! Since \w is Unicode aware, this generates a regex that can
    /// // match approximately 140,000 distinct codepoints.
    /// assert!(
    ///     RegexSetBuilder::new([r"\w"])
    ///         .size_limit(45_000)
    ///         .build()
    ///         .is_err()
    /// );
    /// ```
    pub fn size_limit(&mut self, bytes: usize) -> &mut RegexSetBuilder {
        self.builder.size_limit(bytes);
        self
    }

    /// Set the approximate capacity, in bytes, of the cache of transitions
    /// used by the lazy DFA.
    ///
    /// While the lazy DFA isn't always used, it tends to be the most
    /// commonly used regex engine in default configurations.
    /// It tends to adopt the performance profile of a fully built DFA, but
    /// without the downside of taking worst case exponential time to build.
    ///
    /// The downside is that it needs to keep a cache of transitions and
    /// states that are built while running a search, and this cache
    /// can fill up. When it fills up, the cache will reset itself. Any
    /// previously generated states and transitions will then need to be
    /// re-generated. If this happens too many times, then this library
    /// will bail out of using the lazy DFA and switch to a different regex
    /// engine.
    ///
    /// If your regex provokes this particular downside of the lazy DFA,
    /// then it may be beneficial to increase its cache capacity. This will
    /// potentially reduce the frequency of cache resetting (ideally to
    /// `0`). While it won't fix all potential performance problems with
    /// the lazy DFA, increasing the cache capacity does fix some.
    ///
    /// There is no easy way to determine, a priori, whether increasing
    /// this cache capacity will help. In general, the larger your regex,
    /// the more cache it's likely to use. But that isn't an ironclad rule.
    /// For example, a regex like `[01]*1[01]{N}` would normally produce a
    /// fully built DFA that is exponential in size with respect to `N`.
    /// The lazy DFA will prevent exponential space blow-up, but its cache
    /// is likely to fill up, even when it's large and even for smallish
    /// values of `N`.
    ///
    /// If you aren't sure whether this helps or not, it is sensible to
    /// set this to some arbitrarily large number in testing, such as
    /// `usize::MAX`. Namely, this represents the amount of capacity that
    /// *may* be used. It's probably not a good idea to use `usize::MAX` in
    /// production though, since it implies there are no controls on heap
    /// memory used by this library during a search. In effect, set it to
    /// whatever you're willing to allocate for a single regex search.
    pub fn dfa_size_limit(
        &mut self,
        bytes: usize,
    ) -> &mut RegexSetBuilder {
        self.builder.dfa_size_limit(bytes);
        self
    }

    /// Set the nesting limit for this parser.
    ///
    /// The nesting limit controls how deep the abstract syntax tree is
    /// allowed to be. If the AST exceeds the given limit (e.g., with too
    /// many nested groups), then an error is returned by the parser.
    ///
    /// The purpose of this limit is to act as a heuristic to prevent stack
    /// overflow for consumers that do structural induction on an AST using
    /// explicit recursion. While this crate never does this (instead using
    /// constant stack space and moving the call stack to the heap), other
    /// crates may.
    ///
    /// This limit is not checked until the entire AST is parsed.
    /// Therefore, if callers want to put a limit on the amount of heap
    /// space used, then they should impose a limit on the length, in
    /// bytes, of the concrete pattern string. In particular, this is
    /// viable since this parser implementation will limit itself to heap
    /// space proportional to the length of the pattern string. See also
    /// the [untrusted inputs](crate#untrusted-input) section in the
    /// top-level crate documentation for more information about this.
    ///
    /// Note that a nest limit of `0` will return a nest limit error for
    /// most patterns but not all. For example, a nest limit of `0` permits
    /// `a` but not `ab`, since `ab` requires an explicit concatenation,
    /// which results in a nest depth of `1`. In general, a nest limit is
    /// not something that manifests in an obvious way in the concrete
    /// syntax, therefore, it should not be used in a granular way.
/// /// # Example /// /// ``` /// use regex::bytes::RegexSetBuilder; /// /// assert!(RegexSetBuilder::new([r"a"]).nest_limit(0).build().is_ok()); /// assert!(RegexSetBuilder::new([r"ab"]).nest_limit(0).build().is_err()); /// ``` pub fn nest_limit(&mut self, limit: u32) -> &mut RegexSetBuilder { self.builder.nest_limit(limit); self } } } <file_sep>/regex-automata/tests/nfa/thompson/backtrack/suite.rs use { anyhow::Result, regex_automata::{ nfa::thompson::{ self, backtrack::{self, BoundedBacktracker}, NFA, }, util::{prefilter::Prefilter, syntax}, Input, }, regex_test::{ CompiledRegex, Match, MatchKind, RegexTest, SearchKind, Span, TestResult, TestRunner, }, }; use crate::{create_input, suite, testify_captures}; /// Tests the default configuration of the bounded backtracker. #[test] fn default() -> Result<()> { let builder = BoundedBacktracker::builder(); let mut runner = TestRunner::new()?; runner.expand(&["is_match", "find", "captures"], |test| test.compiles()); // At the time of writing, every regex search in the test suite fits // into the backtracker's default visited capacity (except for the // blacklisted tests below). If regexes are added that blow that capacity, // then they should be blacklisted here. A tempting alternative is to // automatically skip them by checking the haystack length against // BoundedBacktracker::max_haystack_len, but that could wind up hiding // interesting failure modes. e.g., If the visited capacity is somehow // wrong or smaller than it should be. runner.blacklist("expensive/backtrack-blow-visited-capacity"); runner.test_iter(suite()?.iter(), compiler(builder)).assert(); Ok(()) } /// Tests the backtracker with prefilters enabled. #[test] fn prefilter() -> Result<()> { let my_compiler = |test: &RegexTest, regexes: &[String]| { // Parse regexes as HIRs so we can get literals to build a prefilter. let mut hirs = vec![]; for pattern in regexes.iter() { hirs.push(syntax::parse_with(pattern, &config_syntax(test))?); } // We can always select leftmost-first here because the backtracker // only supports leftmost-first matching. let pre = Prefilter::from_hirs_prefix( regex_automata::MatchKind::LeftmostFirst, &hirs, ); let mut builder = BoundedBacktracker::builder(); builder.configure(BoundedBacktracker::config().prefilter(pre)); compiler(builder)(test, regexes) }; let mut runner = TestRunner::new()?; runner.expand(&["is_match", "find", "captures"], |test| test.compiles()); runner.blacklist("expensive/backtrack-blow-visited-capacity"); runner.test_iter(suite()?.iter(), my_compiler).assert(); Ok(()) } /// Tests the bounded backtracker when its visited capacity is set to its /// minimum amount. #[test] fn min_visited_capacity() -> Result<()> { let mut runner = TestRunner::new()?; runner.expand(&["is_match", "find", "captures"], |test| test.compiles()); runner .test_iter(suite()?.iter(), move |test, regexes| { let nfa = NFA::compiler() .configure(config_thompson(test)) .syntax(config_syntax(test)) .build_many(&regexes)?; let mut builder = BoundedBacktracker::builder(); if !configure_backtrack_builder(test, &mut builder) { return Ok(CompiledRegex::skip()); } // Setup the bounded backtracker so that its visited capacity is // the absolute minimum required for the test's haystack. 
builder.configure(BoundedBacktracker::config().visited_capacity( backtrack::min_visited_capacity( &nfa, &Input::new(test.haystack()), ), )); let re = builder.build_from_nfa(nfa)?; let mut cache = re.create_cache(); Ok(CompiledRegex::compiled(move |test| -> TestResult { run_test(&re, &mut cache, test) })) }) .assert(); Ok(()) } fn compiler( mut builder: backtrack::Builder, ) -> impl FnMut(&RegexTest, &[String]) -> Result<CompiledRegex> { move |test, regexes| { if !configure_backtrack_builder(test, &mut builder) { return Ok(CompiledRegex::skip()); } let re = builder.build_many(&regexes)?; let mut cache = re.create_cache(); Ok(CompiledRegex::compiled(move |test| -> TestResult { run_test(&re, &mut cache, test) })) } } fn run_test( re: &BoundedBacktracker, cache: &mut backtrack::Cache, test: &RegexTest, ) -> TestResult { let input = create_input(test); match test.additional_name() { "is_match" => match test.search_kind() { SearchKind::Earliest | SearchKind::Overlapping => { TestResult::skip() } SearchKind::Leftmost => { let input = input.earliest(true); TestResult::matched(re.try_is_match(cache, input).unwrap()) } }, "find" => match test.search_kind() { SearchKind::Earliest | SearchKind::Overlapping => { TestResult::skip() } SearchKind::Leftmost => TestResult::matches( re.try_find_iter(cache, input) .take(test.match_limit().unwrap_or(std::usize::MAX)) .map(|result| result.unwrap()) .map(|m| Match { id: m.pattern().as_usize(), span: Span { start: m.start(), end: m.end() }, }), ), }, "captures" => match test.search_kind() { SearchKind::Earliest | SearchKind::Overlapping => { TestResult::skip() } SearchKind::Leftmost => TestResult::captures( re.try_captures_iter(cache, input) .take(test.match_limit().unwrap_or(std::usize::MAX)) .map(|result| result.unwrap()) .map(|caps| testify_captures(&caps)), ), }, name => TestResult::fail(&format!("unrecognized test name: {}", name)), } } /// Configures the given regex builder with all relevant settings on the given /// regex test. /// /// If the regex test has a setting that is unsupported, then this returns /// false (implying the test should be skipped). fn configure_backtrack_builder( test: &RegexTest, builder: &mut backtrack::Builder, ) -> bool { match (test.search_kind(), test.match_kind()) { // For testing the standard search APIs. This is the only supported // configuration for the backtracker. (SearchKind::Leftmost, MatchKind::LeftmostFirst) => {} // Overlapping APIs not supported at all for backtracker. (SearchKind::Overlapping, _) => return false, // Backtracking doesn't really support the notion of 'earliest'. // Namely, backtracking already works by returning as soon as it knows // it has found a match. It just so happens that this corresponds to // the standard 'leftmost' formulation. // // The 'earliest' definition in this crate does indeed permit this // behavior, so this is "fine," but our test suite specifically looks // for the earliest position at which a match is known, which our // finite automata based regex engines have no problem providing. So // for backtracking, we just skip these tests. (SearchKind::Earliest, _) => return false, // For backtracking, 'all' semantics don't really make sense. (_, MatchKind::All) => return false, // Not supported at all in regex-automata. 
(_, MatchKind::LeftmostLongest) => return false, }; let backtrack_config = BoundedBacktracker::config(); builder .configure(backtrack_config) .syntax(config_syntax(test)) .thompson(config_thompson(test)); true } /// Configuration of a Thompson NFA compiler from a regex test. fn config_thompson(test: &RegexTest) -> thompson::Config { let mut lookm = regex_automata::util::look::LookMatcher::new(); lookm.set_line_terminator(test.line_terminator()); thompson::Config::new().utf8(test.utf8()).look_matcher(lookm) } /// Configuration of the regex parser from a regex test. fn config_syntax(test: &RegexTest) -> syntax::Config { syntax::Config::new() .case_insensitive(test.case_insensitive()) .unicode(test.unicode()) .utf8(test.utf8()) .line_terminator(test.line_terminator()) } <file_sep>/src/error.rs use alloc::string::{String, ToString}; use regex_automata::meta; /// An error that occurred during parsing or compiling a regular expression. #[non_exhaustive] #[derive(Clone, PartialEq)] pub enum Error { /// A syntax error. Syntax(String), /// The compiled program exceeded the set size /// limit. The argument is the size limit imposed by /// [`RegexBuilder::size_limit`](crate::RegexBuilder::size_limit). Even /// when not configured explicitly, it defaults to a reasonable limit. /// /// If you're getting this error, it occurred because your regex has been /// compiled to an intermediate state that is too big. It is important to /// note that exceeding this limit does _not_ mean the regex is too big to /// _work_, but rather, the regex is big enough that it may wind up being /// surprisingly slow when used in a search. In other words, this error is /// meant to be a practical heuristic for avoiding a performance footgun, /// and especially so for the case where the regex pattern is coming from /// an untrusted source. /// /// There are generally two ways to move forward if you hit this error. /// The first is to find some way to use a smaller regex. The second is to /// increase the size limit via `RegexBuilder::size_limit`. However, if /// your regex pattern is not from a trusted source, then neither of these /// approaches may be appropriate. Instead, you'll have to determine just /// how big of a regex you want to allow. CompiledTooBig(usize), } impl Error { pub(crate) fn from_meta_build_error(err: meta::BuildError) -> Error { if let Some(size_limit) = err.size_limit() { Error::CompiledTooBig(size_limit) } else if let Some(ref err) = err.syntax_error() { Error::Syntax(err.to_string()) } else { // This is a little suspect. Technically there are more ways for // a meta regex to fail to build other than "exceeded size limit" // and "syntax error." For example, if there are too many states // or even too many patterns. But in practice this is probably // good enough. The worst thing that happens is that Error::Syntax // represents an error that isn't technically a syntax error, but // the actual message will still be shown. So... it's not too bad. // // We really should have made the Error type in the regex crate // completely opaque. Rookie mistake. Error::Syntax(err.to_string()) } } } #[cfg(feature = "std")] impl std::error::Error for Error { // TODO: Remove this method entirely on the next breaking semver release. 
#[allow(deprecated)] fn description(&self) -> &str { match *self { Error::Syntax(ref err) => err, Error::CompiledTooBig(_) => "compiled program too big", } } } impl core::fmt::Display for Error { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { match *self { Error::Syntax(ref err) => err.fmt(f), Error::CompiledTooBig(limit) => write!( f, "Compiled regex exceeds size limit of {} bytes.", limit ), } } } // We implement our own Debug implementation so that we show nicer syntax // errors when people use `Regex::new(...).unwrap()`. It's a little weird, // but the `Syntax` variant is already storing a `String` anyway, so we might // as well format it nicely. impl core::fmt::Debug for Error { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { match *self { Error::Syntax(ref err) => { let hr: String = core::iter::repeat('~').take(79).collect(); writeln!(f, "Syntax(")?; writeln!(f, "{}", hr)?; writeln!(f, "{}", err)?; writeln!(f, "{}", hr)?; write!(f, ")")?; Ok(()) } Error::CompiledTooBig(limit) => { f.debug_tuple("CompiledTooBig").field(&limit).finish() } } } } <file_sep>/regex-syntax/Cargo.toml [package] name = "regex-syntax" version = "0.7.5" #:version authors = ["The Rust Project Developers", "<NAME> <<EMAIL>>"] license = "MIT OR Apache-2.0" repository = "https://github.com/rust-lang/regex/tree/master/regex-syntax" documentation = "https://docs.rs/regex-syntax" description = "A regular expression parser." workspace = ".." edition = "2021" rust-version = "1.60.0" # Features are documented in the "Crate features" section of the crate docs: # https://docs.rs/regex-syntax/*/#crate-features [features] default = ["std", "unicode"] std = [] arbitrary = ["dep:arbitrary"] unicode = [ "unicode-age", "unicode-bool", "unicode-case", "unicode-gencat", "unicode-perl", "unicode-script", "unicode-segment", ] unicode-age = [] unicode-bool = [] unicode-case = [] unicode-gencat = [] unicode-perl = [] unicode-script = [] unicode-segment = [] [dependencies] arbitrary = { version = "1.3.0", features = ["derive"], optional = true } [package.metadata.docs.rs] # We want to document all features. all-features = true # Since this crate's feature setup is pretty complicated, it is worth opting # into a nightly unstable option to show the features that need to be enabled # for public API items. To do that, we set 'docsrs', and when that's enabled, # we enable the 'doc_auto_cfg' feature. # # To test this locally, run: # # RUSTDOCFLAGS="--cfg docsrs" cargo +nightly doc --all-features rustdoc-args = ["--cfg", "docsrs"] <file_sep>/regex-cli/cmd/generate/fowler.rs use std::{ collections::HashSet, fs::File, io::{BufRead, Read, Write}, path::{Path, PathBuf}, }; use { anyhow::Context, bstr::ByteSlice, lexopt::{Arg, Parser}, }; use crate::args::{self, Usage}; pub fn run(p: &mut Parser) -> anyhow::Result<()> { const USAGE: &'static str = r#" Generate TOML tests from Glenn Fowler's regex test suite. This corresponds to a very sizeable set of regex tests that were written many moons ago. They have been tweaked slightly by both <NAME> and myself (Andrew Gallant). This tool is the spiritual successor of some hacky Python scripts. Its input is a bespoke plain text format matching the original test data, and its output are TOML files meant to work with the 'regex-test' crate. Example usage from the root of this repository: regex-cli generate fowler tests/data/fowler tests/data/fowler/dat/*.dat See tests/data/fowler/dat/README for more context. 
USAGE: regex-cli generate fowler <outdir> <datfile> ... outdir should be a path to a directory where you want the TOML files to be written. datfile should be a Glenn Fowler dat file whose format is bespoke. "#; let mut config = Config::default(); args::configure(p, USAGE, &mut [&mut config])?; let outdir = config.outdir()?; let datfiles = config.datfiles()?; for datfile in datfiles.iter() { let datfile = Path::new(datfile); let stem = match datfile.file_stem() { Some(stem) => stem.to_string_lossy(), None => anyhow::bail!("{}: has no file stem", datfile.display()), }; let tomlfile = outdir.join(format!("{}.toml", stem)); let mut rdr = File::open(datfile) .with_context(|| datfile.display().to_string())?; let mut wtr = File::create(&tomlfile) .with_context(|| tomlfile.display().to_string())?; convert(&stem, &mut rdr, &mut wtr) .with_context(|| stem.to_string())?; } Ok(()) } #[derive(Debug, Default)] struct Config { outdir: Option<PathBuf>, datfiles: Vec<PathBuf>, } impl Config { fn outdir(&self) -> anyhow::Result<&Path> { self.outdir .as_deref() .ok_or_else(|| anyhow::anyhow!("missing <outdir>")) } fn datfiles(&self) -> anyhow::Result<&[PathBuf]> { anyhow::ensure!(!self.datfiles.is_empty(), "no Fowler datfiles given",); Ok(&self.datfiles) } } impl args::Configurable for Config { fn configure( &mut self, _: &mut Parser, arg: &mut Arg, ) -> anyhow::Result<bool> { match *arg { Arg::Value(ref mut value) => { let path = PathBuf::from(std::mem::take(value)); if self.outdir.is_none() { self.outdir = Some(path); } else { self.datfiles.push(path); } } _ => return Ok(false), } Ok(true) } fn usage(&self) -> &[Usage] { const USAGES: &'static [Usage] = &[]; USAGES } } fn convert( mut group_name: &str, src: &mut dyn Read, dst: &mut dyn Write, ) -> anyhow::Result<()> { log::trace!("processing {}", group_name); let src = std::io::BufReader::new(src); writeln!( dst, "\ # !!! DO NOT EDIT !!! # Automatically generated by 'regex-cli generate fowler'. # Numbers in the test names correspond to the line number of the test from # the original dat file. " )?; let mut prev = None; for (i, result) in src.lines().enumerate() { // Every usize can fit into a u64... Right? let line_number = u64::try_from(i).unwrap().checked_add(1).unwrap(); let line = result.with_context(|| format!("line {}", line_number))?; // The last group of tests in 'repetition' take quite a lot of time // when using them to build and minimize a DFA. So we tag them with // 'expensive' so that we can skip those tests when we need to minimize // a DFA. if group_name == "repetition" && line.contains("<NAME>") { group_name = "repetition-expensive"; } if line.trim().is_empty() || line.starts_with('#') { // Too noisy to log that we're skipping an empty or commented test. continue; } let dat = match DatTest::parse(prev.as_ref(), line_number, &line)? { None => continue, Some(dat) => dat, }; let toml = TomlTest::from_dat_test(group_name, &dat)?; writeln!(dst, "{}", toml)?; prev = Some(dat); } Ok(()) } #[derive(Debug)] struct TomlTest { group_name: String, line_number: u64, regex: String, haystack: String, captures: Vec<Option<(u64, u64)>>, unescape: bool, case_insensitive: bool, comment: Option<String>, } impl TomlTest { fn from_dat_test( group_name: &str, dat: &DatTest, ) -> anyhow::Result<TomlTest> { let mut captures = dat.captures.clone(); if !captures.is_empty() { // Many of the Fowler tests don't actually list out every capturing // group match, and they instead stop once all remaining capturing // groups are empty. 
In effect, it makes writing tests terser, // but adds more implicitness. The TOML test suite does not make // this trade off (to this extent anyway), so it really wants all // capturing groups... // // So what we do here is is look for the number of groups in the // pattern and then just pad out the capture matches with None // values to make the number of capture matches equal to what we // would expect from the pattern. (We actually parse the regex to // determine this.) // // Sadly, this doesn't work for a small subset of tests that // actually have more capturing group MATCHES than what is listed // explicitly in the test. Instead, the test includes an 'nmatch' // instruction that instructs the test runner to only consider the // first N capturing groups. Our test runner has no such option... // To fix that, I rewrote the tests to use non-capturing groups in // order to match the expected number of capture matches. let numcaps = count_capturing_groups(&dat.regex)?; for _ in captures.len()..numcaps { captures.push(None); } } let comment = if dat.re2go { Some("Test added by RE2/Go project.".to_string()) } else if dat.rust { Some("Test added by Rust regex project.".to_string()) } else { None }; Ok(TomlTest { group_name: group_name.to_string(), line_number: dat.line_number, regex: dat.regex.clone(), haystack: dat.haystack.clone(), captures, unescape: dat.flags.contains(&'$'), case_insensitive: dat.flags.contains(&'i'), comment, }) } } impl core::fmt::Display for TomlTest { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { if let Some(ref comment) = self.comment { writeln!(f, "# {}", comment)?; } writeln!(f, "[[test]]")?; writeln!(f, "name = \"{}{}\"", self.group_name, self.line_number)?; writeln!(f, "regex = '''{}'''", self.regex)?; writeln!(f, "haystack = '''{}'''", self.haystack)?; if self.captures.is_empty() { writeln!(f, "matches = []")?; } else { write!(f, "matches = [[")?; for (i, &group) in self.captures.iter().enumerate() { if i > 0 { write!(f, ", ")?; } match group { None => write!(f, "[]")?, Some((start, end)) => write!(f, "[{}, {}]", start, end)?, } } writeln!(f, "]]")?; } writeln!(f, "match-limit = 1")?; // If the match starts at 0, then set anchored=true. This gives us more // coverage on the anchored option and lets regex engines like the // one-pass DFA participate a bit more in the test suite. if self .captures .get(0) .and_then(|&s| s) .map_or(false, |span| span.0 == 0) { writeln!(f, "anchored = true")?; } if self.unescape { writeln!(f, "unescape = true")?; } if self.case_insensitive { writeln!(f, "case-insensitive = true")?; } Ok(()) } } #[derive(Debug)] struct DatTest { line_number: u64, flags: HashSet<char>, regex: String, haystack: String, captures: Vec<Option<(u64, u64)>>, re2go: bool, rust: bool, } impl DatTest { fn parse( prev: Option<&DatTest>, line_number: u64, line: &str, ) -> anyhow::Result<Option<DatTest>> { let fields: Vec<String> = line .split('\t') .map(|f| f.trim().to_string()) .filter(|f| !f.is_empty()) .collect(); if !(4 <= fields.len() && fields.len() <= 5) { log::trace!( "skipping {}: too few or too many fields ({})", line_number, fields.len(), ); return Ok(None); } // First field contains terse one-letter flags. let mut flags: HashSet<char> = fields[0].chars().collect(); if !flags.contains(&'E') { log::trace!("skipping {}: does not contain 'E' flag", line_number); return Ok(None); } // Second field contains the regex pattern or 'SAME' if it's the // same as the regex in the previous test. 
let mut regex = fields[1].clone(); if regex == "SAME" { regex = match prev { Some(test) => test.regex.clone(), None => anyhow::bail!( "line {}: wants previous pattern but none is available", line_number, ), }; } // Third field contains the text to search or 'NULL'. let mut haystack = fields[2].clone(); if haystack == "NULL" { haystack = "".to_string(); } // Some tests have literal control characters in the regex/input // instead of using escapes. TOML freaks out at this, so we detect the // case, escape them and add '$' to our flags. (Which will ultimately // instruct the test harness to unescape the input.) if regex.chars().any(|c| c.is_control()) || haystack.chars().any(|c| c.is_control()) { flags.insert('$'); regex = regex.as_bytes().escape_bytes().to_string(); haystack = haystack.as_bytes().escape_bytes().to_string(); } // Fourth field contains the capturing groups, or 'NOMATCH' or an // error. let mut captures = vec![]; if fields[3] != "NOMATCH" { // Some tests check for a compilation error to occur, but we skip // these for now. We might consider adding them manually, or better // yet, just adding support for them here. if !fields[3].contains(',') { log::trace!( "skipping {}: malformed capturing group", line_number ); return Ok(None); } let noparen = fields[3] .split(")(") .map(|x| x.trim_matches(|c| c == '(' || c == ')')); for group in noparen { let (start, end) = match group.split_once(',') { Some((start, end)) => (start.trim(), end.trim()), None => anyhow::bail!( "line {}: invalid capturing group '{}' in '{}'", line_number, group, fields[3] ), }; if start == "?" && end == "?" { captures.push(None); } else { let start = start.parse()?; let end = end.parse()?; captures.push(Some((start, end))); } } } // The fifth field is optional and contains some notes. Currently, this // is used to indicate tests added or modified by particular regex // implementations. let re2go = fields.len() >= 5 && fields[4] == "RE2/Go"; let rust = fields.len() >= 5 && fields[4] == "Rust"; Ok(Some(DatTest { line_number, flags, regex, haystack, captures, re2go, rust, })) } } fn count_capturing_groups(pattern: &str) -> anyhow::Result<usize> { let ast = regex_syntax::ast::parse::Parser::new() .parse(pattern) .with_context(|| format!("failed to parse '{}'", pattern))?; // We add 1 to account for the capturing group for the entire // pattern. Ok(1 + count_capturing_groups_ast(&ast)) } fn count_capturing_groups_ast(ast: &regex_syntax::ast::Ast) -> usize { use regex_syntax::ast::Ast; match *ast { Ast::Empty(_) | Ast::Flags(_) | Ast::Literal(_) | Ast::Dot(_) | Ast::Assertion(_) | Ast::Class(_) => 0, Ast::Repetition(ref rep) => count_capturing_groups_ast(&*rep.ast), Ast::Group(ref group) => { let this = if group.is_capturing() { 1 } else { 0 }; this + count_capturing_groups_ast(&*group.ast) } Ast::Alternation(ref alt) => { alt.asts.iter().map(count_capturing_groups_ast).sum() } Ast::Concat(ref concat) => { concat.asts.iter().map(count_capturing_groups_ast).sum() } } } <file_sep>/regex-automata/src/util/empty.rs /*! This module provides helper routines for dealing with zero-width matches. The main problem being solved here is this: 1. The caller wants to search something that they know is valid UTF-8, such as a Rust `&str`. 2. The regex used by the caller can match the empty string. For example, `a*`. 3. The caller should never get match offsets returned that occur within the encoding of a UTF-8 codepoint. It is logically incorrect, and also means that, e.g., slicing the `&str` at those offsets will lead to a panic. 
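
To make item 3 concrete, here is a minimal sketch using only the standard
library (the `snowman` binding is purely illustrative):

```rust
// The UTF-8 encoding of '☃' is the three bytes \xE2\x98\x83, so the only
// valid char boundaries in this string are the offsets 0 and 3.
let snowman = "☃";
assert!(snowman.is_char_boundary(0));
assert!(!snowman.is_char_boundary(1));
assert!(!snowman.is_char_boundary(2));
assert!(snowman.is_char_boundary(3));
// Slicing at offset 1 or 2, e.g., `&snowman[1..]`, would panic.
```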
So the question here is, how do we prevent the caller from getting match offsets that split a codepoint? For example, strictly speaking, the regex `a*` matches `☃` at the positions `[0, 0]`, `[1, 1]`, `[2, 2]` and `[3, 3]` since the UTF-8 encoding of `☃` is `\xE2\x98\x83`. In particular, the `NFA` that underlies all of the matching engines in this crate doesn't have anything in its state graph that prevents matching between UTF-8 code units. Indeed, any engine derived from the `NFA` will match at those positions by virtue of the fact that the `NFA` is byte oriented. That is, its transitions are defined over bytes and the matching engines work by proceeding one byte at a time. (An alternative architecture would be to define the transitions in an `NFA` over codepoints, or `char`. And then make the matching engines proceed by decoding one codepoint at a time. This is a viable strategy, but it doesn't work for DFA matching engines because designing a fast and memory efficient transition table for an alphabet as large as Unicode is quite difficult. More to the point, the top-level `regex` crate supports matching on arbitrary bytes when Unicode mode is disabled and one is searching a `&[u8]`. So in that case, you can't just limit yourself to decoding codepoints and matching those. You really do need to be able to follow byte oriented transitions on the `NFA`.) In an older version of the regex crate, we handled this case not in the regex engine, but in the iterators over matches. Namely, since this case only arises when the match is empty, we "just" incremented the next starting position of the search by `N`, where `N` is the length of the codepoint encoded at the current position. The alternative or more "natural" solution of just incrementing by `1` would result in executing a search of `a*` on `☃` like this: * Start search at `0`. * Found match at `[0, 0]`. * Next start position is `0`. * To avoid an infinite loop, since it's an empty match, increment by `1`. * Start search at `1`. * Found match at `[1, 1]`. Oops. But if we instead incremented by `3` (the length in bytes of `☃`), then we get the following: * Start search at `0`. * Found match at `[0, 0]`. * Next start position is `0`. * To avoid an infinite loop, since it's an empty match, increment by `3`. * Start search at `3`. * Found match at `[3, 3]`. And we get the correct result. But does this technique work in all cases? Crucially, it requires that a zero-width match that splits a codepoint never occurs beyond the starting position of the search. Because if it did, merely incrementing the start position by the number of bytes in the codepoint at the current position wouldn't be enough. A zero-width match could just occur anywhere. It turns out that it is _almost_ true. We can convince ourselves by looking at all possible patterns that can match the empty string: * Patterns like `a*`, `a{0}`, `(?:)`, `a|` and `|a` all unconditionally match the empty string. That is, assuming there isn't an `a` at the current position, they will all match the empty string at the start of a search. There is no way to move past it because any other match would not be "leftmost." * `^` only matches at the beginning of the haystack, where the start position is `0`. 
Since we know we're searching
valid UTF-8 (if it isn't valid UTF-8, then this entire problem goes away
because it implies your string type supports invalid UTF-8 and thus must deal
with offsets that not only split a codepoint but occur in entirely invalid
UTF-8 somehow), it follows that `^` never matches between the code units of a
codepoint because the start of a valid UTF-8 string is never within the
encoding of a codepoint.

* `$` basically the same logic as `^`, but for the end of a string. A valid
UTF-8 string can't have an incomplete codepoint at the end of it.

* `(?m:^)` follows similarly to `^`, but it can match immediately following
a `\n`. However, since a `\n` is always a codepoint itself and can never
appear within a codepoint, it follows that the position immediately following
a `\n` in a string that is valid UTF-8 is guaranteed to not be between the
code units of another codepoint. (One caveat here is that the line terminator
for multi-line anchors can now be changed to any arbitrary byte, including
things like `\x98` which might occur within a codepoint. However, this wasn't
supported by the old regex crate. If it were, it would pose the same problems
as `(?-u:\B)`, as we'll discuss below.)

* `(?m:$)` a similar argument as for `(?m:^)`. The only difference is that a
`(?m:$)` matches just before a `\n`. But the same argument applies.

* `(?Rm:^)` and `(?Rm:$)` weren't supported by the old regex crate, but the
CRLF aware line anchors follow a similar argument as for `(?m:^)` and
`(?m:$)`. Namely, since they only ever match at a boundary where one side is
either a `\r` or a `\n`, neither of which can occur within a codepoint.

* `\b` only matches at positions where both sides are valid codepoints, so
this cannot split a codepoint.

* `\B`, like `\b`, also only matches at positions where both sides are valid
codepoints. So this cannot split a codepoint either.

* `(?-u:\b)` matches only at positions where at least one side of it is an
ASCII word byte. Since ASCII bytes cannot appear as code units in non-ASCII
codepoints (one of the many amazing qualities of UTF-8), it follows that this
too cannot split a codepoint.

* `(?-u:\B)` finally represents a problem. It can match between *any* two
bytes that are either both word bytes or non-word bytes. Since code units
like `\xE2` and `\x98` (from the UTF-8 encoding of `☃`) are both non-word
bytes, `(?-u:\B)` will match at the position between them. Thus, our approach
of incrementing one codepoint at a time after seeing an empty match is flawed
because `(?-u:\B)` can result in an empty match that splits a codepoint at a
position past the starting point of a search. For example, searching
`(?-u:\B)` on `a☃` would produce the following matches: `[2, 2]`, `[3, 3]`
and `[4, 4]`. The positions at `0` and `1` don't match because they
correspond to word boundaries since `a` is an ASCII word byte.

So what did the old regex crate do to avoid this? It banned `(?-u:\B)` from
regexes that could match `&str`. That might sound extreme, but a lot of other
things were banned too. For example, all of `(?-u:.)`, `(?-u:[^a])` and
`(?-u:\W)` can match invalid UTF-8 too, including individual code units
within a codepoint. The key difference is that those expressions could never
produce an empty match. That ban happens when translating an `Ast` to an
`Hir`, because that process can reason about whether an `Hir` can produce
*non-empty* matches at invalid UTF-8 boundaries. Bottom line though is that
we side-stepped the `(?-u:\B)` issue by banning it.
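
To make the problematic case concrete, here is a minimal sketch using the
top-level `regex` crate's `bytes` API (that crate is not a dependency of this
one, so the snippet is shown as illustrative text rather than a doctest; the
offsets are the same ones discussed above):

```text
use regex::bytes::Regex;

// The bytes API permits disabling Unicode mode, so `(?-u:\B)` is allowed.
let re = Regex::new(r"(?-u:\B)").unwrap();
// "a☃" is the byte sequence [0x61, 0xE2, 0x98, 0x83].
let starts: Vec<usize> =
    re.find_iter("a☃".as_bytes()).map(|m| m.start()).collect();
// Empty matches at offsets 2, 3 and 4; the first two split the snowman.
assert_eq!(starts, vec![2, 3, 4]);
```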
If banning `(?-u:\B)` were the only issue with the old regex crate's approach, then I probably would have kept it. `\B` is rarely used, so it's not such a big deal to have to work-around it. However, the problem with the above approach is that it doesn't compose. The logic for avoiding splitting a codepoint only lived in the iterator, which means if anyone wants to implement their own iterator over regex matches, they have to deal with this extremely subtle edge case to get full correctness. Instead, in this crate, we take the approach of pushing this complexity down to the lowest layers of each regex engine. The approach is pretty simple: * If this corner case doesn't apply, don't do anything. (For example, if UTF-8 mode isn't enabled or if the regex cannot match the empty string.) * If an empty match is reported, explicitly check if it splits a codepoint. * If it doesn't, we're done, return the match. * If it does, then ignore the match and re-run the search. * Repeat the above process until the end of the haystack is reached or a match is found that doesn't split a codepoint or isn't zero width. And that's pretty much what this module provides. Every regex engine uses these methods in their lowest level public APIs, but just above the layer where their internal engine is used. That way, all regex engines can be arbitrarily composed without worrying about handling this case, and iterators don't need to handle it explicitly. (It turns out that a new feature I added, support for changing the line terminator in a regex to any arbitrary byte, also provokes the above problem. Namely, the byte could be invalid UTF-8 or a UTF-8 continuation byte. So that support would need to be limited or banned when UTF-8 mode is enabled, just like we did for `(?-u:\B)`. But thankfully our more robust approach in this crate handles that case just fine too.) */ use crate::util::search::{Input, MatchError}; #[cold] #[inline(never)] pub(crate) fn skip_splits_fwd<T, F>( input: &Input<'_>, init_value: T, match_offset: usize, find: F, ) -> Result<Option<T>, MatchError> where F: FnMut(&Input<'_>) -> Result<Option<(T, usize)>, MatchError>, { skip_splits(true, input, init_value, match_offset, find) } #[cold] #[inline(never)] pub(crate) fn skip_splits_rev<T, F>( input: &Input<'_>, init_value: T, match_offset: usize, find: F, ) -> Result<Option<T>, MatchError> where F: FnMut(&Input<'_>) -> Result<Option<(T, usize)>, MatchError>, { skip_splits(false, input, init_value, match_offset, find) } fn skip_splits<T, F>( forward: bool, input: &Input<'_>, init_value: T, mut match_offset: usize, mut find: F, ) -> Result<Option<T>, MatchError> where F: FnMut(&Input<'_>) -> Result<Option<(T, usize)>, MatchError>, { // If our config says to do an anchored search, then we're definitely // done. We just need to determine whether we have a valid match or // not. If we don't, then we're not allowed to continue, so we report // no match. // // This is actually quite a subtle correctness thing. The key here is // that if we got an empty match that splits a codepoint after doing an // anchored search in UTF-8 mode, then that implies that we must have // *started* the search at a location that splits a codepoint. This // follows from the fact that if a match is reported from an anchored // search, then the start offset of the match *must* match the start // offset of the search. // // It also follows that no other non-empty match is possible. 
    // For example, you might write a regex like '(?:)|SOMETHING' and start
    // its search in the middle of a codepoint. The first branch is an empty
    // regex that will bubble up a match at the first position, and then
    // get rejected here and report no match. But what if 'SOMETHING' could
    // have matched? We reason that such a thing is impossible, because
    // if it does, it must report a match that starts in the middle of a
    // codepoint. This in turn implies that a match is reported whose span
    // does not correspond to valid UTF-8, and this breaks the promise
    // made when UTF-8 mode is enabled. (That promise *can* be broken, for
    // example, by enabling UTF-8 mode but building an NFA by hand that
    // produces non-empty matches that span invalid UTF-8. This is an
    // unchecked precondition violation of UTF-8 mode, and it is documented
    // to have unspecified behavior.)
    //
    // I believe this actually means that if an anchored search is run, and
    // UTF-8 mode is enabled and the start position splits a codepoint,
    // then it is correct to immediately report no match without even
    // executing the regex engine. But it doesn't really seem worth writing
    // out that case in every regex engine to save a tiny bit of work in an
    // extremely pathological case, so we just handle it here.
    if input.get_anchored().is_anchored() {
        return Ok(if input.is_char_boundary(match_offset) {
            Some(init_value)
        } else {
            None
        });
    }

    // Otherwise, we have an unanchored search, so just keep looking for
    // matches until we have one that does not split a codepoint or we hit
    // EOI.
    let mut value = init_value;
    let mut input = input.clone();
    while !input.is_char_boundary(match_offset) {
        if forward {
            // The unwrap is OK here because overflowing usize while
            // iterating over a slice is impossible, as it would require
            // a slice of length greater than isize::MAX, which is itself
            // impossible.
            input.set_start(input.start().checked_add(1).unwrap());
        } else {
            input.set_end(match input.end().checked_sub(1) {
                None => return Ok(None),
                Some(end) => end,
            });
        }
        match find(&input)? {
            None => return Ok(None),
            Some((new_value, new_match_end)) => {
                value = new_value;
                match_offset = new_match_end;
            }
        }
    }
    Ok(Some(value))
}
<file_sep>/regex-automata/src/util/start.rs
/*!
Provides some helpers for dealing with start state configurations in DFAs.

[`Start`] represents the possible starting configurations, while
[`StartByteMap`] represents a way to retrieve the `Start` configuration for a
given position in a haystack.
*/

use crate::util::{
    look::LookMatcher,
    search::Input,
    wire::{self, DeserializeError, SerializeError},
};

/// A map from every possible byte value to its corresponding starting
/// configuration.
///
/// This map is used in order to look up the start configuration for a
/// particular position in a haystack. This start configuration is then used in
/// combination with things like the anchored mode and pattern ID to fully
/// determine the start state.
///
/// Generally speaking, this map is only used for fully compiled DFAs and lazy
/// DFAs. For NFAs (including the one-pass DFA), the start state is generally
/// selected by virtue of traversing the NFA state graph. DFAs do the same
/// thing, but at build time and not search time. (Well, technically the lazy
/// DFA does it at search time, but it does enough work to cache the full
/// result of the epsilon closure that the NFA engines tend to need to do.)
#[derive(Clone)] pub(crate) struct StartByteMap { map: [Start; 256], } impl StartByteMap { /// Create a new map from byte values to their corresponding starting /// configurations. The map is determined, in part, by how look-around /// assertions are matched via the matcher given. pub(crate) fn new(lookm: &LookMatcher) -> StartByteMap { let mut map = [Start::NonWordByte; 256]; map[usize::from(b'\n')] = Start::LineLF; map[usize::from(b'\r')] = Start::LineCR; map[usize::from(b'_')] = Start::WordByte; let mut byte = b'0'; while byte <= b'9' { map[usize::from(byte)] = Start::WordByte; byte += 1; } byte = b'A'; while byte <= b'Z' { map[usize::from(byte)] = Start::WordByte; byte += 1; } byte = b'a'; while byte <= b'z' { map[usize::from(byte)] = Start::WordByte; byte += 1; } let lineterm = lookm.get_line_terminator(); // If our line terminator is normal, then it is already handled by // the LineLF and LineCR configurations. But if it's weird, then we // overwrite whatever was there before for that terminator with a // special configuration. The trick here is that if the terminator // is, say, a word byte like `a`, then callers seeing this start // configuration need to account for that and build their DFA state as // if it *also* came from a word byte. if lineterm != b'\r' && lineterm != b'\n' { map[usize::from(lineterm)] = Start::CustomLineTerminator; } StartByteMap { map } } /// Return the forward starting configuration for the given `input`. #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn fwd(&self, input: &Input) -> Start { match input .start() .checked_sub(1) .and_then(|i| input.haystack().get(i)) { None => Start::Text, Some(&byte) => self.get(byte), } } /// Return the reverse starting configuration for the given `input`. #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn rev(&self, input: &Input) -> Start { match input.haystack().get(input.end()) { None => Start::Text, Some(&byte) => self.get(byte), } } #[cfg_attr(feature = "perf-inline", inline(always))] fn get(&self, byte: u8) -> Start { self.map[usize::from(byte)] } /// Deserializes a byte class map from the given slice. If the slice is of /// insufficient length or otherwise contains an impossible mapping, then /// an error is returned. Upon success, the number of bytes read along with /// the map are returned. The number of bytes read is always a multiple of /// 8. pub(crate) fn from_bytes( slice: &[u8], ) -> Result<(StartByteMap, usize), DeserializeError> { wire::check_slice_len(slice, 256, "start byte map")?; let mut map = [Start::NonWordByte; 256]; for (i, &repr) in slice[..256].iter().enumerate() { map[i] = match Start::from_usize(usize::from(repr)) { Some(start) => start, None => { return Err(DeserializeError::generic( "found invalid starting configuration", )) } }; } Ok((StartByteMap { map }, 256)) } /// Writes this map to the given byte buffer. if the given buffer is too /// small, then an error is returned. Upon success, the total number of /// bytes written is returned. The number of bytes written is guaranteed to /// be a multiple of 8. pub(crate) fn write_to( &self, dst: &mut [u8], ) -> Result<usize, SerializeError> { let nwrite = self.write_to_len(); if dst.len() < nwrite { return Err(SerializeError::buffer_too_small("start byte map")); } for (i, &start) in self.map.iter().enumerate() { dst[i] = start.as_u8(); } Ok(nwrite) } /// Returns the total number of bytes written by `write_to`. 
    pub(crate) fn write_to_len(&self) -> usize {
        256
    }
}

impl core::fmt::Debug for StartByteMap {
    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
        use crate::util::escape::DebugByte;

        write!(f, "StartByteMap{{")?;
        for byte in 0..=255 {
            if byte > 0 {
                write!(f, ", ")?;
            }
            let start = self.map[usize::from(byte)];
            write!(f, "{:?} => {:?}", DebugByte(byte), start)?;
        }
        write!(f, "}}")?;
        Ok(())
    }
}

/// Represents the six possible starting configurations of a DFA search.
///
/// The starting configuration is determined by inspecting the beginning
/// of the haystack (up to 1 byte). Ultimately, this along with a pattern ID
/// (if specified) and the type of search (anchored or not) is what selects the
/// start state to use in a DFA.
///
/// As one example, if a DFA only supports unanchored searches and does not
/// support anchored searches for each pattern, then it will have at most 6
/// distinct start states. (Some start states may be reused if determinization
/// can determine that they will be equivalent.) If the DFA supports both
/// anchored and unanchored searches, then it will have a maximum of 12
/// distinct start states. Finally, if the DFA also supports anchored searches
/// for each pattern, then it can have up to `12 + (N * 6)` start states, where
/// `N` is the number of patterns.
///
/// Handling each of these starting configurations in the context of DFA
/// determinization can be *quite* tricky and subtle. But the code is small
/// and can be found at `crate::util::determinize::set_lookbehind_from_start`.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub(crate) enum Start {
    /// This occurs when the starting position is not any of the ones below.
    NonWordByte = 0,
    /// This occurs when the byte immediately preceding the start of the search
    /// is an ASCII word byte.
    WordByte = 1,
    /// This occurs when the starting position of the search corresponds to the
    /// beginning of the haystack.
    Text = 2,
    /// This occurs when the byte immediately preceding the start of the search
    /// is a line terminator. Specifically, `\n`.
    LineLF = 3,
    /// This occurs when the byte immediately preceding the start of the search
    /// is a line terminator. Specifically, `\r`.
    LineCR = 4,
    /// This occurs when a custom line terminator has been set via a
    /// `LookMatcher`, and when that line terminator is neither a `\r` nor a
    /// `\n`.
    ///
    /// If the custom line terminator is a word byte, then this start
    /// configuration is still selected. DFAs that implement word boundary
    /// assertions will likely need to check whether the custom line terminator
    /// is a word byte, in which case, it should behave as if the byte
    /// satisfies `\b` in addition to multi-line anchors.
    CustomLineTerminator = 5,
}

impl Start {
    /// Return the starting state corresponding to the given integer. If no
    /// starting state exists for the given integer, then None is returned.
    pub(crate) fn from_usize(n: usize) -> Option<Start> {
        match n {
            0 => Some(Start::NonWordByte),
            1 => Some(Start::WordByte),
            2 => Some(Start::Text),
            3 => Some(Start::LineLF),
            4 => Some(Start::LineCR),
            5 => Some(Start::CustomLineTerminator),
            _ => None,
        }
    }

    /// Returns the total number of starting state configurations.
    pub(crate) fn len() -> usize {
        6
    }

    /// Return this starting configuration as a `u8` integer. It is guaranteed
    /// to be less than `Start::len()`.
    #[cfg_attr(feature = "perf-inline", inline(always))]
    pub(crate) fn as_u8(&self) -> u8 {
        // AFAIK, 'as' is the only way to zero-cost convert an int enum to an
        // actual int.
*self as u8 } /// Return this starting configuration as a `usize` integer. It is /// guaranteed to be less than `Start::len()`. #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn as_usize(&self) -> usize { usize::from(self.as_u8()) } } #[cfg(test)] mod tests { use super::*; #[test] fn start_fwd_done_range() { let smap = StartByteMap::new(&LookMatcher::default()); assert_eq!(Start::Text, smap.fwd(&Input::new("").range(1..0))); } #[test] fn start_rev_done_range() { let smap = StartByteMap::new(&LookMatcher::default()); assert_eq!(Start::Text, smap.rev(&Input::new("").range(1..0))); } #[test] fn start_fwd() { let f = |haystack, start, end| { let smap = StartByteMap::new(&LookMatcher::default()); let input = &Input::new(haystack).range(start..end); smap.fwd(input) }; assert_eq!(Start::Text, f("", 0, 0)); assert_eq!(Start::Text, f("abc", 0, 3)); assert_eq!(Start::Text, f("\nabc", 0, 3)); assert_eq!(Start::LineLF, f("\nabc", 1, 3)); assert_eq!(Start::LineCR, f("\rabc", 1, 3)); assert_eq!(Start::WordByte, f("abc", 1, 3)); assert_eq!(Start::NonWordByte, f(" abc", 1, 3)); } #[test] fn start_rev() { let f = |haystack, start, end| { let smap = StartByteMap::new(&LookMatcher::default()); let input = &Input::new(haystack).range(start..end); smap.rev(input) }; assert_eq!(Start::Text, f("", 0, 0)); assert_eq!(Start::Text, f("abc", 0, 3)); assert_eq!(Start::Text, f("abc\n", 0, 4)); assert_eq!(Start::LineLF, f("abc\nz", 0, 3)); assert_eq!(Start::LineCR, f("abc\rz", 0, 3)); assert_eq!(Start::WordByte, f("abc", 0, 2)); assert_eq!(Start::NonWordByte, f("abc ", 0, 3)); } } <file_sep>/regex-automata/tests/dfa/onepass/suite.rs use { anyhow::Result, regex_automata::{ dfa::onepass::{self, DFA}, nfa::thompson, util::{iter, syntax}, }, regex_test::{ CompiledRegex, Match, RegexTest, SearchKind, Span, TestResult, TestRunner, }, }; use crate::{create_input, suite, testify_captures, untestify_kind}; const EXPANSIONS: &[&str] = &["is_match", "find", "captures"]; /// Tests the default configuration of the hybrid NFA/DFA. #[test] fn default() -> Result<()> { let builder = DFA::builder(); TestRunner::new()? .expand(EXPANSIONS, |t| t.compiles()) .test_iter(suite()?.iter(), compiler(builder)) .assert(); Ok(()) } /// Tests the hybrid NFA/DFA when 'starts_for_each_pattern' is enabled for all /// tests. #[test] fn starts_for_each_pattern() -> Result<()> { let mut builder = DFA::builder(); builder.configure(DFA::config().starts_for_each_pattern(true)); TestRunner::new()? .expand(EXPANSIONS, |t| t.compiles()) .test_iter(suite()?.iter(), compiler(builder)) .assert(); Ok(()) } /// Tests the hybrid NFA/DFA when byte classes are disabled. /// /// N.B. Disabling byte classes doesn't avoid any indirection at search time. /// All it does is cause every byte value to be its own distinct equivalence /// class. #[test] fn no_byte_classes() -> Result<()> { let mut builder = DFA::builder(); builder.configure(DFA::config().byte_classes(false)); TestRunner::new()? .expand(EXPANSIONS, |t| t.compiles()) .test_iter(suite()?.iter(), compiler(builder)) .assert(); Ok(()) } fn compiler( mut builder: onepass::Builder, ) -> impl FnMut(&RegexTest, &[String]) -> Result<CompiledRegex> { move |test, regexes| { // Check if our regex contains things that aren't supported by DFAs. // That is, Unicode word boundaries when searching non-ASCII text. 
if !configure_onepass_builder(test, &mut builder) { return Ok(CompiledRegex::skip()); } let re = match builder.build_many(&regexes) { Ok(re) => re, Err(err) => { let msg = err.to_string(); // This is pretty gross, but when a regex fails to compile as // a one-pass regex, then we want to be OK with that and just // skip the test. But we have to be careful to only skip it // when the expected result is that the regex compiles. If // the test is specifically checking that the regex does not // compile, then we should bubble up that error and allow the // test to pass. // // Since our error types are all generally opaque, we just // look for an error string. Not great, but not the end of the // world. if test.compiles() && msg.contains("not one-pass") { return Ok(CompiledRegex::skip()); } return Err(err.into()); } }; let mut cache = re.create_cache(); Ok(CompiledRegex::compiled(move |test| -> TestResult { run_test(&re, &mut cache, test) })) } } fn run_test( re: &DFA, cache: &mut onepass::Cache, test: &RegexTest, ) -> TestResult { let input = create_input(test); match test.additional_name() { "is_match" => { TestResult::matched(re.is_match(cache, input.earliest(true))) } "find" => match test.search_kind() { SearchKind::Earliest | SearchKind::Leftmost => { let input = input.earliest(test.search_kind() == SearchKind::Earliest); let mut caps = re.create_captures(); let it = iter::Searcher::new(input) .into_matches_iter(|input| { re.try_search(cache, input, &mut caps)?; Ok(caps.get_match()) }) .infallible() .take(test.match_limit().unwrap_or(std::usize::MAX)) .map(|m| Match { id: m.pattern().as_usize(), span: Span { start: m.start(), end: m.end() }, }); TestResult::matches(it) } SearchKind::Overlapping => { // The one-pass DFA does not support any kind of overlapping // search. This is not just a matter of not having the API. // It's fundamentally incompatible with the one-pass concept. // If overlapping matches were possible, then the one-pass DFA // would fail to build. TestResult::skip() } }, "captures" => match test.search_kind() { SearchKind::Earliest | SearchKind::Leftmost => { let input = input.earliest(test.search_kind() == SearchKind::Earliest); let it = iter::Searcher::new(input) .into_captures_iter(re.create_captures(), |input, caps| { re.try_search(cache, input, caps) }) .infallible() .take(test.match_limit().unwrap_or(std::usize::MAX)) .map(|caps| testify_captures(&caps)); TestResult::captures(it) } SearchKind::Overlapping => { // The one-pass DFA does not support any kind of overlapping // search. This is not just a matter of not having the API. // It's fundamentally incompatible with the one-pass concept. // If overlapping matches were possible, then the one-pass DFA // would fail to build. TestResult::skip() } }, name => TestResult::fail(&format!("unrecognized test name: {}", name)), } } /// Configures the given regex builder with all relevant settings on the given /// regex test. /// /// If the regex test has a setting that is unsupported, then this returns /// false (implying the test should be skipped). fn configure_onepass_builder( test: &RegexTest, builder: &mut onepass::Builder, ) -> bool { if !test.anchored() { return false; } let match_kind = match untestify_kind(test.match_kind()) { None => return false, Some(k) => k, }; let config = DFA::config().match_kind(match_kind); builder .configure(config) .syntax(config_syntax(test)) .thompson(config_thompson(test)); true } /// Configuration of a Thompson NFA compiler from a regex test. 
fn config_thompson(test: &RegexTest) -> thompson::Config { let mut lookm = regex_automata::util::look::LookMatcher::new(); lookm.set_line_terminator(test.line_terminator()); thompson::Config::new().utf8(test.utf8()).look_matcher(lookm) } /// Configuration of the regex parser from a regex test. fn config_syntax(test: &RegexTest) -> syntax::Config { syntax::Config::new() .case_insensitive(test.case_insensitive()) .unicode(test.unicode()) .utf8(test.utf8()) .line_terminator(test.line_terminator()) } <file_sep>/fuzz/fuzz_targets/ast_fuzz_match.rs #![no_main] use { libfuzzer_sys::{fuzz_target, Corpus}, regex::RegexBuilder, regex_syntax::ast::Ast, }; #[derive(Eq, PartialEq, arbitrary::Arbitrary)] struct FuzzData { ast: Ast, haystack: String, } impl std::fmt::Debug for FuzzData { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut builder = f.debug_struct("FuzzData"); builder.field("ast", &format!("{}", self.ast)); builder.field("haystack", &self.haystack); builder.finish() } } fuzz_target!(|data: FuzzData| -> Corpus { let _ = env_logger::try_init(); let pattern = format!("{}", data.ast); let Ok(re) = RegexBuilder::new(&pattern).size_limit(1 << 20).build() else { return Corpus::Reject; }; let _ = re.is_match(&data.haystack); let _ = re.find(&data.haystack); let _ = re.captures(&data.haystack).map_or(0, |c| c.len()); Corpus::Keep }); <file_sep>/regex-lite/tests/fuzz/mod.rs #[test] fn captures_wrong_order() { let data = include_bytes!( "testdata/crash-a886ce2b0d64963f1232f9b08b8c9ad4740c26f5" ); let _ = run(data); } #[test] fn captures_wrong_order_min() { let data = include_bytes!( "testdata/minimized-from-298f84f9dbb2589cb9938a63334fa4083b609f34" ); let _ = run(data); } // This is the fuzz target function. We duplicate it here since this is the // thing we use to interpret the data. It is ultimately what we want to // succeed. fn run(data: &[u8]) -> Option<()> { if data.len() < 2 { return None; } let mut split_at = usize::from(data[0]); let data = std::str::from_utf8(&data[1..]).ok()?; // Split data into a regex and haystack to search. let len = usize::try_from(data.chars().count()).ok()?; split_at = std::cmp::max(split_at, 1) % len; let char_index = data.char_indices().nth(split_at)?.0; let (pattern, input) = data.split_at(char_index); let re = regex_lite::Regex::new(pattern).ok()?; re.is_match(input); Some(()) } <file_sep>/regex-automata/src/util/prefilter/aho_corasick.rs use crate::util::{ prefilter::PrefilterI, search::{MatchKind, Span}, }; #[derive(Clone, Debug)] pub(crate) struct AhoCorasick { #[cfg(not(feature = "perf-literal-multisubstring"))] _unused: (), #[cfg(feature = "perf-literal-multisubstring")] ac: aho_corasick::AhoCorasick, } impl AhoCorasick { pub(crate) fn new<B: AsRef<[u8]>>( kind: MatchKind, needles: &[B], ) -> Option<AhoCorasick> { #[cfg(not(feature = "perf-literal-multisubstring"))] { None } #[cfg(feature = "perf-literal-multisubstring")] { // We used to use `aho_corasick::MatchKind::Standard` here when // `kind` was `MatchKind::All`, but this is not correct. The // "standard" Aho-Corasick match semantics are to report a match // immediately as soon as it is seen, but `All` isn't like that. // In particular, with "standard" semantics, given the needles // "abc" and "b" and the haystack "abc," it would report a match // at offset 1 before a match at offset 0. This is never what we // want in the context of the regex engine, regardless of whether // we have leftmost-first or 'all' semantics. Namely, we always // want the leftmost match. 
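            // Hence, both of our match kinds get mapped to Aho-Corasick's
            // leftmost-first semantics below.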
let ac_match_kind = match kind { MatchKind::LeftmostFirst | MatchKind::All => { aho_corasick::MatchKind::LeftmostFirst } }; // This is kind of just an arbitrary number, but basically, if we // have a small enough set of literals, then we try to use the VERY // memory hungry DFA. Otherwise, we whimp out and use an NFA. The // upshot is that the NFA is quite lean and decently fast. Faster // than a naive Aho-Corasick NFA anyway. let ac_kind = if needles.len() <= 500 { aho_corasick::AhoCorasickKind::DFA } else { aho_corasick::AhoCorasickKind::ContiguousNFA }; let result = aho_corasick::AhoCorasick::builder() .kind(Some(ac_kind)) .match_kind(ac_match_kind) .start_kind(aho_corasick::StartKind::Both) // We try to handle all of the prefilter cases in the super // module, and only use Aho-Corasick for the actual automaton. // The aho-corasick crate does have some extra prefilters, // namely, looking for rare bytes to feed to memchr{,2,3} // instead of just the first byte. If we end up wanting // those---and they are somewhat tricky to implement---then // we could port them to this crate. // // The main reason for doing things this way is so we have a // complete and easy to understand picture of which prefilters // are available and how they work. Otherwise it seems too // easy to get into a situation where we have a prefilter // layered on top of prefilter, and that might have unintended // consequences. .prefilter(false) .build(needles); let ac = match result { Ok(ac) => ac, Err(_err) => { debug!("aho-corasick prefilter failed to build: {}", _err); return None; } }; Some(AhoCorasick { ac }) } } } impl PrefilterI for AhoCorasick { fn find(&self, haystack: &[u8], span: Span) -> Option<Span> { #[cfg(not(feature = "perf-literal-multisubstring"))] { unreachable!() } #[cfg(feature = "perf-literal-multisubstring")] { let input = aho_corasick::Input::new(haystack).span(span.start..span.end); self.ac .find(input) .map(|m| Span { start: m.start(), end: m.end() }) } } fn prefix(&self, haystack: &[u8], span: Span) -> Option<Span> { #[cfg(not(feature = "perf-literal-multisubstring"))] { unreachable!() } #[cfg(feature = "perf-literal-multisubstring")] { let input = aho_corasick::Input::new(haystack) .anchored(aho_corasick::Anchored::Yes) .span(span.start..span.end); self.ac .find(input) .map(|m| Span { start: m.start(), end: m.end() }) } } fn memory_usage(&self) -> usize { #[cfg(not(feature = "perf-literal-multisubstring"))] { unreachable!() } #[cfg(feature = "perf-literal-multisubstring")] { self.ac.memory_usage() } } fn is_fast(&self) -> bool { #[cfg(not(feature = "perf-literal-multisubstring"))] { unreachable!() } #[cfg(feature = "perf-literal-multisubstring")] { // Aho-Corasick is never considered "fast" because it's never // going to be even close to an order of magnitude faster than the // regex engine itself (assuming a DFA is used). In fact, it is // usually slower. The magic of Aho-Corasick is that it can search // a *large* number of literals with a relatively small amount of // memory. The regex engines are far more wasteful. // // Aho-Corasick may be "fast" when the regex engine corresponds // to, say, the PikeVM. That happens when the lazy DFA couldn't be // built or used for some reason. But in these cases, the regex // itself is likely quite big and we're probably hosed no matter // what we do. (In this case, the best bet is for the caller to // increase some of the memory limits on the hybrid cache capacity // and hope that's enough.) 
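            // So we unconditionally report this prefilter as "not fast."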
false } } } <file_sep>/regex-automata/src/util/prefilter/teddy.rs use crate::util::{ prefilter::PrefilterI, search::{MatchKind, Span}, }; #[derive(Clone, Debug)] pub(crate) struct Teddy { #[cfg(not(feature = "perf-literal-multisubstring"))] _unused: (), /// The actual Teddy searcher. /// /// Technically, it's possible that Teddy doesn't actually get used, since /// Teddy does require its haystack to at least be of a certain size /// (usually around the size of whatever vector is being used, so ~16 /// or ~32 bytes). For haystacks shorter than that, the implementation /// currently uses Rabin-Karp. #[cfg(feature = "perf-literal-multisubstring")] searcher: aho_corasick::packed::Searcher, /// When running an anchored search, the packed searcher can't handle it so /// we defer to Aho-Corasick itself. Kind of sad, but changing the packed /// searchers to support anchored search would be difficult at worst and /// annoying at best. Since packed searchers only apply to small numbers of /// literals, we content ourselves that this is not much of an added cost. /// (That packed searchers only work with a small number of literals is /// also why we use a DFA here. Otherwise, the memory usage of a DFA would /// likely be unacceptable.) #[cfg(feature = "perf-literal-multisubstring")] anchored_ac: aho_corasick::dfa::DFA, /// The length of the smallest literal we look for. /// /// We use this as a heuristic to figure out whether this will be "fast" or /// not. Generally, the longer the better, because longer needles are more /// discriminating and thus reduce false positive rate. #[cfg(feature = "perf-literal-multisubstring")] minimum_len: usize, } impl Teddy { pub(crate) fn new<B: AsRef<[u8]>>( kind: MatchKind, needles: &[B], ) -> Option<Teddy> { #[cfg(not(feature = "perf-literal-multisubstring"))] { None } #[cfg(feature = "perf-literal-multisubstring")] { // We only really support leftmost-first semantics. In // theory we could at least support leftmost-longest, as the // aho-corasick crate does, but regex-automata doesn't know about // leftmost-longest currently. // // And like the aho-corasick prefilter, if we're using `All` // semantics, then we can still use leftmost semantics for a // prefilter. (This might be a suspicious choice for the literal // engine, which uses a prefilter as a regex engine directly, but // that only happens when using leftmost-first semantics.) 
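            // As with the Aho-Corasick prefilter, both match kinds are mapped
            // to leftmost-first semantics below.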
let (packed_match_kind, ac_match_kind) = match kind { MatchKind::LeftmostFirst | MatchKind::All => ( aho_corasick::packed::MatchKind::LeftmostFirst, aho_corasick::MatchKind::LeftmostFirst, ), }; let minimum_len = needles.iter().map(|n| n.as_ref().len()).min().unwrap_or(0); let packed = aho_corasick::packed::Config::new() .match_kind(packed_match_kind) .builder() .extend(needles) .build()?; let anchored_ac = aho_corasick::dfa::DFA::builder() .match_kind(ac_match_kind) .start_kind(aho_corasick::StartKind::Anchored) .prefilter(false) .build(needles) .ok()?; Some(Teddy { searcher: packed, anchored_ac, minimum_len }) } } } impl PrefilterI for Teddy { fn find(&self, haystack: &[u8], span: Span) -> Option<Span> { #[cfg(not(feature = "perf-literal-multisubstring"))] { unreachable!() } #[cfg(feature = "perf-literal-multisubstring")] { let ac_span = aho_corasick::Span { start: span.start, end: span.end }; self.searcher .find_in(haystack, ac_span) .map(|m| Span { start: m.start(), end: m.end() }) } } fn prefix(&self, haystack: &[u8], span: Span) -> Option<Span> { #[cfg(not(feature = "perf-literal-multisubstring"))] { unreachable!() } #[cfg(feature = "perf-literal-multisubstring")] { use aho_corasick::automaton::Automaton; let input = aho_corasick::Input::new(haystack) .anchored(aho_corasick::Anchored::Yes) .span(span.start..span.end); self.anchored_ac .try_find(&input) // OK because we build the DFA with anchored support. .expect("aho-corasick DFA should never fail") .map(|m| Span { start: m.start(), end: m.end() }) } } fn memory_usage(&self) -> usize { #[cfg(not(feature = "perf-literal-multisubstring"))] { unreachable!() } #[cfg(feature = "perf-literal-multisubstring")] { use aho_corasick::automaton::Automaton; self.searcher.memory_usage() + self.anchored_ac.memory_usage() } } fn is_fast(&self) -> bool { #[cfg(not(feature = "perf-literal-multisubstring"))] { unreachable!() } #[cfg(feature = "perf-literal-multisubstring")] { // Teddy is usually quite fast, but I have seen some cases where // a large number of literals can overwhelm it and make it not so // fast. We make an educated but conservative guess at a limit, at // which point, we're not so comfortable thinking Teddy is "fast." // // Well... this used to incorporate a "limit" on the *number* // of literals, but I have since changed it to a minimum on the // *smallest* literal. Namely, when there is a very small literal // (1 or 2 bytes), it is far more likely that it leads to a higher // false positive rate. (Although, of course, not always. For // example, 'zq' is likely to have a very low false positive rate.) // But when we have 3 bytes, we have a really good chance of being // quite discriminatory and thus fast. // // We may still want to add some kind of limit on the number of // literals here, but keep in mind that Teddy already has its own // somewhat small limit (64 at time of writing). The main issue // here is that if 'is_fast' is false, it opens the door for the // reverse inner optimization to kick in. We really only want to // resort to the reverse inner optimization if we absolutely must. self.minimum_len >= 3 } } } <file_sep>/Cross.toml [build.env] passthrough = [ "RUST_BACKTRACE", "RUST_LOG", "REGEX_TEST", "REGEX_TEST_VERBOSE", ] <file_sep>/testdata/flags.toml [[test]] name = "1" regex = "(?i)abc" haystack = "ABC" matches = [[0, 3]] [[test]] name = "2" regex = "(?i)a(?-i)bc" haystack = "Abc" matches = [[0, 3]] [[test]] name = "3" regex = "(?i)a(?-i)bc" haystack = "ABC" matches = [] [[test]] name = "4" regex = "(?is)a." 
haystack = "A\n" matches = [[0, 2]] [[test]] name = "5" regex = "(?is)a.(?-is)a." haystack = "A\nab" matches = [[0, 4]] [[test]] name = "6" regex = "(?is)a.(?-is)a." haystack = "A\na\n" matches = [] [[test]] name = "7" regex = "(?is)a.(?-is:a.)?" haystack = "A\na\n" matches = [[0, 2]] match-limit = 1 [[test]] name = "8" regex = "(?U)a+" haystack = "aa" matches = [[0, 1]] match-limit = 1 [[test]] name = "9" regex = "(?U)a+?" haystack = "aa" matches = [[0, 2]] [[test]] name = "10" regex = "(?U)(?-U)a+" haystack = "aa" matches = [[0, 2]] [[test]] name = "11" regex = '(?m)(?:^\d+$\n?)+' haystack = "123\n456\n789" matches = [[0, 11]] unicode = false <file_sep>/regex-automata/src/util/look.rs /*! Types and routines for working with look-around assertions. This module principally defines two types: * [`Look`] enumerates all of the assertions supported by this crate. * [`LookSet`] provides a way to efficiently store a set of [`Look`] values. * [`LookMatcher`] provides routines for checking whether a `Look` or a `LookSet` matches at a particular position in a haystack. */ // LAMENTATION: Sadly, a lot of the API of `Look` and `LookSet` were basically // copied verbatim from the regex-syntax crate. I would have no problems using // the regex-syntax types and defining the matching routines (only found // in this crate) as free functions, except the `Look` and `LookSet` types // are used in lots of places. Including in places we expect to work when // regex-syntax is *not* enabled, such as in the definition of the NFA itself. // // Thankfully the code we copy is pretty simple and there isn't much of it. // Otherwise, the rest of this module deals with *matching* the assertions, // which is not something that regex-syntax handles. use crate::util::{escape::DebugByte, utf8}; /// A look-around assertion. /// /// An assertion matches at a position between characters in a haystack. /// Namely, it does not actually "consume" any input as most parts of a regular /// expression do. Assertions are a way of stating that some property must be /// true at a particular point during matching. /// /// For example, `(?m)^[a-z]+$` is a pattern that: /// /// * Scans the haystack for a position at which `(?m:^)` is satisfied. That /// occurs at either the beginning of the haystack, or immediately following /// a `\n` character. /// * Looks for one or more occurrences of `[a-z]`. /// * Once `[a-z]+` has matched as much as it can, an overall match is only /// reported when `[a-z]+` stops just before a `\n`. /// /// So in this case, `abc` and `\nabc\n` match, but `\nabc1\n` does not. /// /// Assertions are also called "look-around," "look-behind" and "look-ahead." /// Specifically, some assertions are look-behind (like `^`), other assertions /// are look-ahead (like `$`) and yet other assertions are both look-ahead and /// look-behind (like `\b`). /// /// # Assertions in an NFA /// /// An assertion in a [`thompson::NFA`](crate::nfa::thompson::NFA) can be /// thought of as a conditional epsilon transition. That is, a matching engine /// like the [`PikeVM`](crate::nfa::thompson::pikevm::PikeVM) only permits /// moving through conditional epsilon transitions when their condition /// is satisfied at whatever position the `PikeVM` is currently at in the /// haystack. /// /// How assertions are handled in a `DFA` is trickier, since a DFA does not /// have epsilon transitions at all. In this case, they are compiled into the /// automaton itself, at the expense of more states than what would be required /// without an assertion. 
#[derive(Clone, Copy, Debug, Eq, PartialEq)] pub enum Look { /// Match the beginning of text. Specifically, this matches at the starting /// position of the input. Start = 1 << 0, /// Match the end of text. Specifically, this matches at the ending /// position of the input. End = 1 << 1, /// Match the beginning of a line or the beginning of text. Specifically, /// this matches at the starting position of the input, or at the position /// immediately following a `\n` character. StartLF = 1 << 2, /// Match the end of a line or the end of text. Specifically, this matches /// at the end position of the input, or at the position immediately /// preceding a `\n` character. EndLF = 1 << 3, /// Match the beginning of a line or the beginning of text. Specifically, /// this matches at the starting position of the input, or at the position /// immediately following either a `\r` or `\n` character, but never after /// a `\r` when a `\n` follows. StartCRLF = 1 << 4, /// Match the end of a line or the end of text. Specifically, this matches /// at the end position of the input, or at the position immediately /// preceding a `\r` or `\n` character, but never before a `\n` when a `\r` /// precedes it. EndCRLF = 1 << 5, /// Match an ASCII-only word boundary. That is, this matches a position /// where the left adjacent character and right adjacent character /// correspond to a word and non-word or a non-word and word character. WordAscii = 1 << 6, /// Match an ASCII-only negation of a word boundary. WordAsciiNegate = 1 << 7, /// Match a Unicode-aware word boundary. That is, this matches a position /// where the left adjacent character and right adjacent character /// correspond to a word and non-word or a non-word and word character. WordUnicode = 1 << 8, /// Match a Unicode-aware negation of a word boundary. WordUnicodeNegate = 1 << 9, } impl Look { /// Flip the look-around assertion to its equivalent for reverse searches. /// For example, `StartLF` gets translated to `EndLF`. /// /// Some assertions, such as `WordUnicode`, remain the same since they /// match the same positions regardless of the direction of the search. #[inline] pub const fn reversed(self) -> Look { match self { Look::Start => Look::End, Look::End => Look::Start, Look::StartLF => Look::EndLF, Look::EndLF => Look::StartLF, Look::StartCRLF => Look::EndCRLF, Look::EndCRLF => Look::StartCRLF, Look::WordAscii => Look::WordAscii, Look::WordAsciiNegate => Look::WordAsciiNegate, Look::WordUnicode => Look::WordUnicode, Look::WordUnicodeNegate => Look::WordUnicodeNegate, } } /// Return the underlying representation of this look-around enumeration /// as an integer. Giving the return value to the [`Look::from_repr`] /// constructor is guaranteed to return the same look-around variant that /// one started with within a semver compatible release of this crate. #[inline] pub const fn as_repr(self) -> u16 { // AFAIK, 'as' is the only way to zero-cost convert an int enum to an // actual int. self as u16 } /// Given the underlying representation of a `Look` value, return the /// corresponding `Look` value if the representation is valid. Otherwise /// `None` is returned. 
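    ///
    /// # Example
    ///
    /// A brief round-trip sketch (only the round-trip with [`Look::as_repr`]
    /// is being relied upon here, not any particular bit pattern):
    ///
    /// ```
    /// use regex_automata::util::look::Look;
    ///
    /// let repr = Look::WordAscii.as_repr();
    /// assert_eq!(Some(Look::WordAscii), Look::from_repr(repr));
    /// // A representation that doesn't correspond to any assertion gives None.
    /// assert_eq!(None, Look::from_repr(u16::MAX));
    /// ```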
#[inline] pub const fn from_repr(repr: u16) -> Option<Look> { match repr { 0b00_0000_0001 => Some(Look::Start), 0b00_0000_0010 => Some(Look::End), 0b00_0000_0100 => Some(Look::StartLF), 0b00_0000_1000 => Some(Look::EndLF), 0b00_0001_0000 => Some(Look::StartCRLF), 0b00_0010_0000 => Some(Look::EndCRLF), 0b00_0100_0000 => Some(Look::WordAscii), 0b00_1000_0000 => Some(Look::WordAsciiNegate), 0b01_0000_0000 => Some(Look::WordUnicode), 0b10_0000_0000 => Some(Look::WordUnicodeNegate), _ => None, } } /// Returns a convenient single codepoint representation of this /// look-around assertion. Each assertion is guaranteed to be represented /// by a distinct character. /// /// This is useful for succinctly representing a look-around assertion in /// human friendly but succinct output intended for a programmer working on /// regex internals. #[inline] pub const fn as_char(self) -> char { match self { Look::Start => 'A', Look::End => 'z', Look::StartLF => '^', Look::EndLF => '$', Look::StartCRLF => 'r', Look::EndCRLF => 'R', Look::WordAscii => 'b', Look::WordAsciiNegate => 'B', Look::WordUnicode => '𝛃', Look::WordUnicodeNegate => '𝚩', } } } /// LookSet is a memory-efficient set of look-around assertions. /// /// This is useful for efficiently tracking look-around assertions. For /// example, a [`thompson::NFA`](crate::nfa::thompson::NFA) provides properties /// that return `LookSet`s. #[derive(Clone, Copy, Default, Eq, PartialEq)] pub struct LookSet { /// The underlying representation this set is exposed to make it possible /// to store it somewhere efficiently. The representation is that /// of a bitset, where each assertion occupies bit `i` where `i = /// Look::as_repr()`. /// /// Note that users of this internal representation must permit the full /// range of `u16` values to be represented. For example, even if the /// current implementation only makes use of the 10 least significant bits, /// it may use more bits in a future semver compatible release. pub bits: u16, } impl LookSet { /// Create an empty set of look-around assertions. #[inline] pub fn empty() -> LookSet { LookSet { bits: 0 } } /// Create a full set of look-around assertions. /// /// This set contains all possible look-around assertions. #[inline] pub fn full() -> LookSet { LookSet { bits: !0 } } /// Create a look-around set containing the look-around assertion given. /// /// This is a convenience routine for creating an empty set and inserting /// one look-around assertions. #[inline] pub fn singleton(look: Look) -> LookSet { LookSet::empty().insert(look) } /// Returns the total number of look-around assertions in this set. #[inline] pub fn len(self) -> usize { // OK because max value always fits in a u8, which in turn always // fits in a usize, regardless of target. usize::try_from(self.bits.count_ones()).unwrap() } /// Returns true if and only if this set is empty. #[inline] pub fn is_empty(self) -> bool { self.len() == 0 } /// Returns true if and only if the given look-around assertion is in this /// set. #[inline] pub fn contains(self, look: Look) -> bool { self.bits & look.as_repr() != 0 } /// Returns true if and only if this set contains any anchor assertions. /// This includes both "start/end of haystack" and "start/end of line." #[inline] pub fn contains_anchor(&self) -> bool { self.contains_anchor_haystack() || self.contains_anchor_line() } /// Returns true if and only if this set contains any "start/end of /// haystack" anchors. This doesn't include "start/end of line" anchors. 
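    ///
    /// For example (a small sketch):
    ///
    /// ```
    /// use regex_automata::util::look::{Look, LookSet};
    ///
    /// assert!(LookSet::singleton(Look::Start).contains_anchor_haystack());
    /// // Line anchors alone don't count as haystack anchors.
    /// assert!(!LookSet::singleton(Look::StartLF).contains_anchor_haystack());
    /// ```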
#[inline] pub fn contains_anchor_haystack(&self) -> bool { self.contains(Look::Start) || self.contains(Look::End) } /// Returns true if and only if this set contains any "start/end of line" /// anchors. This doesn't include "start/end of haystack" anchors. This /// includes both `\n` line anchors and CRLF (`\r\n`) aware line anchors. #[inline] pub fn contains_anchor_line(&self) -> bool { self.contains(Look::StartLF) || self.contains(Look::EndLF) || self.contains(Look::StartCRLF) || self.contains(Look::EndCRLF) } /// Returns true if and only if this set contains any "start/end of line" /// anchors that only treat `\n` as line terminators. This does not include /// haystack anchors or CRLF aware line anchors. #[inline] pub fn contains_anchor_lf(&self) -> bool { self.contains(Look::StartLF) || self.contains(Look::EndLF) } /// Returns true if and only if this set contains any "start/end of line" /// anchors that are CRLF-aware. This doesn't include "start/end of /// haystack" or "start/end of line-feed" anchors. #[inline] pub fn contains_anchor_crlf(&self) -> bool { self.contains(Look::StartCRLF) || self.contains(Look::EndCRLF) } /// Returns true if and only if this set contains any word boundary or /// negated word boundary assertions. This include both Unicode and ASCII /// word boundaries. #[inline] pub fn contains_word(self) -> bool { self.contains_word_unicode() || self.contains_word_ascii() } /// Returns true if and only if this set contains any Unicode word boundary /// or negated Unicode word boundary assertions. #[inline] pub fn contains_word_unicode(self) -> bool { self.contains(Look::WordUnicode) || self.contains(Look::WordUnicodeNegate) } /// Returns true if and only if this set contains any ASCII word boundary /// or negated ASCII word boundary assertions. #[inline] pub fn contains_word_ascii(self) -> bool { self.contains(Look::WordAscii) || self.contains(Look::WordAsciiNegate) } /// Returns an iterator over all of the look-around assertions in this set. #[inline] pub fn iter(self) -> LookSetIter { LookSetIter { set: self } } /// Return a new set that is equivalent to the original, but with the given /// assertion added to it. If the assertion is already in the set, then the /// returned set is equivalent to the original. #[inline] pub fn insert(self, look: Look) -> LookSet { LookSet { bits: self.bits | look.as_repr() } } /// Updates this set in place with the result of inserting the given /// assertion into this set. #[inline] pub fn set_insert(&mut self, look: Look) { *self = self.insert(look); } /// Return a new set that is equivalent to the original, but with the given /// assertion removed from it. If the assertion is not in the set, then the /// returned set is equivalent to the original. #[inline] pub fn remove(self, look: Look) -> LookSet { LookSet { bits: self.bits & !look.as_repr() } } /// Updates this set in place with the result of removing the given /// assertion from this set. #[inline] pub fn set_remove(&mut self, look: Look) { *self = self.remove(look); } /// Returns a new set that is the result of subtracting the given set from /// this set. #[inline] pub fn subtract(self, other: LookSet) -> LookSet { LookSet { bits: self.bits & !other.bits } } /// Updates this set in place with the result of subtracting the given set /// from this set. #[inline] pub fn set_subtract(&mut self, other: LookSet) { *self = self.subtract(other); } /// Returns a new set that is the union of this and the one given. 
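    ///
    /// For example (a small sketch):
    ///
    /// ```
    /// use regex_automata::util::look::{Look, LookSet};
    ///
    /// let set = LookSet::singleton(Look::StartLF)
    ///     .union(LookSet::singleton(Look::EndLF));
    /// assert!(set.contains(Look::StartLF));
    /// assert!(set.contains(Look::EndLF));
    /// assert_eq!(2, set.len());
    /// ```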
#[inline] pub fn union(self, other: LookSet) -> LookSet { LookSet { bits: self.bits | other.bits } } /// Updates this set in place with the result of unioning it with the one /// given. #[inline] pub fn set_union(&mut self, other: LookSet) { *self = self.union(other); } /// Returns a new set that is the intersection of this and the one given. #[inline] pub fn intersect(self, other: LookSet) -> LookSet { LookSet { bits: self.bits & other.bits } } /// Updates this set in place with the result of intersecting it with the /// one given. #[inline] pub fn set_intersect(&mut self, other: LookSet) { *self = self.intersect(other); } /// Return a `LookSet` from the slice given as a native endian 16-bit /// integer. /// /// # Panics /// /// This panics if `slice.len() < 2`. #[inline] pub fn read_repr(slice: &[u8]) -> LookSet { let bits = u16::from_ne_bytes(slice[..2].try_into().unwrap()); LookSet { bits } } /// Write a `LookSet` as a native endian 16-bit integer to the beginning /// of the slice given. /// /// # Panics /// /// This panics if `slice.len() < 2`. #[inline] pub fn write_repr(self, slice: &mut [u8]) { let raw = self.bits.to_ne_bytes(); slice[0] = raw[0]; slice[1] = raw[1]; } /// Checks that all assertions in this set can be matched. /// /// Some assertions, such as Unicode word boundaries, require optional (but /// enabled by default) tables that may not be available. If there are /// assertions in this set that require tables that are not available, then /// this will return an error. /// /// Specifically, this returns an error when the the /// `unicode-word-boundary` feature is _not_ enabled _and_ this set /// contains a Unicode word boundary assertion. /// /// It can be useful to use this on the result of /// [`NFA::look_set_any`](crate::nfa::thompson::NFA::look_set_any) /// when building a matcher engine to ensure methods like /// [`LookMatcher::matches_set`] do not panic at search time. pub fn available(self) -> Result<(), UnicodeWordBoundaryError> { if self.contains_word_unicode() { UnicodeWordBoundaryError::check()?; } Ok(()) } } impl core::fmt::Debug for LookSet { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { if self.is_empty() { return write!(f, "∅"); } for look in self.iter() { write!(f, "{}", look.as_char())?; } Ok(()) } } /// An iterator over all look-around assertions in a [`LookSet`]. /// /// This iterator is created by [`LookSet::iter`]. #[derive(Clone, Debug)] pub struct LookSetIter { set: LookSet, } impl Iterator for LookSetIter { type Item = Look; #[inline] fn next(&mut self) -> Option<Look> { if self.set.is_empty() { return None; } // We'll never have more than u8::MAX distinct look-around assertions, // so 'repr' will always fit into a u16. let repr = u16::try_from(self.set.bits.trailing_zeros()).unwrap(); let look = Look::from_repr(1 << repr)?; self.set = self.set.remove(look); Some(look) } } /// A matcher for look-around assertions. /// /// This matcher permits configuring aspects of how look-around assertions are /// matched. /// /// # Example /// /// A `LookMatcher` can change the line terminator used for matching multi-line /// anchors such as `(?m:^)` and `(?m:$)`. 
/// /// ``` /// use regex_automata::{ /// nfa::thompson::{self, pikevm::PikeVM}, /// util::look::LookMatcher, /// Match, Input, /// }; /// /// let mut lookm = LookMatcher::new(); /// lookm.set_line_terminator(b'\x00'); /// /// let re = PikeVM::builder() /// .thompson(thompson::Config::new().look_matcher(lookm)) /// .build(r"(?m)^[a-z]+$")?; /// let mut cache = re.create_cache(); /// /// // Multi-line assertions now use NUL as a terminator. /// assert_eq!( /// Some(Match::must(0, 1..4)), /// re.find(&mut cache, b"\x00abc\x00"), /// ); /// // ... and \n is no longer recognized as a terminator. /// assert_eq!( /// None, /// re.find(&mut cache, b"\nabc\n"), /// ); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[derive(Clone, Debug)] pub struct LookMatcher { lineterm: DebugByte, } impl LookMatcher { /// Creates a new default matcher for look-around assertions. pub fn new() -> LookMatcher { LookMatcher { lineterm: DebugByte(b'\n') } } /// Sets the line terminator for use with `(?m:^)` and `(?m:$)`. /// /// Namely, instead of `^` matching after `\n` and `$` matching immediately /// before a `\n`, this will cause it to match after and before the byte /// given. /// /// It can occasionally be useful to use this to configure the line /// terminator to the NUL byte when searching binary data. /// /// Note that this does not apply to CRLF-aware line anchors such as /// `(?Rm:^)` and `(?Rm:$)`. CRLF-aware line anchors are hard-coded to /// use `\r` and `\n`. pub fn set_line_terminator(&mut self, byte: u8) -> &mut LookMatcher { self.lineterm.0 = byte; self } /// Returns the line terminator that was configured for this matcher. /// /// If no line terminator was configured, then this returns `\n`. /// /// Note that the line terminator should only be used for matching `(?m:^)` /// and `(?m:$)` assertions. It specifically should _not_ be used for /// matching the CRLF aware assertions `(?Rm:^)` and `(?Rm:$)`. pub fn get_line_terminator(&self) -> u8 { self.lineterm.0 } /// Returns true when the position `at` in `haystack` satisfies the given /// look-around assertion. /// /// # Panics /// /// This panics when testing any Unicode word boundary assertion in this /// set and when the Unicode word data is not available. Specifically, this /// only occurs when the `unicode-word-boundary` feature is not enabled. /// /// Since it's generally expected that this routine is called inside of /// a matching engine, callers should check the error condition when /// building the matching engine. If there is a Unicode word boundary /// in the matcher and the data isn't available, then the matcher should /// fail to build. /// /// Callers can check the error condition with [`LookSet::available`]. /// /// This also may panic when `at > haystack.len()`. Note that `at == /// haystack.len()` is legal and guaranteed not to panic. #[inline] pub fn matches(&self, look: Look, haystack: &[u8], at: usize) -> bool { self.matches_inline(look, haystack, at) } /// Like `matches`, but forcefully inlined. 
#[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn matches_inline( &self, look: Look, haystack: &[u8], at: usize, ) -> bool { match look { Look::Start => self.is_start(haystack, at), Look::End => self.is_end(haystack, at), Look::StartLF => self.is_start_lf(haystack, at), Look::EndLF => self.is_end_lf(haystack, at), Look::StartCRLF => self.is_start_crlf(haystack, at), Look::EndCRLF => self.is_end_crlf(haystack, at), Look::WordAscii => self.is_word_ascii(haystack, at), Look::WordAsciiNegate => self.is_word_ascii_negate(haystack, at), Look::WordUnicode => self.is_word_unicode(haystack, at).unwrap(), Look::WordUnicodeNegate => { self.is_word_unicode_negate(haystack, at).unwrap() } } } /// Returns true when _all_ of the assertions in the given set match at the /// given position in the haystack. /// /// # Panics /// /// This panics when testing any Unicode word boundary assertion in this /// set and when the Unicode word data is not available. Specifically, this /// only occurs when the `unicode-word-boundary` feature is not enabled. /// /// Since it's generally expected that this routine is called inside of /// a matching engine, callers should check the error condition when /// building the matching engine. If there is a Unicode word boundary /// in the matcher and the data isn't available, then the matcher should /// fail to build. /// /// Callers can check the error condition with [`LookSet::available`]. /// /// This also may panic when `at > haystack.len()`. Note that `at == /// haystack.len()` is legal and guaranteed not to panic. #[inline] pub fn matches_set( &self, set: LookSet, haystack: &[u8], at: usize, ) -> bool { self.matches_set_inline(set, haystack, at) } /// Like `LookSet::matches`, but forcefully inlined for perf. #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn matches_set_inline( &self, set: LookSet, haystack: &[u8], at: usize, ) -> bool { // This used to luse LookSet::iter with Look::matches on each element, // but that proved to be quite diastrous for perf. The manual "if // the set has this assertion, check it" turns out to be quite a bit // faster. if set.contains(Look::Start) { if !self.is_start(haystack, at) { return false; } } if set.contains(Look::End) { if !self.is_end(haystack, at) { return false; } } if set.contains(Look::StartLF) { if !self.is_start_lf(haystack, at) { return false; } } if set.contains(Look::EndLF) { if !self.is_end_lf(haystack, at) { return false; } } if set.contains(Look::StartCRLF) { if !self.is_start_crlf(haystack, at) { return false; } } if set.contains(Look::EndCRLF) { if !self.is_end_crlf(haystack, at) { return false; } } if set.contains(Look::WordAscii) { if !self.is_word_ascii(haystack, at) { return false; } } if set.contains(Look::WordAsciiNegate) { if !self.is_word_ascii_negate(haystack, at) { return false; } } if set.contains(Look::WordUnicode) { if !self.is_word_unicode(haystack, at).unwrap() { return false; } } if set.contains(Look::WordUnicodeNegate) { if !self.is_word_unicode_negate(haystack, at).unwrap() { return false; } } true } /// Split up the given byte classes into equivalence classes in a way that /// is consistent with this look-around assertion. 
#[cfg(feature = "alloc")] pub(crate) fn add_to_byteset( &self, look: Look, set: &mut crate::util::alphabet::ByteClassSet, ) { match look { Look::Start | Look::End => {} Look::StartLF | Look::EndLF => { set.set_range(self.lineterm.0, self.lineterm.0); } Look::StartCRLF | Look::EndCRLF => { set.set_range(b'\r', b'\r'); set.set_range(b'\n', b'\n'); } Look::WordAscii | Look::WordAsciiNegate | Look::WordUnicode | Look::WordUnicodeNegate => { // We need to mark all ranges of bytes whose pairs result in // evaluating \b differently. This isn't technically correct // for Unicode word boundaries, but DFAs can't handle those // anyway, and thus, the byte classes don't need to either // since they are themselves only used in DFAs. // // FIXME: It seems like the calls to 'set_range' here are // completely invariant, which means we could just hard-code // them here without needing to write a loop. And we only need // to do this dance at most once per regex. // // FIXME: Is this correct for \B? let iswb = utf8::is_word_byte; // This unwrap is OK because we guard every use of 'asu8' with // a check that the input is <= 255. let asu8 = |b: u16| u8::try_from(b).unwrap(); let mut b1: u16 = 0; let mut b2: u16; while b1 <= 255 { b2 = b1 + 1; while b2 <= 255 && iswb(asu8(b1)) == iswb(asu8(b2)) { b2 += 1; } // The guards above guarantee that b2 can never get any // bigger. assert!(b2 <= 256); // Subtracting 1 from b2 is always OK because it is always // at least 1 greater than b1, and the assert above // guarantees that the asu8 conversion will succeed. set.set_range(asu8(b1), asu8(b2.checked_sub(1).unwrap())); b1 = b2; } } } } /// Returns true when [`Look::Start`] is satisfied `at` the given position /// in `haystack`. /// /// # Panics /// /// This may panic when `at > haystack.len()`. Note that `at == /// haystack.len()` is legal and guaranteed not to panic. #[inline] pub fn is_start(&self, _haystack: &[u8], at: usize) -> bool { at == 0 } /// Returns true when [`Look::End`] is satisfied `at` the given position in /// `haystack`. /// /// # Panics /// /// This may panic when `at > haystack.len()`. Note that `at == /// haystack.len()` is legal and guaranteed not to panic. #[inline] pub fn is_end(&self, haystack: &[u8], at: usize) -> bool { at == haystack.len() } /// Returns true when [`Look::StartLF`] is satisfied `at` the given /// position in `haystack`. /// /// # Panics /// /// This may panic when `at > haystack.len()`. Note that `at == /// haystack.len()` is legal and guaranteed not to panic. #[inline] pub fn is_start_lf(&self, haystack: &[u8], at: usize) -> bool { self.is_start(haystack, at) || haystack[at - 1] == self.lineterm.0 } /// Returns true when [`Look::EndLF`] is satisfied `at` the given position /// in `haystack`. /// /// # Panics /// /// This may panic when `at > haystack.len()`. Note that `at == /// haystack.len()` is legal and guaranteed not to panic. #[inline] pub fn is_end_lf(&self, haystack: &[u8], at: usize) -> bool { self.is_end(haystack, at) || haystack[at] == self.lineterm.0 } /// Returns true when [`Look::StartCRLF`] is satisfied `at` the given /// position in `haystack`. /// /// # Panics /// /// This may panic when `at > haystack.len()`. Note that `at == /// haystack.len()` is legal and guaranteed not to panic. 
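    ///
    /// # Example
    ///
    /// A small sketch of the CRLF-aware behavior:
    ///
    /// ```
    /// use regex_automata::util::look::LookMatcher;
    ///
    /// let lookm = LookMatcher::new();
    /// // The position just after "\r\n" starts a new line...
    /// assert!(lookm.is_start_crlf(b"\r\nabc", 2));
    /// // ...but the position between '\r' and '\n' does not.
    /// assert!(!lookm.is_start_crlf(b"\r\nabc", 1));
    /// ```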
#[inline] pub fn is_start_crlf(&self, haystack: &[u8], at: usize) -> bool { self.is_start(haystack, at) || haystack[at - 1] == b'\n' || (haystack[at - 1] == b'\r' && (at >= haystack.len() || haystack[at] != b'\n')) } /// Returns true when [`Look::EndCRLF`] is satisfied `at` the given /// position in `haystack`. /// /// # Panics /// /// This may panic when `at > haystack.len()`. Note that `at == /// haystack.len()` is legal and guaranteed not to panic. #[inline] pub fn is_end_crlf(&self, haystack: &[u8], at: usize) -> bool { self.is_end(haystack, at) || haystack[at] == b'\r' || (haystack[at] == b'\n' && (at == 0 || haystack[at - 1] != b'\r')) } /// Returns true when [`Look::WordAscii`] is satisfied `at` the given /// position in `haystack`. /// /// # Panics /// /// This may panic when `at > haystack.len()`. Note that `at == /// haystack.len()` is legal and guaranteed not to panic. #[inline] pub fn is_word_ascii(&self, haystack: &[u8], at: usize) -> bool { let word_before = at > 0 && utf8::is_word_byte(haystack[at - 1]); let word_after = at < haystack.len() && utf8::is_word_byte(haystack[at]); word_before != word_after } /// Returns true when [`Look::WordAsciiNegate`] is satisfied `at` the given /// position in `haystack`. /// /// # Panics /// /// This may panic when `at > haystack.len()`. Note that `at == /// haystack.len()` is legal and guaranteed not to panic. #[inline] pub fn is_word_ascii_negate(&self, haystack: &[u8], at: usize) -> bool { !self.is_word_ascii(haystack, at) } /// Returns true when [`Look::WordUnicode`] is satisfied `at` the given /// position in `haystack`. /// /// # Panics /// /// This may panic when `at > haystack.len()`. Note that `at == /// haystack.len()` is legal and guaranteed not to panic. /// /// # Errors /// /// This returns an error when Unicode word boundary tables /// are not available. Specifically, this only occurs when the /// `unicode-word-boundary` feature is not enabled. #[inline] pub fn is_word_unicode( &self, haystack: &[u8], at: usize, ) -> Result<bool, UnicodeWordBoundaryError> { let word_before = is_word_char::rev(haystack, at)?; let word_after = is_word_char::fwd(haystack, at)?; Ok(word_before != word_after) } /// Returns true when [`Look::WordUnicodeNegate`] is satisfied `at` the /// given position in `haystack`. /// /// # Panics /// /// This may panic when `at > haystack.len()`. Note that `at == /// haystack.len()` is legal and guaranteed not to panic. /// /// # Errors /// /// This returns an error when Unicode word boundary tables /// are not available. Specifically, this only occurs when the /// `unicode-word-boundary` feature is not enabled. #[inline] pub fn is_word_unicode_negate( &self, haystack: &[u8], at: usize, ) -> Result<bool, UnicodeWordBoundaryError> { // This is pretty subtle. Why do we need to do UTF-8 decoding here? // Well... at time of writing, the is_word_char_{fwd,rev} routines will // only return true if there is a valid UTF-8 encoding of a "word" // codepoint, and false in every other case (including invalid UTF-8). // This means that in regions of invalid UTF-8 (which might be a // subset of valid UTF-8!), it would result in \B matching. While this // would be questionable in the context of truly invalid UTF-8, it is // *certainly* wrong to report match boundaries that split the encoding // of a codepoint. So to work around this, we ensure that we can decode // a codepoint on either side of `at`. If either direction fails, then // we don't permit \B to match at all. 
// // Now, this isn't exactly optimal from a perf perspective. We could // try and detect this in is_word_char::{fwd,rev}, but it's not clear // if it's worth it. \B is, after all, rarely used. Even worse, // is_word_char::{fwd,rev} could do its own UTF-8 decoding, and so this // will wind up doing UTF-8 decoding twice. Owch. We could fix this // with more code complexity, but it just doesn't feel worth it for \B. // // And in particular, we do *not* have to do this with \b, because \b // *requires* that at least one side of `at` be a "word" codepoint, // which in turn implies one side of `at` must be valid UTF-8. This in // turn implies that \b can never split a valid UTF-8 encoding of a // codepoint. In the case where one side of `at` is truly invalid UTF-8 // and the other side IS a word codepoint, then we want \b to match // since it represents a valid UTF-8 boundary. It also makes sense. For // example, you'd want \b\w+\b to match 'abc' in '\xFFabc\xFF'. // // Note also that this is not just '!is_word_unicode(..)' like it is // for the ASCII case. For example, neither \b nor \B is satisfied // within invalid UTF-8 sequences. let word_before = at > 0 && match utf8::decode_last(&haystack[..at]) { None | Some(Err(_)) => return Ok(false), Some(Ok(_)) => is_word_char::rev(haystack, at)?, }; let word_after = at < haystack.len() && match utf8::decode(&haystack[at..]) { None | Some(Err(_)) => return Ok(false), Some(Ok(_)) => is_word_char::fwd(haystack, at)?, }; Ok(word_before == word_after) } } impl Default for LookMatcher { fn default() -> LookMatcher { LookMatcher::new() } } /// An error that occurs when the Unicode-aware `\w` class is unavailable. /// /// This error can occur when the data tables necessary for the Unicode aware /// Perl character class `\w` are unavailable. The `\w` class is used to /// determine whether a codepoint is considered a word character or not when /// determining whether a Unicode aware `\b` (or `\B`) matches at a particular /// position. /// /// This error can only occur when the `unicode-word-boundary` feature is /// disabled. #[derive(Clone, Debug)] pub struct UnicodeWordBoundaryError(()); impl UnicodeWordBoundaryError { #[cfg(not(feature = "unicode-word-boundary"))] pub(crate) fn new() -> UnicodeWordBoundaryError { UnicodeWordBoundaryError(()) } /// Returns an error if and only if Unicode word boundary data is /// unavailable. pub fn check() -> Result<(), UnicodeWordBoundaryError> { is_word_char::check() } } #[cfg(feature = "std")] impl std::error::Error for UnicodeWordBoundaryError {} impl core::fmt::Display for UnicodeWordBoundaryError { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { write!( f, "Unicode-aware \\b and \\B are unavailable because the \ requisite data tables are missing, please enable the \ unicode-word-boundary feature" ) } } // Below are FOUR different ways for checking whether whether a "word" // codepoint exists at a particular position in the haystack. The four // different approaches are, in order of preference: // // 1. Parse '\w', convert to an NFA, convert to a fully compiled DFA on the // first call, and then use that DFA for all subsequent calls. // 2. Do UTF-8 decoding and use regex_syntax::is_word_character if available. // 3. Do UTF-8 decoding and use our own 'perl_word' table. // 4. Return an error. // // The reason for all of these approaches is a combination of perf and // permitting one to build regex-automata without the Unicode data necessary // for handling Unicode-aware word boundaries. 
(In which case, '(?-u:\b)' would // still work.) // // The DFA approach is the fastest, but it requires the regex parser, the // NFA compiler, the DFA builder and the DFA search runtime. That's a lot to // bring in, but if it's available, it's (probably) the best we can do. // // Approaches (2) and (3) are effectively equivalent, but (2) reuses the // data in regex-syntax and avoids duplicating it in regex-automata. // // Finally, (4) unconditionally returns an error since the requisite data isn't // available anywhere. // // There are actually more approaches possible that we didn't implement. For // example, if the DFA builder is available but the syntax parser is not, we // could technically hand construct our own NFA from the 'perl_word' data // table. But to avoid some pretty hairy code duplication, we would in turn // need to pull the UTF-8 compiler out of the NFA compiler. Yikes. // // A possibly more sensible alternative is to use a lazy DFA when the full // DFA builder isn't available... // // Yet another choice would be to build the full DFA and then embed it into the // source. Then we'd only need to bring in the DFA search runtime, which is // considerably smaller than the DFA builder code. The problem here is that the // Debian people have spooked me[1] into avoiding cyclic dependencies. Namely, // we'd need to build regex-cli, which depends on regex-automata in order to // build some part of regex-automata. But to be honest, something like this has // to be allowed somehow? I just don't know what the right process is. // // There are perhaps other choices as well. Why did I stop at these 4? Because // I wanted to preserve my sanity. I suspect I'll wind up adding the lazy DFA // approach eventually, as the benefits of the DFA approach are somewhat // compelling. The 'boundary-words-holmes' benchmark tests this: // // $ regex-cli bench measure -f boundary-words-holmes -e pikevm > dfa.csv // // Then I changed the code below so that the util/unicode_data/perl_word table // was used and re-ran the benchmark: // // $ regex-cli bench measure -f boundary-words-holmes -e pikevm > table.csv // // And compared them: // // $ regex-cli bench diff dfa.csv table.csv // benchmark engine dfa table // --------- ------ --- ----- // internal/count/boundary-words-holmes regex/automata/pikevm 18.6 MB/s 12.9 MB/s // // Which is a nice improvement. // // UPDATE: It turns out that it takes approximately 22ms to build the reverse // DFA for \w. (And about 3ms for the forward DFA.) It's probably not much in // the grand scheme things, but that is a significant latency cost. So I'm not // sure that's a good idea. I then tried using a lazy DFA instead, and that // eliminated the overhead, but since the lazy DFA requires mutable working // memory, that requires introducing a 'Cache' for every simultaneous call. // // I ended up deciding for now to just keep the "UTF-8 decode and check the // table." The DFA and lazy DFA approaches are still below, but commented out. // // [1]: https://github.com/BurntSushi/ucd-generate/issues/11 /* /// A module that looks for word codepoints using lazy DFAs. 
#[cfg(all( feature = "unicode-word-boundary", feature = "syntax", feature = "unicode-perl", feature = "hybrid" ))] mod is_word_char { use alloc::vec::Vec; use crate::{ hybrid::dfa::{Cache, DFA}, nfa::thompson::NFA, util::{lazy::Lazy, pool::Pool, primitives::StateID}, Anchored, Input, }; pub(super) fn check() -> Result<(), super::UnicodeWordBoundaryError> { Ok(()) } #[cfg_attr(feature = "perf-inline", inline(always))] pub(super) fn fwd( haystack: &[u8], mut at: usize, ) -> Result<bool, super::UnicodeWordBoundaryError> { static WORD: Lazy<DFA> = Lazy::new(|| DFA::new(r"\w").unwrap()); static CACHE: Lazy<Pool<Cache>> = Lazy::new(|| Pool::new(|| WORD.create_cache())); let dfa = Lazy::get(&WORD); let mut cache = Lazy::get(&CACHE).get(); let mut sid = dfa .start_state_forward( &mut cache, &Input::new("").anchored(Anchored::Yes), ) .unwrap(); while at < haystack.len() { let byte = haystack[at]; sid = dfa.next_state(&mut cache, sid, byte).unwrap(); at += 1; if sid.is_tagged() { if sid.is_match() { return Ok(true); } else if sid.is_dead() { return Ok(false); } } } Ok(dfa.next_eoi_state(&mut cache, sid).unwrap().is_match()) } #[cfg_attr(feature = "perf-inline", inline(always))] pub(super) fn rev( haystack: &[u8], mut at: usize, ) -> Result<bool, super::UnicodeWordBoundaryError> { static WORD: Lazy<DFA> = Lazy::new(|| { DFA::builder() .thompson(NFA::config().reverse(true)) .build(r"\w") .unwrap() }); static CACHE: Lazy<Pool<Cache>> = Lazy::new(|| Pool::new(|| WORD.create_cache())); let dfa = Lazy::get(&WORD); let mut cache = Lazy::get(&CACHE).get(); let mut sid = dfa .start_state_reverse( &mut cache, &Input::new("").anchored(Anchored::Yes), ) .unwrap(); while at > 0 { at -= 1; let byte = haystack[at]; sid = dfa.next_state(&mut cache, sid, byte).unwrap(); if sid.is_tagged() { if sid.is_match() { return Ok(true); } else if sid.is_dead() { return Ok(false); } } } Ok(dfa.next_eoi_state(&mut cache, sid).unwrap().is_match()) } } */ /* /// A module that looks for word codepoints using fully compiled DFAs. #[cfg(all( feature = "unicode-word-boundary", feature = "syntax", feature = "unicode-perl", feature = "dfa-build" ))] mod is_word_char { use alloc::vec::Vec; use crate::{ dfa::{dense::DFA, Automaton, StartKind}, nfa::thompson::NFA, util::{lazy::Lazy, primitives::StateID}, Anchored, Input, }; pub(super) fn check() -> Result<(), super::UnicodeWordBoundaryError> { Ok(()) } #[cfg_attr(feature = "perf-inline", inline(always))] pub(super) fn fwd( haystack: &[u8], mut at: usize, ) -> Result<bool, super::UnicodeWordBoundaryError> { static WORD: Lazy<(DFA<Vec<u32>>, StateID)> = Lazy::new(|| { let dfa = DFA::builder() .configure(DFA::config().start_kind(StartKind::Anchored)) .build(r"\w") .unwrap(); // OK because our regex has no look-around. 
let start_id = dfa.universal_start_state(Anchored::Yes).unwrap(); (dfa, start_id) }); let &(ref dfa, mut sid) = Lazy::get(&WORD); while at < haystack.len() { let byte = haystack[at]; sid = dfa.next_state(sid, byte); at += 1; if dfa.is_special_state(sid) { if dfa.is_match_state(sid) { return Ok(true); } else if dfa.is_dead_state(sid) { return Ok(false); } } } Ok(dfa.is_match_state(dfa.next_eoi_state(sid))) } #[cfg_attr(feature = "perf-inline", inline(always))] pub(super) fn rev( haystack: &[u8], mut at: usize, ) -> Result<bool, super::UnicodeWordBoundaryError> { static WORD: Lazy<(DFA<Vec<u32>>, StateID)> = Lazy::new(|| { let dfa = DFA::builder() .configure(DFA::config().start_kind(StartKind::Anchored)) // From ad hoc measurements, it looks like setting // shrink==false is slightly faster than shrink==true. I kind // of feel like this indicates that shrinking is probably a // failure, although it can help in some cases. Sigh. .thompson(NFA::config().reverse(true).shrink(false)) .build(r"\w") .unwrap(); // OK because our regex has no look-around. let start_id = dfa.universal_start_state(Anchored::Yes).unwrap(); (dfa, start_id) }); let &(ref dfa, mut sid) = Lazy::get(&WORD); while at > 0 { at -= 1; let byte = haystack[at]; sid = dfa.next_state(sid, byte); if dfa.is_special_state(sid) { if dfa.is_match_state(sid) { return Ok(true); } else if dfa.is_dead_state(sid) { return Ok(false); } } } Ok(dfa.is_match_state(dfa.next_eoi_state(sid))) } } */ /// A module that looks for word codepoints using regex-syntax's data tables. #[cfg(all( feature = "unicode-word-boundary", feature = "syntax", feature = "unicode-perl", ))] mod is_word_char { use regex_syntax::try_is_word_character; use crate::util::utf8; pub(super) fn check() -> Result<(), super::UnicodeWordBoundaryError> { Ok(()) } #[cfg_attr(feature = "perf-inline", inline(always))] pub(super) fn fwd( haystack: &[u8], at: usize, ) -> Result<bool, super::UnicodeWordBoundaryError> { Ok(match utf8::decode(&haystack[at..]) { None | Some(Err(_)) => false, Some(Ok(ch)) => try_is_word_character(ch).expect( "since unicode-word-boundary, syntax and unicode-perl \ are all enabled, it is expected that \ try_is_word_character succeeds", ), }) } #[cfg_attr(feature = "perf-inline", inline(always))] pub(super) fn rev( haystack: &[u8], at: usize, ) -> Result<bool, super::UnicodeWordBoundaryError> { Ok(match utf8::decode_last(&haystack[..at]) { None | Some(Err(_)) => false, Some(Ok(ch)) => try_is_word_character(ch).expect( "since unicode-word-boundary, syntax and unicode-perl \ are all enabled, it is expected that \ try_is_word_character succeeds", ), }) } } /// A module that looks for word codepoints using regex-automata's data tables /// (which are only compiled when regex-syntax's tables aren't available). /// /// Note that the cfg should match the one in src/util/unicode_data/mod.rs for /// perl_word. 
#[cfg(all( feature = "unicode-word-boundary", not(all(feature = "syntax", feature = "unicode-perl")), ))] mod is_word_char { use crate::util::utf8; pub(super) fn check() -> Result<(), super::UnicodeWordBoundaryError> { Ok(()) } #[cfg_attr(feature = "perf-inline", inline(always))] pub(super) fn fwd( haystack: &[u8], at: usize, ) -> Result<bool, super::UnicodeWordBoundaryError> { Ok(match utf8::decode(&haystack[at..]) { None | Some(Err(_)) => false, Some(Ok(ch)) => is_word_character(ch), }) } #[cfg_attr(feature = "perf-inline", inline(always))] pub(super) fn rev( haystack: &[u8], at: usize, ) -> Result<bool, super::UnicodeWordBoundaryError> { Ok(match utf8::decode_last(&haystack[..at]) { None | Some(Err(_)) => false, Some(Ok(ch)) => is_word_character(ch), }) } #[cfg_attr(feature = "perf-inline", inline(always))] fn is_word_character(c: char) -> bool { use crate::util::{unicode_data::perl_word::PERL_WORD, utf8}; // MSRV(1.59): Use 'u8::try_from(c)' instead. if u8::try_from(u32::from(c)).map_or(false, utf8::is_word_byte) { return true; } PERL_WORD .binary_search_by(|&(start, end)| { use core::cmp::Ordering; if start <= c && c <= end { Ordering::Equal } else if start > c { Ordering::Greater } else { Ordering::Less } }) .is_ok() } } /// A module that always returns an error if Unicode word boundaries are /// disabled. When this feature is disabled, then regex-automata will not /// include its own data tables even if regex-syntax is disabled. #[cfg(not(feature = "unicode-word-boundary"))] mod is_word_char { pub(super) fn check() -> Result<(), super::UnicodeWordBoundaryError> { Err(super::UnicodeWordBoundaryError::new()) } #[cfg_attr(feature = "perf-inline", inline(always))] pub(super) fn fwd( _bytes: &[u8], _at: usize, ) -> Result<bool, super::UnicodeWordBoundaryError> { Err(super::UnicodeWordBoundaryError::new()) } #[cfg_attr(feature = "perf-inline", inline(always))] pub(super) fn rev( _bytes: &[u8], _at: usize, ) -> Result<bool, super::UnicodeWordBoundaryError> { Err(super::UnicodeWordBoundaryError::new()) } } #[cfg(test)] mod tests { use super::*; macro_rules! 
testlook { ($look:expr, $haystack:expr, $at:expr) => { LookMatcher::default().matches($look, $haystack.as_bytes(), $at) }; } #[test] fn look_matches_start_line() { let look = Look::StartLF; assert!(testlook!(look, "", 0)); assert!(testlook!(look, "\n", 0)); assert!(testlook!(look, "\n", 1)); assert!(testlook!(look, "a", 0)); assert!(testlook!(look, "\na", 1)); assert!(!testlook!(look, "a", 1)); assert!(!testlook!(look, "a\na", 1)); } #[test] fn look_matches_end_line() { let look = Look::EndLF; assert!(testlook!(look, "", 0)); assert!(testlook!(look, "\n", 1)); assert!(testlook!(look, "\na", 0)); assert!(testlook!(look, "\na", 2)); assert!(testlook!(look, "a\na", 1)); assert!(!testlook!(look, "a", 0)); assert!(!testlook!(look, "\na", 1)); assert!(!testlook!(look, "a\na", 0)); assert!(!testlook!(look, "a\na", 2)); } #[test] fn look_matches_start_text() { let look = Look::Start; assert!(testlook!(look, "", 0)); assert!(testlook!(look, "\n", 0)); assert!(testlook!(look, "a", 0)); assert!(!testlook!(look, "\n", 1)); assert!(!testlook!(look, "\na", 1)); assert!(!testlook!(look, "a", 1)); assert!(!testlook!(look, "a\na", 1)); } #[test] fn look_matches_end_text() { let look = Look::End; assert!(testlook!(look, "", 0)); assert!(testlook!(look, "\n", 1)); assert!(testlook!(look, "\na", 2)); assert!(!testlook!(look, "\na", 0)); assert!(!testlook!(look, "a\na", 1)); assert!(!testlook!(look, "a", 0)); assert!(!testlook!(look, "\na", 1)); assert!(!testlook!(look, "a\na", 0)); assert!(!testlook!(look, "a\na", 2)); } #[test] #[cfg(all(not(miri), feature = "unicode-word-boundary"))] fn look_matches_word_unicode() { let look = Look::WordUnicode; // \xF0\x9D\x9B\x83 = 𝛃 (in \w) // \xF0\x90\x86\x80 = 𐆀 (not in \w) // Simple ASCII word boundaries. assert!(testlook!(look, "a", 0)); assert!(testlook!(look, "a", 1)); assert!(testlook!(look, "a ", 1)); assert!(testlook!(look, " a ", 1)); assert!(testlook!(look, " a ", 2)); // Unicode word boundaries with a non-ASCII codepoint. assert!(testlook!(look, "𝛃", 0)); assert!(testlook!(look, "𝛃", 4)); assert!(testlook!(look, "𝛃 ", 4)); assert!(testlook!(look, " 𝛃 ", 1)); assert!(testlook!(look, " 𝛃 ", 5)); // Unicode word boundaries between non-ASCII codepoints. assert!(testlook!(look, "𝛃𐆀", 0)); assert!(testlook!(look, "𝛃𐆀", 4)); // Non word boundaries for ASCII. assert!(!testlook!(look, "", 0)); assert!(!testlook!(look, "ab", 1)); assert!(!testlook!(look, "a ", 2)); assert!(!testlook!(look, " a ", 0)); assert!(!testlook!(look, " a ", 3)); // Non word boundaries with a non-ASCII codepoint. assert!(!testlook!(look, "𝛃b", 4)); assert!(!testlook!(look, "𝛃 ", 5)); assert!(!testlook!(look, " 𝛃 ", 0)); assert!(!testlook!(look, " 𝛃 ", 6)); assert!(!testlook!(look, "𝛃", 1)); assert!(!testlook!(look, "𝛃", 2)); assert!(!testlook!(look, "𝛃", 3)); // Non word boundaries with non-ASCII codepoints. assert!(!testlook!(look, "𝛃𐆀", 1)); assert!(!testlook!(look, "𝛃𐆀", 2)); assert!(!testlook!(look, "𝛃𐆀", 3)); assert!(!testlook!(look, "𝛃𐆀", 5)); assert!(!testlook!(look, "𝛃𐆀", 6)); assert!(!testlook!(look, "𝛃𐆀", 7)); assert!(!testlook!(look, "𝛃𐆀", 8)); } #[test] fn look_matches_word_ascii() { let look = Look::WordAscii; // \xF0\x9D\x9B\x83 = 𝛃 (in \w) // \xF0\x90\x86\x80 = 𐆀 (not in \w) // Simple ASCII word boundaries. assert!(testlook!(look, "a", 0)); assert!(testlook!(look, "a", 1)); assert!(testlook!(look, "a ", 1)); assert!(testlook!(look, " a ", 1)); assert!(testlook!(look, " a ", 2)); // Unicode word boundaries with a non-ASCII codepoint. 
Since this is // an ASCII word boundary, none of these match. assert!(!testlook!(look, "𝛃", 0)); assert!(!testlook!(look, "𝛃", 4)); assert!(!testlook!(look, "𝛃 ", 4)); assert!(!testlook!(look, " 𝛃 ", 1)); assert!(!testlook!(look, " 𝛃 ", 5)); // Unicode word boundaries between non-ASCII codepoints. Again, since // this is an ASCII word boundary, none of these match. assert!(!testlook!(look, "𝛃𐆀", 0)); assert!(!testlook!(look, "𝛃𐆀", 4)); // Non word boundaries for ASCII. assert!(!testlook!(look, "", 0)); assert!(!testlook!(look, "ab", 1)); assert!(!testlook!(look, "a ", 2)); assert!(!testlook!(look, " a ", 0)); assert!(!testlook!(look, " a ", 3)); // Non word boundaries with a non-ASCII codepoint. assert!(testlook!(look, "𝛃b", 4)); assert!(!testlook!(look, "𝛃 ", 5)); assert!(!testlook!(look, " 𝛃 ", 0)); assert!(!testlook!(look, " 𝛃 ", 6)); assert!(!testlook!(look, "𝛃", 1)); assert!(!testlook!(look, "𝛃", 2)); assert!(!testlook!(look, "𝛃", 3)); // Non word boundaries with non-ASCII codepoints. assert!(!testlook!(look, "𝛃𐆀", 1)); assert!(!testlook!(look, "𝛃𐆀", 2)); assert!(!testlook!(look, "𝛃𐆀", 3)); assert!(!testlook!(look, "𝛃𐆀", 5)); assert!(!testlook!(look, "𝛃𐆀", 6)); assert!(!testlook!(look, "𝛃𐆀", 7)); assert!(!testlook!(look, "𝛃𐆀", 8)); } #[test] #[cfg(all(not(miri), feature = "unicode-word-boundary"))] fn look_matches_word_unicode_negate() { let look = Look::WordUnicodeNegate; // \xF0\x9D\x9B\x83 = 𝛃 (in \w) // \xF0\x90\x86\x80 = 𐆀 (not in \w) // Simple ASCII word boundaries. assert!(!testlook!(look, "a", 0)); assert!(!testlook!(look, "a", 1)); assert!(!testlook!(look, "a ", 1)); assert!(!testlook!(look, " a ", 1)); assert!(!testlook!(look, " a ", 2)); // Unicode word boundaries with a non-ASCII codepoint. assert!(!testlook!(look, "𝛃", 0)); assert!(!testlook!(look, "𝛃", 4)); assert!(!testlook!(look, "𝛃 ", 4)); assert!(!testlook!(look, " 𝛃 ", 1)); assert!(!testlook!(look, " 𝛃 ", 5)); // Unicode word boundaries between non-ASCII codepoints. assert!(!testlook!(look, "𝛃𐆀", 0)); assert!(!testlook!(look, "𝛃𐆀", 4)); // Non word boundaries for ASCII. assert!(testlook!(look, "", 0)); assert!(testlook!(look, "ab", 1)); assert!(testlook!(look, "a ", 2)); assert!(testlook!(look, " a ", 0)); assert!(testlook!(look, " a ", 3)); // Non word boundaries with a non-ASCII codepoint. assert!(testlook!(look, "𝛃b", 4)); assert!(testlook!(look, "𝛃 ", 5)); assert!(testlook!(look, " 𝛃 ", 0)); assert!(testlook!(look, " 𝛃 ", 6)); // These don't match because they could otherwise return an offset that // splits the UTF-8 encoding of a codepoint. assert!(!testlook!(look, "𝛃", 1)); assert!(!testlook!(look, "𝛃", 2)); assert!(!testlook!(look, "𝛃", 3)); // Non word boundaries with non-ASCII codepoints. These also don't // match because they could otherwise return an offset that splits the // UTF-8 encoding of a codepoint. assert!(!testlook!(look, "𝛃𐆀", 1)); assert!(!testlook!(look, "𝛃𐆀", 2)); assert!(!testlook!(look, "𝛃𐆀", 3)); assert!(!testlook!(look, "𝛃𐆀", 5)); assert!(!testlook!(look, "𝛃𐆀", 6)); assert!(!testlook!(look, "𝛃𐆀", 7)); // But this one does, since 𐆀 isn't a word codepoint, and 8 is the end // of the haystack. So the "end" of the haystack isn't a word and 𐆀 // isn't a word, thus, \B matches. assert!(testlook!(look, "𝛃𐆀", 8)); } #[test] fn look_matches_word_ascii_negate() { let look = Look::WordAsciiNegate; // \xF0\x9D\x9B\x83 = 𝛃 (in \w) // \xF0\x90\x86\x80 = 𐆀 (not in \w) // Simple ASCII word boundaries. 
assert!(!testlook!(look, "a", 0)); assert!(!testlook!(look, "a", 1)); assert!(!testlook!(look, "a ", 1)); assert!(!testlook!(look, " a ", 1)); assert!(!testlook!(look, " a ", 2)); // Unicode word boundaries with a non-ASCII codepoint. Since this is // an ASCII word boundary, none of these match. assert!(testlook!(look, "𝛃", 0)); assert!(testlook!(look, "𝛃", 4)); assert!(testlook!(look, "𝛃 ", 4)); assert!(testlook!(look, " 𝛃 ", 1)); assert!(testlook!(look, " 𝛃 ", 5)); // Unicode word boundaries between non-ASCII codepoints. Again, since // this is an ASCII word boundary, none of these match. assert!(testlook!(look, "𝛃𐆀", 0)); assert!(testlook!(look, "𝛃𐆀", 4)); // Non word boundaries for ASCII. assert!(testlook!(look, "", 0)); assert!(testlook!(look, "ab", 1)); assert!(testlook!(look, "a ", 2)); assert!(testlook!(look, " a ", 0)); assert!(testlook!(look, " a ", 3)); // Non word boundaries with a non-ASCII codepoint. assert!(!testlook!(look, "𝛃b", 4)); assert!(testlook!(look, "𝛃 ", 5)); assert!(testlook!(look, " 𝛃 ", 0)); assert!(testlook!(look, " 𝛃 ", 6)); assert!(testlook!(look, "𝛃", 1)); assert!(testlook!(look, "𝛃", 2)); assert!(testlook!(look, "𝛃", 3)); // Non word boundaries with non-ASCII codepoints. assert!(testlook!(look, "𝛃𐆀", 1)); assert!(testlook!(look, "𝛃𐆀", 2)); assert!(testlook!(look, "𝛃𐆀", 3)); assert!(testlook!(look, "𝛃𐆀", 5)); assert!(testlook!(look, "𝛃𐆀", 6)); assert!(testlook!(look, "𝛃𐆀", 7)); assert!(testlook!(look, "𝛃𐆀", 8)); } #[test] fn look_set() { let mut f = LookSet::default(); assert!(!f.contains(Look::Start)); assert!(!f.contains(Look::End)); assert!(!f.contains(Look::StartLF)); assert!(!f.contains(Look::EndLF)); assert!(!f.contains(Look::WordUnicode)); assert!(!f.contains(Look::WordUnicodeNegate)); assert!(!f.contains(Look::WordAscii)); assert!(!f.contains(Look::WordAsciiNegate)); f = f.insert(Look::Start); assert!(f.contains(Look::Start)); f = f.remove(Look::Start); assert!(!f.contains(Look::Start)); f = f.insert(Look::End); assert!(f.contains(Look::End)); f = f.remove(Look::End); assert!(!f.contains(Look::End)); f = f.insert(Look::StartLF); assert!(f.contains(Look::StartLF)); f = f.remove(Look::StartLF); assert!(!f.contains(Look::StartLF)); f = f.insert(Look::EndLF); assert!(f.contains(Look::EndLF)); f = f.remove(Look::EndLF); assert!(!f.contains(Look::EndLF)); f = f.insert(Look::StartCRLF); assert!(f.contains(Look::StartCRLF)); f = f.remove(Look::StartCRLF); assert!(!f.contains(Look::StartCRLF)); f = f.insert(Look::EndCRLF); assert!(f.contains(Look::EndCRLF)); f = f.remove(Look::EndCRLF); assert!(!f.contains(Look::EndCRLF)); f = f.insert(Look::WordUnicode); assert!(f.contains(Look::WordUnicode)); f = f.remove(Look::WordUnicode); assert!(!f.contains(Look::WordUnicode)); f = f.insert(Look::WordUnicodeNegate); assert!(f.contains(Look::WordUnicodeNegate)); f = f.remove(Look::WordUnicodeNegate); assert!(!f.contains(Look::WordUnicodeNegate)); f = f.insert(Look::WordAscii); assert!(f.contains(Look::WordAscii)); f = f.remove(Look::WordAscii); assert!(!f.contains(Look::WordAscii)); f = f.insert(Look::WordAsciiNegate); assert!(f.contains(Look::WordAsciiNegate)); f = f.remove(Look::WordAsciiNegate); assert!(!f.contains(Look::WordAsciiNegate)); } #[test] fn look_set_iter() { let set = LookSet::empty(); assert_eq!(0, set.iter().count()); let set = LookSet::full(); assert_eq!(10, set.iter().count()); let set = LookSet::empty().insert(Look::StartLF).insert(Look::WordUnicode); assert_eq!(2, set.iter().count()); let set = LookSet::empty().insert(Look::StartLF); assert_eq!(1, 
set.iter().count()); let set = LookSet::empty().insert(Look::WordAsciiNegate); assert_eq!(1, set.iter().count()); } #[test] #[cfg(feature = "alloc")] fn look_set_debug() { let res = alloc::format!("{:?}", LookSet::empty()); assert_eq!("∅", res); let res = alloc::format!("{:?}", LookSet::full()); assert_eq!("Az^$rRbB𝛃𝚩", res); } } <file_sep>/testdata/empty.toml [[test]] name = "100" regex = "|b" haystack = "abc" matches = [[0, 0], [1, 1], [2, 2], [3, 3]] [[test]] name = "110" regex = "b|" haystack = "abc" matches = [[0, 0], [1, 2], [3, 3]] [[test]] name = "120" regex = "|z" haystack = "abc" matches = [[0, 0], [1, 1], [2, 2], [3, 3]] [[test]] name = "130" regex = "z|" haystack = "abc" matches = [[0, 0], [1, 1], [2, 2], [3, 3]] [[test]] name = "200" regex = "|" haystack = "abc" matches = [[0, 0], [1, 1], [2, 2], [3, 3]] [[test]] name = "210" regex = "||" haystack = "abc" matches = [[0, 0], [1, 1], [2, 2], [3, 3]] [[test]] name = "220" regex = "||b" haystack = "abc" matches = [[0, 0], [1, 1], [2, 2], [3, 3]] [[test]] name = "230" regex = "b||" haystack = "abc" matches = [[0, 0], [1, 2], [3, 3]] [[test]] name = "240" regex = "||z" haystack = "abc" matches = [[0, 0], [1, 1], [2, 2], [3, 3]] [[test]] name = "300" regex = "(?:)|b" haystack = "abc" matches = [[0, 0], [1, 1], [2, 2], [3, 3]] [[test]] name = "310" regex = "b|(?:)" haystack = "abc" matches = [[0, 0], [1, 2], [3, 3]] [[test]] name = "320" regex = "(?:|)" haystack = "abc" matches = [[0, 0], [1, 1], [2, 2], [3, 3]] [[test]] name = "330" regex = "(?:|)|z" haystack = "abc" matches = [[0, 0], [1, 1], [2, 2], [3, 3]] [[test]] name = "400" regex = "a(?:)|b" haystack = "abc" matches = [[0, 1], [1, 2]] [[test]] name = "500" regex = "" haystack = "" matches = [[0, 0]] [[test]] name = "510" regex = "" haystack = "a" matches = [[0, 0], [1, 1]] [[test]] name = "520" regex = "" haystack = "abc" matches = [[0, 0], [1, 1], [2, 2], [3, 3]] [[test]] name = "600" regex = '(?:|a)*' haystack = "aaa" matches = [[0, 0], [1, 1], [2, 2], [3, 3]] [[test]] name = "610" regex = '(?:|a)+' haystack = "aaa" matches = [[0, 0], [1, 1], [2, 2], [3, 3]] <file_sep>/regex-automata/tests/hybrid/suite.rs use { anyhow::Result, regex_automata::{ hybrid::{ dfa::{OverlappingState, DFA}, regex::{self, Regex}, }, nfa::thompson, util::{prefilter::Prefilter, syntax}, Anchored, Input, PatternSet, }, regex_test::{ CompiledRegex, Match, RegexTest, SearchKind, Span, TestResult, TestRunner, }, }; use crate::{create_input, suite, untestify_kind}; const EXPANSIONS: &[&str] = &["is_match", "find", "which"]; /// Tests the default configuration of the hybrid NFA/DFA. #[test] fn default() -> Result<()> { let builder = Regex::builder(); TestRunner::new()? .expand(EXPANSIONS, |t| t.compiles()) // Without NFA shrinking, this test blows the default cache capacity. .blacklist("expensive/regression-many-repeat-no-stack-overflow") .test_iter(suite()?.iter(), compiler(builder)) .assert(); Ok(()) } /// Tests the hybrid NFA/DFA with prefilters enabled. #[test] fn prefilter() -> Result<()> { let my_compiler = |test: &RegexTest, regexes: &[String]| { // Parse regexes as HIRs so we can get literals to build a prefilter. 
let mut hirs = vec![]; for pattern in regexes.iter() { hirs.push(syntax::parse_with(pattern, &config_syntax(test))?); } let kind = match untestify_kind(test.match_kind()) { None => return Ok(CompiledRegex::skip()), Some(kind) => kind, }; let pre = Prefilter::from_hirs_prefix(kind, &hirs); let mut builder = Regex::builder(); builder.dfa(DFA::config().prefilter(pre)); compiler(builder)(test, regexes) }; TestRunner::new()? .expand(EXPANSIONS, |t| t.compiles()) // Without NFA shrinking, this test blows the default cache capacity. .blacklist("expensive/regression-many-repeat-no-stack-overflow") .test_iter(suite()?.iter(), my_compiler) .assert(); Ok(()) } /// Tests the hybrid NFA/DFA with NFA shrinking enabled. /// /// This is *usually* not the configuration one wants for a lazy DFA. NFA /// shrinking is mostly only advantageous when building a full DFA since it /// can sharply decrease the amount of time determinization takes. But NFA /// shrinking is itself otherwise fairly expensive currently. Since a lazy DFA /// has no compilation time (other than for building the NFA of course) before /// executing a search, it's usually worth it to forgo NFA shrinking. /// /// Nevertheless, we test to make sure everything is OK with NFA shrinking. As /// a bonus, there are some tests we don't need to skip because they now fit in /// the default cache capacity. #[test] fn nfa_shrink() -> Result<()> { let mut builder = Regex::builder(); builder.thompson(thompson::Config::new().shrink(true)); TestRunner::new()? .expand(EXPANSIONS, |t| t.compiles()) .test_iter(suite()?.iter(), compiler(builder)) .assert(); Ok(()) } /// Tests the hybrid NFA/DFA when 'starts_for_each_pattern' is enabled for all /// tests. #[test] fn starts_for_each_pattern() -> Result<()> { let mut builder = Regex::builder(); builder.dfa(DFA::config().starts_for_each_pattern(true)); TestRunner::new()? .expand(EXPANSIONS, |t| t.compiles()) // Without NFA shrinking, this test blows the default cache capacity. .blacklist("expensive/regression-many-repeat-no-stack-overflow") .test_iter(suite()?.iter(), compiler(builder)) .assert(); Ok(()) } /// Tests the hybrid NFA/DFA when 'specialize_start_states' is enabled. #[test] fn specialize_start_states() -> Result<()> { let mut builder = Regex::builder(); builder.dfa(DFA::config().specialize_start_states(true)); TestRunner::new()? .expand(EXPANSIONS, |t| t.compiles()) // Without NFA shrinking, this test blows the default cache capacity. .blacklist("expensive/regression-many-repeat-no-stack-overflow") .test_iter(suite()?.iter(), compiler(builder)) .assert(); Ok(()) } /// Tests the hybrid NFA/DFA when byte classes are disabled. /// /// N.B. Disabling byte classes doesn't avoid any indirection at search time. /// All it does is cause every byte value to be its own distinct equivalence /// class. #[test] fn no_byte_classes() -> Result<()> { let mut builder = Regex::builder(); builder.dfa(DFA::config().byte_classes(false)); TestRunner::new()? .expand(EXPANSIONS, |t| t.compiles()) // Without NFA shrinking, this test blows the default cache capacity. .blacklist("expensive/regression-many-repeat-no-stack-overflow") .test_iter(suite()?.iter(), compiler(builder)) .assert(); Ok(()) } /// Tests that hybrid NFA/DFA never clears its cache for any test with the /// default capacity. /// /// N.B. If a regex suite test is added that causes the cache to be cleared, /// then this should just skip that test. (Which can be done by calling the /// 'blacklist' method on 'TestRunner'.) 
#[test] fn no_cache_clearing() -> Result<()> { let mut builder = Regex::builder(); builder.dfa(DFA::config().minimum_cache_clear_count(Some(0))); TestRunner::new()? .expand(EXPANSIONS, |t| t.compiles()) // Without NFA shrinking, this test blows the default cache capacity. .blacklist("expensive/regression-many-repeat-no-stack-overflow") .test_iter(suite()?.iter(), compiler(builder)) .assert(); Ok(()) } /// Tests the hybrid NFA/DFA when the minimum cache capacity is set. #[test] fn min_cache_capacity() -> Result<()> { let mut builder = Regex::builder(); builder .dfa(DFA::config().cache_capacity(0).skip_cache_capacity_check(true)); TestRunner::new()? .expand(EXPANSIONS, |t| t.compiles()) .test_iter(suite()?.iter(), compiler(builder)) .assert(); Ok(()) } fn compiler( mut builder: regex::Builder, ) -> impl FnMut(&RegexTest, &[String]) -> Result<CompiledRegex> { move |test, regexes| { // Parse regexes as HIRs for some analysis below. let mut hirs = vec![]; for pattern in regexes.iter() { hirs.push(syntax::parse_with(pattern, &config_syntax(test))?); } // Check if our regex contains things that aren't supported by DFAs. // That is, Unicode word boundaries when searching non-ASCII text. if !test.haystack().is_ascii() { for hir in hirs.iter() { if hir.properties().look_set().contains_word_unicode() { return Ok(CompiledRegex::skip()); } } } if !configure_regex_builder(test, &mut builder) { return Ok(CompiledRegex::skip()); } let re = builder.build_many(&regexes)?; let mut cache = re.create_cache(); Ok(CompiledRegex::compiled(move |test| -> TestResult { run_test(&re, &mut cache, test) })) } } fn run_test( re: &Regex, cache: &mut regex::Cache, test: &RegexTest, ) -> TestResult { let input = create_input(test); match test.additional_name() { "is_match" => { TestResult::matched(re.is_match(cache, input.earliest(true))) } "find" => match test.search_kind() { SearchKind::Earliest | SearchKind::Leftmost => { let input = input.earliest(test.search_kind() == SearchKind::Earliest); TestResult::matches( re.find_iter(cache, input) .take(test.match_limit().unwrap_or(std::usize::MAX)) .map(|m| Match { id: m.pattern().as_usize(), span: Span { start: m.start(), end: m.end() }, }), ) } SearchKind::Overlapping => { try_search_overlapping(re, cache, &input).unwrap() } }, "which" => match test.search_kind() { SearchKind::Earliest | SearchKind::Leftmost => { // There are no "which" APIs for standard searches. TestResult::skip() } SearchKind::Overlapping => { let dfa = re.forward(); let cache = cache.as_parts_mut().0; let mut patset = PatternSet::new(dfa.pattern_len()); dfa.try_which_overlapping_matches(cache, &input, &mut patset) .unwrap(); TestResult::which(patset.iter().map(|p| p.as_usize())) } }, name => TestResult::fail(&format!("unrecognized test name: {}", name)), } } /// Configures the given regex builder with all relevant settings on the given /// regex test. /// /// If the regex test has a setting that is unsupported, then this returns /// false (implying the test should be skipped). fn configure_regex_builder( test: &RegexTest, builder: &mut regex::Builder, ) -> bool { let match_kind = match untestify_kind(test.match_kind()) { None => return false, Some(k) => k, }; let mut dfa_config = DFA::config().match_kind(match_kind).unicode_word_boundary(true); // When doing an overlapping search, we might try to find the start of each // match with a custom search routine. In that case, we need to tell the // reverse search (for the start offset) which pattern to look for. 
    // The only way that API works is when anchored starting states are
    // compiled for each pattern. This does technically also enable it for the
    // forward DFA, but we're okay with that.
    if test.search_kind() == SearchKind::Overlapping {
        dfa_config = dfa_config.starts_for_each_pattern(true);
    }
    builder
        .syntax(config_syntax(test))
        .thompson(config_thompson(test))
        .dfa(dfa_config);
    true
}

/// Configuration of a Thompson NFA compiler from a regex test.
fn config_thompson(test: &RegexTest) -> thompson::Config {
    let mut lookm = regex_automata::util::look::LookMatcher::new();
    lookm.set_line_terminator(test.line_terminator());
    thompson::Config::new().utf8(test.utf8()).look_matcher(lookm)
}

/// Configuration of the regex parser from a regex test.
fn config_syntax(test: &RegexTest) -> syntax::Config {
    syntax::Config::new()
        .case_insensitive(test.case_insensitive())
        .unicode(test.unicode())
        .utf8(test.utf8())
        .line_terminator(test.line_terminator())
}

/// Execute an overlapping search, and for each match found, also find its
/// overlapping starting positions.
///
/// N.B. This routine used to be part of the crate API, but 1) it wasn't clear
/// to me how useful it was and 2) it wasn't clear to me what its semantics
/// should be. In particular, a potentially surprising footgun of this routine
/// is that it is worst case *quadratic* in the size of the haystack. Namely,
/// it's possible to report a match at every position, and for every such
/// position, scan all the way to the beginning of the haystack to find the
/// starting position. Typical leftmost non-overlapping searches don't suffer
/// from this because, well, matches can't overlap. So subsequent searches
/// after a match is found don't revisit previously scanned parts of the
/// haystack.
///
/// Its semantics can be strange for other reasons too. For example, given
/// the regex '.*' and the haystack 'zz', the full set of overlapping matches
/// is: [0, 0], [1, 1], [0, 1], [2, 2], [1, 2], [0, 2]. The ordering of
/// those matches is quite strange, but makes sense when you think about the
/// implementation: an end offset is found left-to-right, and then one or more
/// starting offsets are found right-to-left.
///
/// Nevertheless, we provide this routine in our test suite because it's
/// useful to test the low level DFA overlapping search and our test suite
/// is written in a way that requires starting offsets.
fn try_search_overlapping( re: &Regex, cache: &mut regex::Cache, input: &Input<'_>, ) -> Result<TestResult> { let mut matches = vec![]; let mut fwd_state = OverlappingState::start(); let (fwd_dfa, rev_dfa) = (re.forward(), re.reverse()); let (fwd_cache, rev_cache) = cache.as_parts_mut(); while let Some(end) = { fwd_dfa.try_search_overlapping_fwd( fwd_cache, input, &mut fwd_state, )?; fwd_state.get_match() } { let revsearch = input .clone() .range(input.start()..end.offset()) .anchored(Anchored::Pattern(end.pattern())) .earliest(false); let mut rev_state = OverlappingState::start(); while let Some(start) = { rev_dfa.try_search_overlapping_rev( rev_cache, &revsearch, &mut rev_state, )?; rev_state.get_match() } { let span = Span { start: start.offset(), end: end.offset() }; let mat = Match { id: end.pattern().as_usize(), span }; matches.push(mat); } } Ok(TestResult::matches(matches)) } <file_sep>/regex-cli/args/haystack.rs use std::path::PathBuf; use { anyhow::Context, bstr::{BStr, BString, ByteSlice, ByteVec}, lexopt::{Arg, Parser, ValueExt}, }; use crate::args::{Configurable, Usage}; /// A configuration object for reading a single haystack from the command line. /// /// This supports reading either an inline haystack specified via the /// `-y/--haystack` flag, or via a positional argument pointing to a file path. /// /// This supports reading exactly one haystack. If more than one are provided, /// then an error is returned at configuration time. If none are provided, then /// an error is returned when one attempts to retrieve the haystack. #[derive(Debug, Default)] pub struct Config { kind: Option<Kind>, } impl Config { /// Returns the haystack contents in this configuration. /// /// If the haystack was specified via a file path, then this returns the /// entire contents of the file on to the heap. pub fn get(&self) -> anyhow::Result<BString> { match self.kind { Some(Kind::Inline(ref haystack)) => Ok(haystack.clone()), Some(Kind::Path(ref path)) => { let contents = std::fs::read(&path).with_context(|| { anyhow::anyhow!("failed to read {}", path.display()) })?; Ok(BString::from(contents)) } None => anyhow::bail!( "haystack is required via the -y/--haystack flag \ or via a positional argument", ), } } /// If the haystack is a file, then memory map and pass the contents of the /// file to the given closure. Otherwise, if it's an inline literal, then /// pass it to the closure as-is. pub fn with<T>( &self, mut f: impl FnMut(&BStr) -> anyhow::Result<T>, ) -> anyhow::Result<T> { match self.kind { Some(Kind::Inline(ref haystack)) => f(haystack.as_bstr()), Some(Kind::Path(ref path)) => { let file = std::fs::File::open(path).with_context(|| { format!("failed to open {}", path.display()) })?; // SAFETY: We assume this is OK to do since we assume that our // search input is immutable. We specifically never try to // mutate the bytes from the file or treat them as anything // other than a slice of bytes. let mmap = unsafe { memmap2::Mmap::map(&file).with_context(|| { format!("failed to mmap {}", path.display()) })? 
                };
                f(<&BStr>::from(&*mmap))
            }
            None => anyhow::bail!(
                "haystack is required via the -y/--haystack flag \
                 or via a positional argument",
            ),
        }
    }
}

impl Configurable for Config {
    fn configure(
        &mut self,
        p: &mut Parser,
        arg: &mut Arg,
    ) -> anyhow::Result<bool> {
        match *arg {
            Arg::Short('y') | Arg::Long("haystack") => {
                anyhow::ensure!(
                    self.kind.is_none(),
                    "only one haystack is allowed",
                );
                let hay = p.value().context("-y/--haystack needs a value")?;
                let hay = hay
                    .string()
                    .context("-y/--haystack must be valid UTF-8")?;
                let hay = Vec::unescape_bytes(&hay);
                self.kind = Some(Kind::Inline(BString::from(hay)));
            }
            Arg::Value(ref mut v) => {
                anyhow::ensure!(
                    self.kind.is_none(),
                    "only one haystack is allowed",
                );
                let path = PathBuf::from(std::mem::take(v));
                self.kind = Some(Kind::Path(path));
            }
            _ => return Ok(false),
        }
        Ok(true)
    }

    fn usage(&self) -> &[Usage] {
        const USAGES: &'static [Usage] = &[Usage::new(
            "-y, --haystack <haystack>",
            "Provide an inline haystack on the command line.",
            r#"
This flag provides an inline haystack on the command line. That is, the value
of this flag is *not* a file path, but the haystack contents itself. This is
convenient for small regex searches because it lets one skip creating a file
or other shenanigans.

The haystack contents must be valid UTF-8, but it supports escape sequences.
So for example, "-y 'a\xFF\t'" corresponds to the byte sequence 0x61 0xFF 0x09.

Note that exactly one haystack is permitted. The haystack can either be
specified inline with this flag, or can be provided as a file path via a
positional argument.
"#,
        )];
        USAGES
    }
}

/// The kind of haystack specified on the command line.
///
/// We don't read the file path contents at arg parsing time so that we can
/// be a little more flexible. For example, by providing a way to memory map
/// the haystack contents.
#[derive(Debug)]
enum Kind {
    Inline(BString),
    Path(PathBuf),
}
<file_sep>/src/bytes.rs
/*!
Search for regex matches in `&[u8]` haystacks.

This module provides a nearly identical API via [`Regex`] to the one found in
the top-level of this crate. There are two important differences:

1. Matching is done on `&[u8]` instead of `&str`. Additionally, `Vec<u8>`
is used where `String` would have been used in the top-level API.
2. Unicode support can be disabled even when disabling it would result in
matching invalid UTF-8 bytes.

# Example: match null terminated string

This shows how to find all null-terminated strings in a slice of bytes. This
works even if a C string contains invalid UTF-8.

```rust
use regex::bytes::Regex;

let re = Regex::new(r"(?-u)(?<cstr>[^\x00]+)\x00").unwrap();
let hay = b"foo\x00qu\xFFux\x00baz\x00";

// Extract all of the strings without the NUL terminator from each match.
// The unwrap is OK here since a match requires the `cstr` capture to match.
let cstrs: Vec<&[u8]> = re.captures_iter(hay)
    .map(|c| c.name("cstr").unwrap().as_bytes())
    .collect();
assert_eq!(cstrs, vec![&b"foo"[..], &b"qu\xFFux"[..], &b"baz"[..]]);
```

# Example: selectively enable Unicode support

This shows how to match an arbitrary byte pattern followed by a UTF-8 encoded
string (e.g., to extract a title from a Matroska file):

```rust
use regex::bytes::Regex;

let re = Regex::new(
    r"(?-u)\x7b\xa9(?:[\x80-\xfe]|[\x40-\xff].)(?u:(.*))"
).unwrap();
let hay = b"\x12\xd0\x3b\x5f\x7b\xa9\x85\xe2\x98\x83\x80\x98\x54\x76\x68\x65";

// Notice that despite the `.*` at the end, it will only match valid UTF-8
// because Unicode mode was enabled with the `u` flag.
Without the `u` flag, // the `.*` would match the rest of the bytes regardless of whether they were // valid UTF-8. let (_, [title]) = re.captures(hay).unwrap().extract(); assert_eq!(title, b"\xE2\x98\x83"); // We can UTF-8 decode the title now. And the unwrap here // is correct because the existence of a match guarantees // that `title` is valid UTF-8. let title = std::str::from_utf8(title).unwrap(); assert_eq!(title, "☃"); ``` In general, if the Unicode flag is enabled in a capture group and that capture is part of the overall match, then the capture is *guaranteed* to be valid UTF-8. # Syntax The supported syntax is pretty much the same as the syntax for Unicode regular expressions with a few changes that make sense for matching arbitrary bytes: 1. The `u` flag can be disabled even when disabling it might cause the regex to match invalid UTF-8. When the `u` flag is disabled, the regex is said to be in "ASCII compatible" mode. 2. In ASCII compatible mode, neither Unicode scalar values nor Unicode character classes are allowed. 3. In ASCII compatible mode, Perl character classes (`\w`, `\d` and `\s`) revert to their typical ASCII definition. `\w` maps to `[[:word:]]`, `\d` maps to `[[:digit:]]` and `\s` maps to `[[:space:]]`. 4. In ASCII compatible mode, word boundaries use the ASCII compatible `\w` to determine whether a byte is a word byte or not. 5. Hexadecimal notation can be used to specify arbitrary bytes instead of Unicode codepoints. For example, in ASCII compatible mode, `\xFF` matches the literal byte `\xFF`, while in Unicode mode, `\xFF` is the Unicode codepoint `U+00FF` that matches its UTF-8 encoding of `\xC3\xBF`. Similarly for octal notation when enabled. 6. In ASCII compatible mode, `.` matches any *byte* except for `\n`. When the `s` flag is additionally enabled, `.` matches any byte. # Performance In general, one should expect performance on `&[u8]` to be roughly similar to performance on `&str`. */ pub use crate::{builders::bytes::*, regex::bytes::*, regexset::bytes::*}; <file_sep>/testdata/README.md This directory contains a large suite of regex tests defined in a TOML format. They are used to drive tests in `tests/lib.rs`, `regex-automata/tests/lib.rs` and `regex-lite/tests/lib.rs`. See the [`regex-test`][regex-test] crate documentation for an explanation of the format and how it generates tests. The basic idea here is that we have many different regex engines but generally one set of tests. We want to be able to run those tests (or most of them) on every engine. Prior to `regex 1.9`, we used to do this with a hodge podge soup of macros and a different test executable for each engine. It overall took a longer time to compile, was harder to maintain and it made the test definitions themselves less clear. In `regex 1.9`, when we moved over to `regex-automata`, the situation got a lot worse because of an increase in the number of engines. So I devised an engine independent format for testing regex patterns and their semantics. Note: the naming scheme used in these tests isn't terribly consistent. It would be great to fix that. [regex-test]: https://docs.rs/regex-test <file_sep>/regex-automata/src/nfa/thompson/range_trie.rs /* I've called the primary data structure in this module a "range trie." As far as I can tell, there is no prior art on a data structure like this, however, it's likely someone somewhere has built something like it. Searching for "range trie" turns up the paper "Range Tries for Scalable Address Lookup," but it does not appear relevant. 
The range trie is just like a trie in that it is a special case of a
deterministic finite state machine. It has states and each state has a set of
transitions to other states. It is acyclic, and, like a normal trie, it makes
no attempt to reuse common suffixes among its elements. The key difference
between a normal trie and a range trie below is that a range trie operates on
*contiguous sequences* of bytes instead of singleton bytes. One could say that
our alphabet is ranges of bytes instead of bytes themselves, except a key part
of range trie construction is splitting ranges apart to ensure there is at
most one transition that can be taken for any byte in a given state.

I've tried to explain the details of how the range trie works below, so for
now, we are left with trying to understand what problem we're trying to solve.
Which is itself fairly involved!

At the highest level, here's what we want to do. We want to convert a sequence
of Unicode codepoints into a finite state machine whose transitions are over
*bytes* and *not* Unicode codepoints. We want this because it makes said
finite state machines much smaller and much faster to execute. As a simple
example, consider a byte oriented automaton for all Unicode scalar values
(0x00 through 0x10FFFF, not including surrogate codepoints):

    [00-7F]
    [C2-DF][80-BF]
    [E0-E0][A0-BF][80-BF]
    [E1-EC][80-BF][80-BF]
    [ED-ED][80-9F][80-BF]
    [EE-EF][80-BF][80-BF]
    [F0-F0][90-BF][80-BF][80-BF]
    [F1-F3][80-BF][80-BF][80-BF]
    [F4-F4][80-8F][80-BF][80-BF]

(These byte ranges are generated via the regex-syntax::utf8 module, which was
based on Russ Cox's code in RE2, which was in turn based on Ken Thompson's
implementation of the same idea in his Plan9 implementation of grep.)

It should be fairly straight-forward to see how one could compile this into a
DFA. The sequences are sorted and non-overlapping. Essentially, you could
build a trie from this fairly easily. The problem comes when your initial
range (in this case, 0x00-0x10FFFF) isn't so nice. For example, the class
represented by '\w' contains only a tenth of the codepoints that 0x00-0x10FFFF
contains, but if we were to write out the byte based ranges as we did above,
the list would stretch to 892 entries! This turns into quite a large NFA with
a few thousand states. Turning this beast into a DFA takes quite a bit of
time. We are thus left with trying to trim down the number of states we
produce as early as possible.

One approach (used by RE2 and still by the regex crate, at time of writing) is
to try to find common suffixes while building NFA states for the above and
reuse them. This is very cheap to do and one can control precisely how much
extra memory you want to use for the cache.

Another approach, however, is to reuse an algorithm for constructing a
*minimal* DFA from a sorted sequence of inputs. I don't want to go into the
full details here, but I explain it in more depth in my blog post on FSTs[1].
Note that the algorithm was not invented by me, but was published in a paper
by Daciuk et al. in 2000 called "Incremental Construction of Minimal Acyclic
Finite-State Automata." Like the suffix cache approach above, it is also
possible to control the amount of extra memory one uses, although this
usually comes with the cost of sacrificing true minimality. (But it's
typically close enough with a reasonably sized cache of states.)

The catch is that Daciuk's algorithm only works if you add your keys in
lexicographic ascending order.
In our case, since we're dealing with ranges, we also need the additional
requirement that ranges are either equivalent or do not overlap at all. For
example, if one were given the following byte ranges:

    [BC-BF][80-BF]
    [BC-BF][90-BF]

Then Daciuk's algorithm would not work, since there is nothing to handle the
fact that the ranges overlap. They would need to be split apart.

Thankfully, Thompson's algorithm for producing byte ranges for Unicode
codepoint ranges meets both of our requirements. (A proof for this eludes me,
but it appears true.)

... however, we would also like to be able to compile UTF-8 automata in
reverse. We want this because in order to find the starting location of a
match using a DFA, we need to run a second DFA---a reversed version of the
forward DFA---backwards to discover the match location. Unfortunately, if we
reverse our byte sequences for 0x00-0x10FFFF, we get sequences that can
overlap, even if they are sorted:

    [00-7F]
    [80-BF][80-9F][ED-ED]
    [80-BF][80-BF][80-8F][F4-F4]
    [80-BF][80-BF][80-BF][F1-F3]
    [80-BF][80-BF][90-BF][F0-F0]
    [80-BF][80-BF][E1-EC]
    [80-BF][80-BF][EE-EF]
    [80-BF][A0-BF][E0-E0]
    [80-BF][C2-DF]

For example, '[80-BF][80-BF][EE-EF]' and '[80-BF][A0-BF][E0-E0]' have
overlapping ranges between '[80-BF]' and '[A0-BF]'. Thus, there is no simple
way to apply Daciuk's algorithm.

And thus, the range trie was born. The range trie's only purpose is to take
sequences of byte ranges like the ones above, collect them into a trie and
then spit them out in a sorted fashion with no overlapping ranges. For
example, 0x00-0x10FFFF gets translated to:

    [0-7F]
    [80-BF][80-9F][80-8F][F1-F3]
    [80-BF][80-9F][80-8F][F4]
    [80-BF][80-9F][90-BF][F0]
    [80-BF][80-9F][90-BF][F1-F3]
    [80-BF][80-9F][E1-EC]
    [80-BF][80-9F][ED]
    [80-BF][80-9F][EE-EF]
    [80-BF][A0-BF][80-8F][F1-F3]
    [80-BF][A0-BF][80-8F][F4]
    [80-BF][A0-BF][90-BF][F0]
    [80-BF][A0-BF][90-BF][F1-F3]
    [80-BF][A0-BF][E0]
    [80-BF][A0-BF][E1-EC]
    [80-BF][A0-BF][EE-EF]
    [80-BF][C2-DF]

We've thus satisfied our requirements for running Daciuk's algorithm. All
sequences of ranges are sorted, and any corresponding ranges are either
exactly equivalent or non-overlapping.

In effect, a range trie is building a DFA from a sequence of arbitrary byte
ranges. But it uses an algorithm custom tailored to its input, so it is not
as costly as traditional DFA construction. While it is still quite a bit more
costly than the forward case (which only needs Daciuk's algorithm), it winds
up saving a substantial amount of time if one is doing a full DFA powerset
construction later by virtue of producing a much, much smaller NFA.

[1] - https://blog.burntsushi.net/transducers/
[2] - https://www.mitpressjournals.org/doi/pdfplus/10.1162/089120100561601
*/

use core::{cell::RefCell, convert::TryFrom, fmt, mem, ops::RangeInclusive};

use alloc::{format, string::String, vec, vec::Vec};

use regex_syntax::utf8::Utf8Range;

use crate::util::primitives::StateID;

/// There is only one final state in this trie. Every sequence of byte ranges
/// added shares the same final state.
const FINAL: StateID = StateID::ZERO;

/// The root state of the trie.
const ROOT: StateID = StateID::new_unchecked(1);

/// A range trie represents an ordered set of sequences of bytes.
///
/// A range trie accepts as input a sequence of byte ranges and merges
/// them into the existing set such that the trie can produce a sorted
/// non-overlapping sequence of byte ranges.
/// The sequence emitted corresponds precisely to the sequence of bytes
/// matched by the given keys, although the byte ranges themselves may be
/// split at different boundaries.
///
/// The order complexity of this data structure seems difficult to analyze.
/// If the size of a byte is held as a constant, then insertion is clearly
/// O(n) where n is the number of byte ranges in the input key. However, if
/// k=256 is our alphabet size, then insertion could be O(k^2 * n). In
/// particular it seems possible for pathological inputs to cause insertion
/// to do a lot of work. However, for what we use this data structure for,
/// there should be no pathological inputs since the ultimate source is always
/// a sorted set of Unicode scalar value ranges.
///
/// Internally, this trie is set up like a finite state machine. Note though
/// that it is acyclic.
#[derive(Clone)]
pub struct RangeTrie {
    /// The states in this trie. The first is always the shared final state.
    /// The second is always the root state. Otherwise, there is no
    /// particular order.
    states: Vec<State>,
    /// A free-list of states. When a range trie is cleared, all of its states
    /// are added to this list. Creating a new state reuses states from this
    /// list before allocating a new one.
    free: Vec<State>,
    /// A stack for traversing this trie to yield sequences of byte ranges in
    /// lexicographic order.
    iter_stack: RefCell<Vec<NextIter>>,
    /// A buffer that stores the current sequence during iteration.
    iter_ranges: RefCell<Vec<Utf8Range>>,
    /// A stack used for traversing the trie in order to (deeply) duplicate
    /// a state. States are recursively duplicated when ranges are split.
    dupe_stack: Vec<NextDupe>,
    /// A stack used for traversing the trie during insertion of a new
    /// sequence of byte ranges.
    insert_stack: Vec<NextInsert>,
}

/// A single state in this trie.
#[derive(Clone)]
struct State {
    /// A sorted sequence of non-overlapping transitions to other states. Each
    /// transition corresponds to a single range of bytes.
    transitions: Vec<Transition>,
}

/// A transition is a single range of bytes. If a particular byte is in this
/// range, then the corresponding machine may transition to the state pointed
/// to by `next_id`.
#[derive(Clone)]
struct Transition {
    /// The byte range.
    range: Utf8Range,
    /// The next state to transition to.
    next_id: StateID,
}

impl RangeTrie {
    /// Create a new empty range trie.
    pub fn new() -> RangeTrie {
        let mut trie = RangeTrie {
            states: vec![],
            free: vec![],
            iter_stack: RefCell::new(vec![]),
            iter_ranges: RefCell::new(vec![]),
            dupe_stack: vec![],
            insert_stack: vec![],
        };
        trie.clear();
        trie
    }

    /// Clear this range trie such that it is empty. Clearing a range trie
    /// and reusing it can be beneficial because this may reuse allocations.
    pub fn clear(&mut self) {
        self.free.extend(self.states.drain(..));
        self.add_empty(); // final
        self.add_empty(); // root
    }

    /// Iterate over all of the sequences of byte ranges in this trie, and
    /// call the provided function for each sequence. Iteration occurs in
    /// lexicographic order.
    pub fn iter<E, F: FnMut(&[Utf8Range]) -> Result<(), E>>(
        &self,
        mut f: F,
    ) -> Result<(), E> {
        let mut stack = self.iter_stack.borrow_mut();
        stack.clear();
        let mut ranges = self.iter_ranges.borrow_mut();
        ranges.clear();

        // We do iteration in a way that permits us to use a single buffer
        // for our keys. We iterate in a depth first fashion, while being
        // careful to expand our frontier as we move deeper in the trie.
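        // Seed the traversal at the root state, starting with its first
        // transition. 'ranges' accumulates the current path of byte ranges.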
        stack.push(NextIter { state_id: ROOT, tidx: 0 });
        while let Some(NextIter { mut state_id, mut tidx }) = stack.pop() {
            // This could be implemented more simply without an inner loop
            // here, but at the cost of more stack pushes.
            loop {
                let state = self.state(state_id);
                // If we've visited all transitions in this state, then pop
                // back to the parent state.
                if tidx >= state.transitions.len() {
                    ranges.pop();
                    break;
                }

                let t = &state.transitions[tidx];
                ranges.push(t.range);
                if t.next_id == FINAL {
                    f(&ranges)?;
                    ranges.pop();
                    tidx += 1;
                } else {
                    // Expand our frontier. Once we come back to this state
                    // via the stack, start in on the next transition.
                    stack.push(NextIter { state_id, tidx: tidx + 1 });
                    // Otherwise, move to the first transition of the next
                    // state.
                    state_id = t.next_id;
                    tidx = 0;
                }
            }
        }
        Ok(())
    }

    /// Inserts a new sequence of ranges into this trie.
    ///
    /// The sequence given must be non-empty and must not have a length
    /// exceeding 4.
    pub fn insert(&mut self, ranges: &[Utf8Range]) {
        assert!(!ranges.is_empty());
        assert!(ranges.len() <= 4);

        let mut stack = mem::replace(&mut self.insert_stack, vec![]);
        stack.clear();

        stack.push(NextInsert::new(ROOT, ranges));
        while let Some(next) = stack.pop() {
            let (state_id, ranges) = (next.state_id(), next.ranges());
            assert!(!ranges.is_empty());

            let (mut new, rest) = (ranges[0], &ranges[1..]);

            // i corresponds to the position of the existing transition on
            // which we are operating. Typically, the result is to remove the
            // transition and replace it with two or more new transitions
            // corresponding to the partitions generated by splitting the
            // 'new' with the ith transition's range.
            let mut i = self.state(state_id).find(new);

            // In this case, there is no overlap *and* the new range is greater
            // than all existing ranges. So we can just add it to the end.
            if i == self.state(state_id).transitions.len() {
                let next_id = NextInsert::push(self, &mut stack, rest);
                self.add_transition(state_id, new, next_id);
                continue;
            }

            // The need for this loop is a bit subtle, but basically, after
            // we've handled the partitions from our initial split, it's
            // possible that there will be a partition leftover that overlaps
            // with a subsequent transition. If so, then we have to repeat
            // the split process again with the leftovers and that subsequent
            // transition.
            'OUTER: loop {
                let old = self.state(state_id).transitions[i].clone();
                let split = match Split::new(old.range, new) {
                    Some(split) => split,
                    None => {
                        let next_id = NextInsert::push(self, &mut stack, rest);
                        self.add_transition_at(i, state_id, new, next_id);
                        continue;
                    }
                };
                let splits = split.as_slice();
                // If we only have one partition, then the ranges must be
                // equivalent. There's nothing to do here for this state, so
                // just move on to the next one.
                if splits.len() == 1 {
                    // ... but only if we have anything left to do.
                    if !rest.is_empty() {
                        stack.push(NextInsert::new(old.next_id, rest));
                    }
                    break;
                }

                // At this point, we know that 'split' is non-empty and there
                // must be some overlap AND that the two ranges are not
                // equivalent. Therefore, the existing range MUST be removed
                // and split up somehow. Instead of actually doing the removal
                // and then a subsequent insertion---with all the memory
                // shuffling that entails---we simply overwrite the transition
                // at position `i` for the first new transition we want to
                // insert. After that, we're forced to do expensive inserts.
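                // 'first' tracks whether the transition at position `i` can
                // still be overwritten in place; after the first use,
                // 'add_trans' falls back to inserting brand new transitions.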
let mut first = true; let mut add_trans = |trie: &mut RangeTrie, pos, from, range, to| { if first { trie.set_transition_at(pos, from, range, to); first = false; } else { trie.add_transition_at(pos, from, range, to); } }; for (j, &srange) in splits.iter().enumerate() { match srange { SplitRange::Old(r) => { // Deep clone the state pointed to by the ith // transition. This is always necessary since 'old' // is always coupled with at least a 'both' // partition. We don't want any new changes made // via the 'both' partition to impact the part of // the transition that doesn't overlap with the // new range. let dup_id = self.duplicate(old.next_id); add_trans(self, i, state_id, r, dup_id); } SplitRange::New(r) => { // This is a bit subtle, but if this happens to be // the last partition in our split, it is possible // that this overlaps with a subsequent transition. // If it does, then we must repeat the whole // splitting process over again with `r` and the // subsequent transition. { let trans = &self.state(state_id).transitions; if j + 1 == splits.len() && i < trans.len() && intersects(r, trans[i].range) { new = r; continue 'OUTER; } } // ... otherwise, setup exploration for a new // empty state and add a brand new transition for // this new range. let next_id = NextInsert::push(self, &mut stack, rest); add_trans(self, i, state_id, r, next_id); } SplitRange::Both(r) => { // Continue adding the remaining ranges on this // path and update the transition with the new // range. if !rest.is_empty() { stack.push(NextInsert::new(old.next_id, rest)); } add_trans(self, i, state_id, r, old.next_id); } } i += 1; } // If we've reached this point, then we know that there are // no subsequent transitions with any overlap. Therefore, we // can stop processing this range and move on to the next one. break; } } self.insert_stack = stack; } pub fn add_empty(&mut self) -> StateID { let id = match StateID::try_from(self.states.len()) { Ok(id) => id, Err(_) => { // This generally should not happen since a range trie is // only ever used to compile a single sequence of Unicode // scalar values. If we ever got to this point, we would, at // *minimum*, be using 96GB in just the range trie alone. panic!("too many sequences added to range trie"); } }; // If we have some free states available, then use them to avoid // more allocations. if let Some(mut state) = self.free.pop() { state.clear(); self.states.push(state); } else { self.states.push(State { transitions: vec![] }); } id } /// Performs a deep clone of the given state and returns the duplicate's /// state ID. /// /// A "deep clone" in this context means that the state given along with /// recursively all states that it points to are copied. Once complete, /// the given state ID and the returned state ID share nothing. /// /// This is useful during range trie insertion when a new range overlaps /// with an existing range that is bigger than the new one. The part /// of the existing range that does *not* overlap with the new one is /// duplicated so that adding the new range to the overlap doesn't disturb /// the non-overlapping portion. /// /// There's one exception: if old_id is the final state, then it is not /// duplicated and the same final state is returned. This is because all /// final states in this trie are equivalent. 
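    ///
    /// The copy is made iteratively with an explicit work stack
    /// (`dupe_stack`) rather than recursion.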
fn duplicate(&mut self, old_id: StateID) -> StateID { if old_id == FINAL { return FINAL; } let mut stack = mem::replace(&mut self.dupe_stack, vec![]); stack.clear(); let new_id = self.add_empty(); // old_id is the state we're cloning and new_id is the ID of the // duplicated state for old_id. stack.push(NextDupe { old_id, new_id }); while let Some(NextDupe { old_id, new_id }) = stack.pop() { for i in 0..self.state(old_id).transitions.len() { let t = self.state(old_id).transitions[i].clone(); if t.next_id == FINAL { // All final states are the same, so there's no need to // duplicate it. self.add_transition(new_id, t.range, FINAL); continue; } let new_child_id = self.add_empty(); self.add_transition(new_id, t.range, new_child_id); stack.push(NextDupe { old_id: t.next_id, new_id: new_child_id, }); } } self.dupe_stack = stack; new_id } /// Adds the given transition to the given state. /// /// Callers must ensure that all previous transitions in this state /// are lexicographically smaller than the given range. fn add_transition( &mut self, from_id: StateID, range: Utf8Range, next_id: StateID, ) { self.state_mut(from_id) .transitions .push(Transition { range, next_id }); } /// Like `add_transition`, except this inserts the transition just before /// the ith transition. fn add_transition_at( &mut self, i: usize, from_id: StateID, range: Utf8Range, next_id: StateID, ) { self.state_mut(from_id) .transitions .insert(i, Transition { range, next_id }); } /// Overwrites the transition at position i with the given transition. fn set_transition_at( &mut self, i: usize, from_id: StateID, range: Utf8Range, next_id: StateID, ) { self.state_mut(from_id).transitions[i] = Transition { range, next_id }; } /// Return an immutable borrow for the state with the given ID. fn state(&self, id: StateID) -> &State { &self.states[id] } /// Return a mutable borrow for the state with the given ID. fn state_mut(&mut self, id: StateID) -> &mut State { &mut self.states[id] } } impl State { /// Find the position at which the given range should be inserted in this /// state. /// /// The position returned is always in the inclusive range /// [0, transitions.len()]. If 'transitions.len()' is returned, then the /// given range overlaps with no other range in this state *and* is greater /// than all of them. /// /// For all other possible positions, the given range either overlaps /// with the transition at that position or is otherwise less than it /// with no overlap (and is greater than the previous transition). In the /// former case, careful attention must be paid to inserting this range /// as a new transition. In the latter case, the range can be inserted as /// a new transition at the given position without disrupting any other /// transitions. fn find(&self, range: Utf8Range) -> usize { /// Returns the position `i` at which `pred(xs[i])` first returns true /// such that for all `j >= i`, `pred(xs[j]) == true`. If `pred` never /// returns true, then `xs.len()` is returned. /// /// We roll our own binary search because it doesn't seem like the /// standard library's binary search can be used here. Namely, if /// there is an overlapping range, then we want to find the first such /// occurrence, but there may be many. Or at least, it's not quite /// clear to me how to do it. fn binary_search<T, F>(xs: &[T], mut pred: F) -> usize where F: FnMut(&T) -> bool, { let (mut left, mut right) = (0, xs.len()); while left < right { // Overflow is impossible because xs.len() <= 256. 
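                // Loop invariant (assuming 'pred' partitions 'xs' as
                // described above): 'pred' is false for every element before
                // 'left' and true for every element at or after 'right'.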
                let mid = (left + right) / 2;
                if pred(&xs[mid]) {
                    right = mid;
                } else {
                    left = mid + 1;
                }
            }
            left
        }

        // Benchmarks suggest that binary search is just a bit faster than
        // straight linear search. Specifically when using the debug tool:
        //
        //   hyperfine "regex-cli debug nfa thompson --quiet --reverse '\w{90} ecurB'"
        binary_search(&self.transitions, |t| range.start <= t.range.end)
    }

    /// Clear this state such that it has zero transitions.
    fn clear(&mut self) {
        self.transitions.clear();
    }
}

/// The next state to process during duplication.
#[derive(Clone, Debug)]
struct NextDupe {
    /// The state we want to duplicate.
    old_id: StateID,
    /// The ID of the new state that is a duplicate of old_id.
    new_id: StateID,
}

/// The next state (and its corresponding transition) that we want to visit
/// during iteration in lexicographic order.
#[derive(Clone, Debug)]
struct NextIter {
    state_id: StateID,
    tidx: usize,
}

/// The next state to process during insertion and any remaining ranges that we
/// want to add for a particular sequence of ranges. The first such instance
/// is always the root state along with all ranges given.
#[derive(Clone, Debug)]
struct NextInsert {
    /// The next state to begin inserting ranges. This state should be the
    /// state at which `ranges[0]` should be inserted.
    state_id: StateID,
    /// The ranges to insert. We used a fixed-size array here to avoid an
    /// allocation.
    ranges: [Utf8Range; 4],
    /// The number of valid ranges in the above array.
    len: u8,
}

impl NextInsert {
    /// Create the next item to visit. The given state ID should correspond
    /// to the state at which the first range in the given slice should be
    /// inserted. The slice given must not be empty and it must be no longer
    /// than 4.
    fn new(state_id: StateID, ranges: &[Utf8Range]) -> NextInsert {
        let len = ranges.len();
        assert!(len > 0);
        assert!(len <= 4);

        let mut tmp = [Utf8Range { start: 0, end: 0 }; 4];
        tmp[..len].copy_from_slice(ranges);
        NextInsert { state_id, ranges: tmp, len: u8::try_from(len).unwrap() }
    }

    /// Push a new empty state to visit along with any remaining ranges that
    /// still need to be inserted. The ID of the new empty state is returned.
    ///
    /// If ranges is empty, then no new state is created and FINAL is returned.
    fn push(
        trie: &mut RangeTrie,
        stack: &mut Vec<NextInsert>,
        ranges: &[Utf8Range],
    ) -> StateID {
        if ranges.is_empty() {
            FINAL
        } else {
            let next_id = trie.add_empty();
            stack.push(NextInsert::new(next_id, ranges));
            next_id
        }
    }

    /// Return the ID of the state to visit.
    fn state_id(&self) -> StateID {
        self.state_id
    }

    /// Return the remaining ranges to insert.
    fn ranges(&self) -> &[Utf8Range] {
        &self.ranges[..usize::try_from(self.len).unwrap()]
    }
}

/// Split represents a partitioning of two ranges into one or more ranges. This
/// is the secret sauce that makes a range trie work, as it's what tells us
/// how to deal with two overlapping but unequal ranges during insertion.
///
/// Essentially, either two ranges overlap or they don't. If they don't, then
/// handling insertion is easy: just insert the new range into its
/// lexicographically correct position. Since it does not overlap with anything
/// else, no other transitions are impacted by the new range.
///
/// If they do overlap though, there are generally three possible cases to
/// handle:
///
/// 1. The part where the two ranges actually overlap. i.e., The intersection.
/// 2. The part of the existing range that is not in the new range.
/// 3. The part of the new range that is not in the old range.
/// /// (1) is guaranteed to always occur since all overlapping ranges have a /// non-empty intersection. If the two ranges are not equivalent, then at /// least one of (2) or (3) is guaranteed to occur as well. In some cases, /// e.g., `[0-4]` and `[4-9]`, all three cases will occur. /// /// This `Split` type is responsible for providing (1), (2) and (3) for any /// possible pair of byte ranges. /// /// As for insertion, for the overlap in (1), the remaining ranges to insert /// should be added by following the corresponding transition. However, this /// should only be done for the overlapping parts of the range. If there was /// a part of the existing range that was not in the new range, then that /// existing part must be split off from the transition and duplicated. The /// remaining parts of the overlap can then be added to using the new ranges /// without disturbing the existing range. /// /// Handling the case for the part of a new range that is not in an existing /// range is seemingly easy. Just treat it as if it were a non-overlapping /// range. The problem here is that if this new non-overlapping range occurs /// after both (1) and (2), then it's possible that it can overlap with the /// next transition in the current state. If it does, then the whole process /// must be repeated! /// /// # Details of the 3 cases /// /// The following details the various cases that are implemented in code /// below. It's plausible that the number of cases is not actually minimal, /// but it's important for this code to remain at least somewhat readable. /// /// Given [a,b] and [x,y], where a <= b, x <= y, b < 256 and y < 256, we define /// the follow distinct relationships where at least one must apply. The order /// of these matters, since multiple can match. The first to match applies. /// /// 1. b < x <=> [a,b] < [x,y] /// 2. y < a <=> [x,y] < [a,b] /// /// In the case of (1) and (2), these are the only cases where there is no /// overlap. Or otherwise, the intersection of [a,b] and [x,y] is empty. In /// order to compute the intersection, one can do [max(a,x), min(b,y)]. The /// intersection in all of the following cases is non-empty. /// /// 3. a = x && b = y <=> [a,b] == [x,y] /// 4. a = x && b < y <=> [x,y] right-extends [a,b] /// 5. b = y && a > x <=> [x,y] left-extends [a,b] /// 6. x = a && y < b <=> [a,b] right-extends [x,y] /// 7. y = b && x > a <=> [a,b] left-extends [x,y] /// 8. a > x && b < y <=> [x,y] covers [a,b] /// 9. x > a && y < b <=> [a,b] covers [x,y] /// 10. b = x && a < y <=> [a,b] is left-adjacent to [x,y] /// 11. y = a && x < b <=> [x,y] is left-adjacent to [a,b] /// 12. b > x && b < y <=> [a,b] left-overlaps [x,y] /// 13. y > a && y < b <=> [x,y] left-overlaps [a,b] /// /// In cases 3-13, we can form rules that partition the ranges into a /// non-overlapping ordered sequence of ranges: /// /// 3. [a,b] /// 4. [a,b], [b+1,y] /// 5. [x,a-1], [a,b] /// 6. [x,y], [y+1,b] /// 7. [a,x-1], [x,y] /// 8. [x,a-1], [a,b], [b+1,y] /// 9. [a,x-1], [x,y], [y+1,b] /// 10. [a,b-1], [b,b], [b+1,y] /// 11. [x,y-1], [y,y], [y+1,b] /// 12. [a,x-1], [x,b], [b+1,y] /// 13. [x,a-1], [a,y], [y+1,b] /// /// In the code below, we go a step further and identify each of the above /// outputs as belonging either to the overlap of the two ranges or to one /// of [a,b] or [x,y] exclusively. #[derive(Clone, Debug, Eq, PartialEq)] struct Split { partitions: [SplitRange; 3], len: usize, } /// A tagged range indicating how it was derived from a pair of ranges. 
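///
/// For example, splitting an old range [3,6] against a new range [2,7]
/// (case 8 above, and one of the unit tests below) yields `New([2,2])`,
/// `Both([3,6])`, `New([7,7])`.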
#[derive(Clone, Copy, Debug, Eq, PartialEq)] enum SplitRange { Old(Utf8Range), New(Utf8Range), Both(Utf8Range), } impl Split { /// Create a partitioning of the given ranges. /// /// If the given ranges have an empty intersection, then None is returned. fn new(o: Utf8Range, n: Utf8Range) -> Option<Split> { let range = |r: RangeInclusive<u8>| Utf8Range { start: *r.start(), end: *r.end(), }; let old = |r| SplitRange::Old(range(r)); let new = |r| SplitRange::New(range(r)); let both = |r| SplitRange::Both(range(r)); // Use same names as the comment above to make it easier to compare. let (a, b, x, y) = (o.start, o.end, n.start, n.end); if b < x || y < a { // case 1, case 2 None } else if a == x && b == y { // case 3 Some(Split::parts1(both(a..=b))) } else if a == x && b < y { // case 4 Some(Split::parts2(both(a..=b), new(b + 1..=y))) } else if b == y && a > x { // case 5 Some(Split::parts2(new(x..=a - 1), both(a..=b))) } else if x == a && y < b { // case 6 Some(Split::parts2(both(x..=y), old(y + 1..=b))) } else if y == b && x > a { // case 7 Some(Split::parts2(old(a..=x - 1), both(x..=y))) } else if a > x && b < y { // case 8 Some(Split::parts3(new(x..=a - 1), both(a..=b), new(b + 1..=y))) } else if x > a && y < b { // case 9 Some(Split::parts3(old(a..=x - 1), both(x..=y), old(y + 1..=b))) } else if b == x && a < y { // case 10 Some(Split::parts3(old(a..=b - 1), both(b..=b), new(b + 1..=y))) } else if y == a && x < b { // case 11 Some(Split::parts3(new(x..=y - 1), both(y..=y), old(y + 1..=b))) } else if b > x && b < y { // case 12 Some(Split::parts3(old(a..=x - 1), both(x..=b), new(b + 1..=y))) } else if y > a && y < b { // case 13 Some(Split::parts3(new(x..=a - 1), both(a..=y), old(y + 1..=b))) } else { unreachable!() } } /// Create a new split with a single partition. This only occurs when two /// ranges are equivalent. fn parts1(r1: SplitRange) -> Split { // This value doesn't matter since it is never accessed. let nada = SplitRange::Old(Utf8Range { start: 0, end: 0 }); Split { partitions: [r1, nada, nada], len: 1 } } /// Create a new split with two partitions. fn parts2(r1: SplitRange, r2: SplitRange) -> Split { // This value doesn't matter since it is never accessed. let nada = SplitRange::Old(Utf8Range { start: 0, end: 0 }); Split { partitions: [r1, r2, nada], len: 2 } } /// Create a new split with three partitions. fn parts3(r1: SplitRange, r2: SplitRange, r3: SplitRange) -> Split { Split { partitions: [r1, r2, r3], len: 3 } } /// Return the partitions in this split as a slice. fn as_slice(&self) -> &[SplitRange] { &self.partitions[..self.len] } } impl fmt::Debug for RangeTrie { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { writeln!(f, "")?; for (i, state) in self.states.iter().enumerate() { let status = if i == FINAL.as_usize() { '*' } else { ' ' }; writeln!(f, "{}{:06}: {:?}", status, i, state)?; } Ok(()) } } impl fmt::Debug for State { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let rs = self .transitions .iter() .map(|t| format!("{:?}", t)) .collect::<Vec<String>>() .join(", "); write!(f, "{}", rs) } } impl fmt::Debug for Transition { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { if self.range.start == self.range.end { write!( f, "{:02X} => {:02X}", self.range.start, self.next_id.as_usize(), ) } else { write!( f, "{:02X}-{:02X} => {:02X}", self.range.start, self.range.end, self.next_id.as_usize(), ) } } } /// Returns true if and only if the given ranges intersect. 
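///
/// For example, [0,5] and [5,9] intersect (they share the byte 5), while
/// [0,4] and [5,9] do not.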
fn intersects(r1: Utf8Range, r2: Utf8Range) -> bool { !(r1.end < r2.start || r2.end < r1.start) } #[cfg(test)] mod tests { use core::ops::RangeInclusive; use regex_syntax::utf8::Utf8Range; use super::*; fn r(range: RangeInclusive<u8>) -> Utf8Range { Utf8Range { start: *range.start(), end: *range.end() } } fn split_maybe( old: RangeInclusive<u8>, new: RangeInclusive<u8>, ) -> Option<Split> { Split::new(r(old), r(new)) } fn split( old: RangeInclusive<u8>, new: RangeInclusive<u8>, ) -> Vec<SplitRange> { split_maybe(old, new).unwrap().as_slice().to_vec() } #[test] fn no_splits() { // case 1 assert_eq!(None, split_maybe(0..=1, 2..=3)); // case 2 assert_eq!(None, split_maybe(2..=3, 0..=1)); } #[test] fn splits() { let range = |r: RangeInclusive<u8>| Utf8Range { start: *r.start(), end: *r.end(), }; let old = |r| SplitRange::Old(range(r)); let new = |r| SplitRange::New(range(r)); let both = |r| SplitRange::Both(range(r)); // case 3 assert_eq!(split(0..=0, 0..=0), vec![both(0..=0)]); assert_eq!(split(9..=9, 9..=9), vec![both(9..=9)]); // case 4 assert_eq!(split(0..=5, 0..=6), vec![both(0..=5), new(6..=6)]); assert_eq!(split(0..=5, 0..=8), vec![both(0..=5), new(6..=8)]); assert_eq!(split(5..=5, 5..=8), vec![both(5..=5), new(6..=8)]); // case 5 assert_eq!(split(1..=5, 0..=5), vec![new(0..=0), both(1..=5)]); assert_eq!(split(3..=5, 0..=5), vec![new(0..=2), both(3..=5)]); assert_eq!(split(5..=5, 0..=5), vec![new(0..=4), both(5..=5)]); // case 6 assert_eq!(split(0..=6, 0..=5), vec![both(0..=5), old(6..=6)]); assert_eq!(split(0..=8, 0..=5), vec![both(0..=5), old(6..=8)]); assert_eq!(split(5..=8, 5..=5), vec![both(5..=5), old(6..=8)]); // case 7 assert_eq!(split(0..=5, 1..=5), vec![old(0..=0), both(1..=5)]); assert_eq!(split(0..=5, 3..=5), vec![old(0..=2), both(3..=5)]); assert_eq!(split(0..=5, 5..=5), vec![old(0..=4), both(5..=5)]); // case 8 assert_eq!( split(3..=6, 2..=7), vec![new(2..=2), both(3..=6), new(7..=7)], ); assert_eq!( split(3..=6, 1..=8), vec![new(1..=2), both(3..=6), new(7..=8)], ); // case 9 assert_eq!( split(2..=7, 3..=6), vec![old(2..=2), both(3..=6), old(7..=7)], ); assert_eq!( split(1..=8, 3..=6), vec![old(1..=2), both(3..=6), old(7..=8)], ); // case 10 assert_eq!( split(3..=6, 6..=7), vec![old(3..=5), both(6..=6), new(7..=7)], ); assert_eq!( split(3..=6, 6..=8), vec![old(3..=5), both(6..=6), new(7..=8)], ); assert_eq!( split(5..=6, 6..=7), vec![old(5..=5), both(6..=6), new(7..=7)], ); // case 11 assert_eq!( split(6..=7, 3..=6), vec![new(3..=5), both(6..=6), old(7..=7)], ); assert_eq!( split(6..=8, 3..=6), vec![new(3..=5), both(6..=6), old(7..=8)], ); assert_eq!( split(6..=7, 5..=6), vec![new(5..=5), both(6..=6), old(7..=7)], ); // case 12 assert_eq!( split(3..=7, 5..=9), vec![old(3..=4), both(5..=7), new(8..=9)], ); assert_eq!( split(3..=5, 4..=6), vec![old(3..=3), both(4..=5), new(6..=6)], ); // case 13 assert_eq!( split(5..=9, 3..=7), vec![new(3..=4), both(5..=7), old(8..=9)], ); assert_eq!( split(4..=6, 3..=5), vec![new(3..=3), both(4..=5), old(6..=6)], ); } // Arguably there should be more tests here, but in practice, this data // structure is well covered by the huge number of regex tests. } <file_sep>/regex-automata/tests/fuzz/sparse.rs // This is a regression test for a bug in how special states are handled. The // fuzzer found a case where a state returned true for 'is_special_state' but // *didn't* return true for 'is_dead_state', 'is_quit_state', 'is_match_state', // 'is_start_state' or 'is_accel_state'. 
This in turn tripped a debug assertion // in the core matching loop that requires 'is_special_state' being true to // imply that one of the other routines returns true. // // We fixed this by adding some validation to both dense and sparse DFAs that // checks that this property is true for every state ID in the DFA. #[test] fn invalid_special_state() { let data = include_bytes!( "testdata/deserialize_sparse_crash-a1b839d899ced76d5d7d0f78f9edb7a421505838", ); let _ = fuzz_run(data); } // This is an interesting case where a fuzzer generated a DFA with // a transition to a state ID that decoded as a valid state, but // where the ID itself did not point to one of the two existing // states for this particular DFA. This combined with marking this // transition's state ID as special but without actually making one of the // 'is_{dead,quit,match,start,accel}_state' predicates return true ended up // tripping the 'debug_assert(dfa.is_quit_state(sid))' code in the search // routine. // // We fixed this in alloc mode by checking that every transition points to a // valid state ID. Technically this bug still exists in core-only mode, but // it's not clear how to fix it. And it's worth pointing out that the search // routine won't panic in production. It will just provide invalid results. And // that's acceptable within the contract of DFA::from_bytes. #[test] fn transition_to_invalid_but_valid_state() { let data = include_bytes!( "testdata/deserialize_sparse_crash-dbb8172d3984e7e7d03f4b5f8bb86ecd1460eff9", ); let _ = fuzz_run(data); } // Another one caught by the fuzzer where it generated a DFA that reported a // start state as a match state. Since matches are always delayed by one byte, // start states specifically cannot be match states. And indeed, the search // code relies on this. #[test] fn start_state_is_not_match_state() { let data = include_bytes!( "testdata/deserialize_sparse_crash-0da59c0434eaf35e5a6b470fa9244bb79c72b000", ); let _ = fuzz_run(data); } // This is variation on 'transition_to_invalid_but_valid_state', but happens // to a start state. Namely, the fuzz data here builds a DFA with a start // state ID that is incorrect but points to a sequence of bytes that satisfies // state decoding validation. This errant state in turn has a non-zero number // of transitions, and its those transitions that point to a state that does // *not* satisfy state decoding validation. But we never checked those. So the // fix here was to add validation of the transitions off of the start state. #[test] fn start_state_has_valid_transitions() { let data = include_bytes!( "testdata/deserialize_sparse_crash-61fd8e3003bf9d99f6c1e5a8488727eefd234b98", ); let _ = fuzz_run(data); } // This fuzz input generated a DFA with a state whose ID was in the match state // ID range, but where the state itself was encoded with zero pattern IDs. We // added validation code to check this case. #[test] fn match_state_inconsistency() { let data = include_bytes!( "testdata/deserialize_sparse_crash-c383ae07ec5e191422eadc492117439011816570", ); let _ = fuzz_run(data); } // This fuzz input generated a DFA with a state whose ID was in the accelerator // range, but who didn't have any accelerators. This violated an invariant that // assumes that if 'dfa.is_accel_state(sid)' returns true, then the state must // have some accelerators. 
#[test]
fn invalid_accelerators() {
    let data = include_bytes!(
        "testdata/deserialize_sparse_crash-d07703ceb94b10dcd9e4acb809f2051420449e2b",
    );
    let _ = fuzz_run(data);
}

// This fuzz input generated a DFA with a state whose EOI transition led to
// a quit state, which is generally considered illegal. Why? Because the EOI
// transition is defined over a special sentinel alphabet element and one
// cannot configure a DFA to "quit" on that sentinel.
#[test]
fn eoi_transition_to_quit_state() {
    let data = include_bytes!(
        "testdata/deserialize_sparse_crash-18cfc246f2ddfc3dfc92b0c7893178c7cf65efa9",
    );
    let _ = fuzz_run(data);
}

// This is the code from the fuzz target. Kind of sucks to duplicate it here,
// but this is fundamentally how we interpret the data.
fn fuzz_run(given_data: &[u8]) -> Option<()> {
    use regex_automata::dfa::Automaton;

    if given_data.len() < 2 {
        return None;
    }
    let haystack_len = usize::from(given_data[0]);
    let haystack = given_data.get(1..1 + haystack_len)?;
    let given_dfa_bytes = given_data.get(1 + haystack_len..)?;

    // We help the fuzzer along by adding a preamble to the bytes that should
    // at least make these first parts valid. The preamble expects a very
    // specific sequence of bytes, so it makes sense to just force this.
    let label = "rust-regex-automata-dfa-sparse\x00\x00";
    assert_eq!(0, label.len() % 4);
    let endianness_check = 0xFEFFu32.to_ne_bytes().to_vec();
    let version_check = 2u32.to_ne_bytes().to_vec();
    let mut dfa_bytes: Vec<u8> = vec![];
    dfa_bytes.extend(label.as_bytes());
    dfa_bytes.extend(&endianness_check);
    dfa_bytes.extend(&version_check);
    dfa_bytes.extend(given_dfa_bytes);
    // This is the real test: checking that any input we give to
    // DFA::from_bytes will never result in a panic.
    let (dfa, _) =
        regex_automata::dfa::sparse::DFA::from_bytes(&dfa_bytes).ok()?;
    let _ = dfa.try_search_fwd(&regex_automata::Input::new(haystack));

    Some(())
}
<file_sep>/regex-automata/src/dfa/regex.rs
/*!
A DFA-backed `Regex`.

This module provides [`Regex`], which is defined generically over the
[`Automaton`] trait. A `Regex` implements convenience routines you might have
come to expect, such as finding the start/end of a match and iterating over
all non-overlapping matches. This `Regex` type is limited in its capabilities
to what a DFA can provide. Therefore, APIs involving capturing groups, for
example, are not provided.

Internally, a `Regex` is composed of two DFAs. One is a "forward" DFA that
finds the end offset of a match, whereas the other is a "reverse" DFA that
finds the start offset of a match.

See the [parent module](crate::dfa) for examples.
*/

#[cfg(feature = "alloc")]
use alloc::vec::Vec;

#[cfg(feature = "dfa-build")]
use crate::dfa::dense::BuildError;
use crate::{
    dfa::{automaton::Automaton, dense},
    util::{iter, search::Input},
    Anchored, Match, MatchError,
};
#[cfg(feature = "alloc")]
use crate::{
    dfa::{sparse, StartKind},
    util::search::MatchKind,
};

// When the alloc feature is enabled, the regex type sets its A type parameter
// to default to an owned dense DFA. But without alloc, we set no default. This
// makes things a lot more convenient in the common case, since writing out the
// DFA types is pretty annoying.
//
// Since we have two different definitions but only want to write one doc
// string, we use a macro to capture the doc and other attributes once and then
// repeat them for each definition.
macro_rules!
define_regex_type { ($(#[$doc:meta])*) => { #[cfg(feature = "alloc")] $(#[$doc])* pub struct Regex<A = dense::OwnedDFA> { forward: A, reverse: A, } #[cfg(not(feature = "alloc"))] $(#[$doc])* pub struct Regex<A> { forward: A, reverse: A, } }; } define_regex_type!( /// A regular expression that uses deterministic finite automata for fast /// searching. /// /// A regular expression is comprised of two DFAs, a "forward" DFA and a /// "reverse" DFA. The forward DFA is responsible for detecting the end of /// a match while the reverse DFA is responsible for detecting the start /// of a match. Thus, in order to find the bounds of any given match, a /// forward search must first be run followed by a reverse search. A match /// found by the forward DFA guarantees that the reverse DFA will also find /// a match. /// /// The type of the DFA used by a `Regex` corresponds to the `A` type /// parameter, which must satisfy the [`Automaton`] trait. Typically, /// `A` is either a [`dense::DFA`](crate::dfa::dense::DFA) or a /// [`sparse::DFA`](crate::dfa::sparse::DFA), where dense DFAs use more /// memory but search faster, while sparse DFAs use less memory but search /// more slowly. /// /// # Crate features /// /// Note that despite what the documentation auto-generates, the _only_ /// crate feature needed to use this type is `dfa-search`. You do _not_ /// need to enable the `alloc` feature. /// /// By default, a regex's automaton type parameter is set to /// `dense::DFA<Vec<u32>>` when the `alloc` feature is enabled. For most /// in-memory work loads, this is the most convenient type that gives the /// best search performance. When the `alloc` feature is disabled, no /// default type is used. /// /// # When should I use this? /// /// Generally speaking, if you can afford the overhead of building a full /// DFA for your regex, and you don't need things like capturing groups, /// then this is a good choice if you're looking to optimize for matching /// speed. Note however that its speed may be worse than a general purpose /// regex engine if you don't provide a [`dense::Config::prefilter`] to the /// underlying DFA. /// /// # Sparse DFAs /// /// Since a `Regex` is generic over the [`Automaton`] trait, it can be /// used with any kind of DFA. While this crate constructs dense DFAs by /// default, it is easy enough to build corresponding sparse DFAs, and then /// build a regex from them: /// /// ``` /// use regex_automata::dfa::regex::Regex; /// /// // First, build a regex that uses dense DFAs. /// let dense_re = Regex::new("foo[0-9]+")?; /// /// // Second, build sparse DFAs from the forward and reverse dense DFAs. /// let fwd = dense_re.forward().to_sparse()?; /// let rev = dense_re.reverse().to_sparse()?; /// /// // Third, build a new regex from the constituent sparse DFAs. /// let sparse_re = Regex::builder().build_from_dfas(fwd, rev); /// /// // A regex that uses sparse DFAs can be used just like with dense DFAs. /// assert_eq!(true, sparse_re.is_match(b"foo123")); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// Alternatively, one can use a [`Builder`] to construct a sparse DFA /// more succinctly. (Note though that dense DFAs are still constructed /// first internally, and then converted to sparse DFAs, as in the example /// above.) /// /// ``` /// use regex_automata::dfa::regex::Regex; /// /// let sparse_re = Regex::builder().build_sparse(r"foo[0-9]+")?; /// // A regex that uses sparse DFAs can be used just like with dense DFAs. 
/// assert!(sparse_re.is_match(b"foo123")); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// # Fallibility /// /// Most of the search routines defined on this type will _panic_ when the /// underlying search fails. This might be because the DFA gave up because /// it saw a quit byte, whether configured explicitly or via heuristic /// Unicode word boundary support, although neither are enabled by default. /// Or it might fail because an invalid `Input` configuration is given, /// for example, with an unsupported [`Anchored`] mode. /// /// If you need to handle these error cases instead of allowing them to /// trigger a panic, then the lower level [`Regex::try_search`] provides /// a fallible API that never panics. /// /// # Example /// /// This example shows how to cause a search to terminate if it sees a /// `\n` byte, and handle the error returned. This could be useful if, for /// example, you wanted to prevent a user supplied pattern from matching /// across a line boundary. /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{dfa::{self, regex::Regex}, Input, MatchError}; /// /// let re = Regex::builder() /// .dense(dfa::dense::Config::new().quit(b'\n', true)) /// .build(r"foo\p{any}+bar")?; /// /// let input = Input::new("foo\nbar"); /// // Normally this would produce a match, since \p{any} contains '\n'. /// // But since we instructed the automaton to enter a quit state if a /// // '\n' is observed, this produces a match error instead. /// let expected = MatchError::quit(b'\n', 3); /// let got = re.try_search(&input).unwrap_err(); /// assert_eq!(expected, got); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[derive(Clone, Debug)] ); #[cfg(all(feature = "syntax", feature = "dfa-build"))] impl Regex { /// Parse the given regular expression using the default configuration and /// return the corresponding regex. /// /// If you want a non-default configuration, then use the [`Builder`] to /// set your own configuration. /// /// # Example /// /// ``` /// use regex_automata::{Match, dfa::regex::Regex}; /// /// let re = Regex::new("foo[0-9]+bar")?; /// assert_eq!( /// Some(Match::must(0, 3..14)), /// re.find(b"zzzfoo12345barzzz"), /// ); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn new(pattern: &str) -> Result<Regex, BuildError> { Builder::new().build(pattern) } /// Like `new`, but parses multiple patterns into a single "regex set." /// This similarly uses the default regex configuration. /// /// # Example /// /// ``` /// use regex_automata::{Match, dfa::regex::Regex}; /// /// let re = Regex::new_many(&["[a-z]+", "[0-9]+"])?; /// /// let mut it = re.find_iter(b"abc 1 foo 4567 0 quux"); /// assert_eq!(Some(Match::must(0, 0..3)), it.next()); /// assert_eq!(Some(Match::must(1, 4..5)), it.next()); /// assert_eq!(Some(Match::must(0, 6..9)), it.next()); /// assert_eq!(Some(Match::must(1, 10..14)), it.next()); /// assert_eq!(Some(Match::must(1, 15..16)), it.next()); /// assert_eq!(Some(Match::must(0, 17..21)), it.next()); /// assert_eq!(None, it.next()); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn new_many<P: AsRef<str>>( patterns: &[P], ) -> Result<Regex, BuildError> { Builder::new().build_many(patterns) } } #[cfg(all(feature = "syntax", feature = "dfa-build"))] impl Regex<sparse::DFA<Vec<u8>>> { /// Parse the given regular expression using the default configuration, /// except using sparse DFAs, and return the corresponding regex. 
/// /// If you want a non-default configuration, then use the [`Builder`] to /// set your own configuration. /// /// # Example /// /// ``` /// use regex_automata::{Match, dfa::regex::Regex}; /// /// let re = Regex::new_sparse("foo[0-9]+bar")?; /// assert_eq!( /// Some(Match::must(0, 3..14)), /// re.find(b"zzzfoo12345barzzz"), /// ); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn new_sparse( pattern: &str, ) -> Result<Regex<sparse::DFA<Vec<u8>>>, BuildError> { Builder::new().build_sparse(pattern) } /// Like `new`, but parses multiple patterns into a single "regex set" /// using sparse DFAs. This otherwise similarly uses the default regex /// configuration. /// /// # Example /// /// ``` /// use regex_automata::{Match, dfa::regex::Regex}; /// /// let re = Regex::new_many_sparse(&["[a-z]+", "[0-9]+"])?; /// /// let mut it = re.find_iter(b"abc 1 foo 4567 0 quux"); /// assert_eq!(Some(Match::must(0, 0..3)), it.next()); /// assert_eq!(Some(Match::must(1, 4..5)), it.next()); /// assert_eq!(Some(Match::must(0, 6..9)), it.next()); /// assert_eq!(Some(Match::must(1, 10..14)), it.next()); /// assert_eq!(Some(Match::must(1, 15..16)), it.next()); /// assert_eq!(Some(Match::must(0, 17..21)), it.next()); /// assert_eq!(None, it.next()); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn new_many_sparse<P: AsRef<str>>( patterns: &[P], ) -> Result<Regex<sparse::DFA<Vec<u8>>>, BuildError> { Builder::new().build_many_sparse(patterns) } } /// Convenience routines for regex construction. impl Regex<dense::DFA<&'static [u32]>> { /// Return a builder for configuring the construction of a `Regex`. /// /// This is a convenience routine to avoid needing to import the /// [`Builder`] type in common cases. /// /// # Example /// /// This example shows how to use the builder to disable UTF-8 mode /// everywhere. /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{ /// dfa::regex::Regex, nfa::thompson, util::syntax, Match, /// }; /// /// let re = Regex::builder() /// .syntax(syntax::Config::new().utf8(false)) /// .thompson(thompson::Config::new().utf8(false)) /// .build(r"foo(?-u:[^b])ar.*")?; /// let haystack = b"\xFEfoo\xFFarzz\xE2\x98\xFF\n"; /// let expected = Some(Match::must(0, 1..9)); /// let got = re.find(haystack); /// assert_eq!(expected, got); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn builder() -> Builder { Builder::new() } } /// Standard search routines for finding and iterating over matches. impl<A: Automaton> Regex<A> { /// Returns true if and only if this regex matches the given haystack. /// /// This routine may short circuit if it knows that scanning future input /// will never lead to a different result. In particular, if the underlying /// DFA enters a match state or a dead state, then this routine will return /// `true` or `false`, respectively, without inspecting any future input. /// /// # Panics /// /// This routine panics if the search could not complete. This can occur /// in a number of circumstances: /// /// * The configuration of the DFA may permit it to "quit" the search. /// For example, setting quit bytes or enabling heuristic support for /// Unicode word boundaries. The default configuration does not enable any /// option that could result in the DFA quitting. /// * When the provided `Input` configuration is not supported. For /// example, by providing an unsupported anchor mode. /// /// When a search panics, callers cannot know whether a match exists or /// not. 
/// /// Use [`Regex::try_search`] if you want to handle these error conditions. /// /// # Example /// /// ``` /// use regex_automata::dfa::regex::Regex; /// /// let re = Regex::new("foo[0-9]+bar")?; /// assert_eq!(true, re.is_match("foo12345bar")); /// assert_eq!(false, re.is_match("foobar")); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn is_match<'h, I: Into<Input<'h>>>(&self, input: I) -> bool { // Not only can we do an "earliest" search, but we can avoid doing a // reverse scan too. let input = input.into().earliest(true); self.forward().try_search_fwd(&input).map(|x| x.is_some()).unwrap() } /// Returns the start and end offset of the leftmost match. If no match /// exists, then `None` is returned. /// /// # Panics /// /// This routine panics if the search could not complete. This can occur /// in a number of circumstances: /// /// * The configuration of the DFA may permit it to "quit" the search. /// For example, setting quit bytes or enabling heuristic support for /// Unicode word boundaries. The default configuration does not enable any /// option that could result in the DFA quitting. /// * When the provided `Input` configuration is not supported. For /// example, by providing an unsupported anchor mode. /// /// When a search panics, callers cannot know whether a match exists or /// not. /// /// Use [`Regex::try_search`] if you want to handle these error conditions. /// /// # Example /// /// ``` /// use regex_automata::{Match, dfa::regex::Regex}; /// /// // Greediness is applied appropriately. /// let re = Regex::new("foo[0-9]+")?; /// assert_eq!(Some(Match::must(0, 3..11)), re.find("zzzfoo12345zzz")); /// /// // Even though a match is found after reading the first byte (`a`), /// // the default leftmost-first match semantics demand that we find the /// // earliest match that prefers earlier parts of the pattern over latter /// // parts. /// let re = Regex::new("abc|a")?; /// assert_eq!(Some(Match::must(0, 0..3)), re.find("abc")); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn find<'h, I: Into<Input<'h>>>(&self, input: I) -> Option<Match> { self.try_search(&input.into()).unwrap() } /// Returns an iterator over all non-overlapping leftmost matches in the /// given bytes. If no match exists, then the iterator yields no elements. /// /// This corresponds to the "standard" regex search iterator. /// /// # Panics /// /// If the search returns an error during iteration, then iteration /// panics. See [`Regex::find`] for the panic conditions. /// /// Use [`Regex::try_search`] with /// [`util::iter::Searcher`](crate::util::iter::Searcher) if you want to /// handle these error conditions. /// /// # Example /// /// ``` /// use regex_automata::{Match, dfa::regex::Regex}; /// /// let re = Regex::new("foo[0-9]+")?; /// let text = "foo1 foo12 foo123"; /// let matches: Vec<Match> = re.find_iter(text).collect(); /// assert_eq!(matches, vec![ /// Match::must(0, 0..4), /// Match::must(0, 5..10), /// Match::must(0, 11..17), /// ]); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn find_iter<'r, 'h, I: Into<Input<'h>>>( &'r self, input: I, ) -> FindMatches<'r, 'h, A> { let it = iter::Searcher::new(input.into()); FindMatches { re: self, it } } } /// Lower level fallible search routines that permit controlling where the /// search starts and ends in a particular sequence. impl<A: Automaton> Regex<A> { /// Returns the start and end offset of the leftmost match. If no match /// exists, then `None` is returned. 
/// /// This is like [`Regex::find`] but with two differences: /// /// 1. It is not generic over `Into<Input>` and instead accepts a /// `&Input`. This permits reusing the same `Input` for multiple searches /// without needing to create a new one. This _may_ help with latency. /// 2. It returns an error if the search could not complete where as /// [`Regex::find`] will panic. /// /// # Errors /// /// This routine errors if the search could not complete. This can occur /// in the following circumstances: /// /// * The configuration of the DFA may permit it to "quit" the search. /// For example, setting quit bytes or enabling heuristic support for /// Unicode word boundaries. The default configuration does not enable any /// option that could result in the DFA quitting. /// * When the provided `Input` configuration is not supported. For /// example, by providing an unsupported anchor mode. /// /// When a search returns an error, callers cannot know whether a match /// exists or not. #[inline] pub fn try_search( &self, input: &Input<'_>, ) -> Result<Option<Match>, MatchError> { let (fwd, rev) = (self.forward(), self.reverse()); let end = match fwd.try_search_fwd(input)? { None => return Ok(None), Some(end) => end, }; // This special cases an empty match at the beginning of the search. If // our end matches our start, then since a reverse DFA can't match past // the start, it must follow that our starting position is also our end // position. So short circuit and skip the reverse search. if input.start() == end.offset() { return Ok(Some(Match::new( end.pattern(), end.offset()..end.offset(), ))); } // We can also skip the reverse search if we know our search was // anchored. This occurs either when the input config is anchored or // when we know the regex itself is anchored. In this case, we know the // start of the match, if one is found, must be the start of the // search. if self.is_anchored(input) { return Ok(Some(Match::new( end.pattern(), input.start()..end.offset(), ))); } // N.B. I have tentatively convinced myself that it isn't necessary // to specify the specific pattern for the reverse search since the // reverse search will always find the same pattern to match as the // forward search. But I lack a rigorous proof. Why not just provide // the pattern anyway? Well, if it is needed, then leaving it out // gives us a chance to find a witness. (Also, if we don't need to // specify the pattern, then we don't need to build the reverse DFA // with 'starts_for_each_pattern' enabled.) // // We also need to be careful to disable 'earliest' for the reverse // search, since it could be enabled for the forward search. In the // reverse case, to satisfy "leftmost" criteria, we need to match // as much as we can. We also need to be careful to make the search // anchored. We don't want the reverse search to report any matches // other than the one beginning at the end of our forward search. let revsearch = input .clone() .span(input.start()..end.offset()) .anchored(Anchored::Yes) .earliest(false); let start = rev .try_search_rev(&revsearch)? .expect("reverse search must match if forward search does"); assert_eq!( start.pattern(), end.pattern(), "forward and reverse search must match same pattern", ); assert!(start.offset() <= end.offset()); Ok(Some(Match::new(end.pattern(), start.offset()..end.offset()))) } /// Returns true if either the given input specifies an anchored search /// or if the underlying DFA is always anchored. 
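    ///
    /// This is used by `try_search` above to skip the reverse scan when the
    /// start of any match must coincide with the start of the search.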
fn is_anchored(&self, input: &Input<'_>) -> bool { match input.get_anchored() { Anchored::No => self.forward().is_always_start_anchored(), Anchored::Yes | Anchored::Pattern(_) => true, } } } /// Non-search APIs for querying information about the regex and setting a /// prefilter. impl<A: Automaton> Regex<A> { /// Return the underlying DFA responsible for forward matching. /// /// This is useful for accessing the underlying DFA and converting it to /// some other format or size. See the [`Builder::build_from_dfas`] docs /// for an example of where this might be useful. pub fn forward(&self) -> &A { &self.forward } /// Return the underlying DFA responsible for reverse matching. /// /// This is useful for accessing the underlying DFA and converting it to /// some other format or size. See the [`Builder::build_from_dfas`] docs /// for an example of where this might be useful. pub fn reverse(&self) -> &A { &self.reverse } /// Returns the total number of patterns matched by this regex. /// /// # Example /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::dfa::regex::Regex; /// /// let re = Regex::new_many(&[r"[a-z]+", r"[0-9]+", r"\w+"])?; /// assert_eq!(3, re.pattern_len()); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn pattern_len(&self) -> usize { assert_eq!(self.forward().pattern_len(), self.reverse().pattern_len()); self.forward().pattern_len() } } /// An iterator over all non-overlapping matches for an infallible search. /// /// The iterator yields a [`Match`] value until no more matches could be found. /// If the underlying regex engine returns an error, then a panic occurs. /// /// The type parameters are as follows: /// /// * `A` represents the type of the underlying DFA that implements the /// [`Automaton`] trait. /// /// The lifetime parameters are as follows: /// /// * `'h` represents the lifetime of the haystack being searched. /// * `'r` represents the lifetime of the regex object itself. /// /// This iterator can be created with the [`Regex::find_iter`] method. #[derive(Debug)] pub struct FindMatches<'r, 'h, A> { re: &'r Regex<A>, it: iter::Searcher<'h>, } impl<'r, 'h, A: Automaton> Iterator for FindMatches<'r, 'h, A> { type Item = Match; #[inline] fn next(&mut self) -> Option<Match> { let FindMatches { re, ref mut it } = *self; it.advance(|input| re.try_search(input)) } } /// A builder for a regex based on deterministic finite automatons. /// /// This builder permits configuring options for the syntax of a pattern, the /// NFA construction, the DFA construction and finally the regex searching /// itself. This builder is different from a general purpose regex builder in /// that it permits fine grain configuration of the construction process. The /// trade off for this is complexity, and the possibility of setting a /// configuration that might not make sense. For example, there are two /// different UTF-8 modes: /// /// * [`syntax::Config::utf8`](crate::util::syntax::Config::utf8) controls /// whether the pattern itself can contain sub-expressions that match invalid /// UTF-8. /// * [`thompson::Config::utf8`](crate::nfa::thompson::Config::utf8) controls /// how the regex iterators themselves advance the starting position of the /// next search when a match with zero length is found. /// /// Generally speaking, callers will want to either enable all of these or /// disable all of these. 
/// /// Internally, building a regex requires building two DFAs, where one is /// responsible for finding the end of a match and the other is responsible /// for finding the start of a match. If you only need to detect whether /// something matched, or only the end of a match, then you should use a /// [`dense::Builder`] to construct a single DFA, which is cheaper than /// building two DFAs. /// /// # Build methods /// /// This builder has a few "build" methods. In general, it's the result of /// combining the following parameters: /// /// * Building one or many regexes. /// * Building a regex with dense or sparse DFAs. /// /// The simplest "build" method is [`Builder::build`]. It accepts a single /// pattern and builds a dense DFA using `usize` for the state identifier /// representation. /// /// The most general "build" method is [`Builder::build_many`], which permits /// building a regex that searches for multiple patterns simultaneously while /// using a specific state identifier representation. /// /// The most flexible "build" method, but hardest to use, is /// [`Builder::build_from_dfas`]. This exposes the fact that a [`Regex`] is /// just a pair of DFAs, and this method allows you to specify those DFAs /// exactly. /// /// # Example /// /// This example shows how to disable UTF-8 mode in the syntax and the regex /// itself. This is generally what you want for matching on arbitrary bytes. /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{ /// dfa::regex::Regex, nfa::thompson, util::syntax, Match, /// }; /// /// let re = Regex::builder() /// .syntax(syntax::Config::new().utf8(false)) /// .thompson(thompson::Config::new().utf8(false)) /// .build(r"foo(?-u:[^b])ar.*")?; /// let haystack = b"\xFEfoo\xFFarzz\xE2\x98\xFF\n"; /// let expected = Some(Match::must(0, 1..9)); /// let got = re.find(haystack); /// assert_eq!(expected, got); /// // Notice that `(?-u:[^b])` matches invalid UTF-8, /// // but the subsequent `.*` does not! Disabling UTF-8 /// // on the syntax permits this. /// assert_eq!(b"foo\xFFarzz", &haystack[got.unwrap().range()]); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[derive(Clone, Debug)] pub struct Builder { #[cfg(feature = "dfa-build")] dfa: dense::Builder, } impl Builder { /// Create a new regex builder with the default configuration. pub fn new() -> Builder { Builder { #[cfg(feature = "dfa-build")] dfa: dense::Builder::new(), } } /// Build a regex from the given pattern. /// /// If there was a problem parsing or compiling the pattern, then an error /// is returned. #[cfg(all(feature = "syntax", feature = "dfa-build"))] pub fn build(&self, pattern: &str) -> Result<Regex, BuildError> { self.build_many(&[pattern]) } /// Build a regex from the given pattern using sparse DFAs. /// /// If there was a problem parsing or compiling the pattern, then an error /// is returned. #[cfg(all(feature = "syntax", feature = "dfa-build"))] pub fn build_sparse( &self, pattern: &str, ) -> Result<Regex<sparse::DFA<Vec<u8>>>, BuildError> { self.build_many_sparse(&[pattern]) } /// Build a regex from the given patterns. 
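    ///
    /// An illustrative sketch (assuming the same patterns as the `new_many`
    /// example above):
    ///
    /// ```
    /// use regex_automata::{Match, dfa::regex::Regex};
    ///
    /// let re = Regex::builder().build_many(&["[a-z]+", "[0-9]+"])?;
    /// let mut it = re.find_iter(b"abc 123");
    /// assert_eq!(Some(Match::must(0, 0..3)), it.next());
    /// assert_eq!(Some(Match::must(1, 4..7)), it.next());
    /// assert_eq!(None, it.next());
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```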
    #[cfg(all(feature = "syntax", feature = "dfa-build"))]
    pub fn build_many<P: AsRef<str>>(
        &self,
        patterns: &[P],
    ) -> Result<Regex, BuildError> {
        let forward = self.dfa.build_many(patterns)?;
        let reverse = self
            .dfa
            .clone()
            .configure(
                dense::Config::new()
                    .prefilter(None)
                    .specialize_start_states(false)
                    .start_kind(StartKind::Anchored)
                    .match_kind(MatchKind::All),
            )
            .thompson(crate::nfa::thompson::Config::new().reverse(true))
            .build_many(patterns)?;
        Ok(self.build_from_dfas(forward, reverse))
    }

    /// Build a sparse regex from the given patterns.
    #[cfg(all(feature = "syntax", feature = "dfa-build"))]
    pub fn build_many_sparse<P: AsRef<str>>(
        &self,
        patterns: &[P],
    ) -> Result<Regex<sparse::DFA<Vec<u8>>>, BuildError> {
        let re = self.build_many(patterns)?;
        let forward = re.forward().to_sparse()?;
        let reverse = re.reverse().to_sparse()?;
        Ok(self.build_from_dfas(forward, reverse))
    }

    /// Build a regex from its component forward and reverse DFAs.
    ///
    /// This is useful when deserializing a regex from some arbitrary
    /// memory region. This is also useful for building regexes from other
    /// types of DFAs.
    ///
    /// If you're building the DFAs from scratch instead of building new DFAs
    /// from other DFAs, then you'll need to make sure that the reverse DFA is
    /// configured correctly to match the intended semantics. Namely:
    ///
    /// * It should be anchored.
    /// * It should use [`MatchKind::All`] semantics.
    /// * It should match in reverse.
    /// * Otherwise, its configuration should match the forward DFA.
    ///
    /// If these conditions aren't satisfied, then the behavior of searches is
    /// unspecified.
    ///
    /// Note that when using this constructor, no configuration is applied.
    /// Since this routine provides the DFAs to the builder, there is no
    /// opportunity to apply other configuration options.
    ///
    /// # Example
    ///
    /// This example is a bit contrived. The usual use of these methods
    /// would involve serializing `initial_re` somewhere and then deserializing
    /// it later to build a regex. But in this case, we do everything in
    /// memory.
    ///
    /// ```
    /// use regex_automata::dfa::regex::Regex;
    ///
    /// let initial_re = Regex::new("foo[0-9]+")?;
    /// assert_eq!(true, initial_re.is_match(b"foo123"));
    ///
    /// let (fwd, rev) = (initial_re.forward(), initial_re.reverse());
    /// let re = Regex::builder().build_from_dfas(fwd, rev);
    /// assert_eq!(true, re.is_match(b"foo123"));
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```
    ///
    /// This example shows how to build a `Regex` that uses sparse DFAs instead
    /// of dense DFAs without using one of the convenience `build_sparse`
    /// routines:
    ///
    /// ```
    /// use regex_automata::dfa::regex::Regex;
    ///
    /// let initial_re = Regex::new("foo[0-9]+")?;
    /// assert_eq!(true, initial_re.is_match(b"foo123"));
    ///
    /// let fwd = initial_re.forward().to_sparse()?;
    /// let rev = initial_re.reverse().to_sparse()?;
    /// let re = Regex::builder().build_from_dfas(fwd, rev);
    /// assert_eq!(true, re.is_match(b"foo123"));
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```
    pub fn build_from_dfas<A: Automaton>(
        &self,
        forward: A,
        reverse: A,
    ) -> Regex<A> {
        Regex { forward, reverse }
    }

    /// Set the syntax configuration for this builder using
    /// [`syntax::Config`](crate::util::syntax::Config).
    ///
    /// This permits setting things like case insensitivity, Unicode and multi
    /// line mode.
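    ///
    /// A minimal sketch (assuming the `case_insensitive` syntax option shown
    /// here):
    ///
    /// ```
    /// use regex_automata::{dfa::regex::Regex, util::syntax};
    ///
    /// let re = Regex::builder()
    ///     .syntax(syntax::Config::new().case_insensitive(true))
    ///     .build("foo")?;
    /// assert!(re.is_match(b"FOO"));
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```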
    #[cfg(all(feature = "syntax", feature = "dfa-build"))]
    pub fn syntax(
        &mut self,
        config: crate::util::syntax::Config,
    ) -> &mut Builder {
        self.dfa.syntax(config);
        self
    }

    /// Set the Thompson NFA configuration for this builder using
    /// [`nfa::thompson::Config`](crate::nfa::thompson::Config).
    ///
    /// This permits setting things like whether additional time should be
    /// spent shrinking the size of the NFA.
    #[cfg(all(feature = "syntax", feature = "dfa-build"))]
    pub fn thompson(
        &mut self,
        config: crate::nfa::thompson::Config,
    ) -> &mut Builder {
        self.dfa.thompson(config);
        self
    }

    /// Set the dense DFA compilation configuration for this builder using
    /// [`dense::Config`](dense::Config).
    ///
    /// This permits setting things like whether the underlying DFAs should
    /// be minimized.
    #[cfg(feature = "dfa-build")]
    pub fn dense(&mut self, config: dense::Config) -> &mut Builder {
        self.dfa.configure(config);
        self
    }
}

impl Default for Builder {
    fn default() -> Builder {
        Builder::new()
    }
}
<file_sep>/regex-automata/src/nfa/thompson/error.rs
use crate::util::{
    captures, look,
    primitives::{PatternID, StateID},
};

/// An error that can occur during the construction of a thompson NFA.
///
/// This error does not provide many introspection capabilities. There are
/// generally only two things you can do with it:
///
/// * Obtain a human readable message via its `std::fmt::Display` impl.
/// * Access an underlying [`regex_syntax::Error`] type from its `source`
/// method via the `std::error::Error` trait. This error only occurs when using
/// convenience routines for building an NFA directly from a pattern string.
///
/// Otherwise, errors typically occur when a limit has been breached. For
/// example, if the total heap usage of the compiled NFA exceeds the limit
/// set by [`Config::nfa_size_limit`](crate::nfa::thompson::Config), then
/// building the NFA will fail.
#[derive(Clone, Debug)]
pub struct BuildError {
    kind: BuildErrorKind,
}

/// The kind of error that occurred during the construction of a thompson NFA.
#[derive(Clone, Debug)]
enum BuildErrorKind {
    /// An error that occurred while parsing a regular expression. Note that
    /// this error may be printed over multiple lines, and is generally
    /// intended to be end user readable on its own.
    #[cfg(feature = "syntax")]
    Syntax(regex_syntax::Error),
    /// An error that occurs if the capturing groups provided to an NFA builder
    /// do not satisfy the documented invariants. For example, things like
    /// too many groups, missing groups, having the first (zeroth) group be
    /// named or duplicate group names within the same pattern.
    Captures(captures::GroupInfoError),
    /// An error that occurs when an NFA contains a Unicode word boundary, but
    /// where the crate was compiled without the necessary data for dealing
    /// with Unicode word boundaries.
    Word(look::UnicodeWordBoundaryError),
    /// An error that occurs if too many patterns were given to the NFA
    /// compiler.
    TooManyPatterns {
        /// The number of patterns given, which exceeds the limit.
        given: usize,
        /// The limit on the number of patterns.
        limit: usize,
    },
    /// An error that occurs if too many states are produced while building an
    /// NFA.
    TooManyStates {
        /// The minimum number of states that are desired, which exceeds the
        /// limit.
        given: usize,
        /// The limit on the number of states.
        limit: usize,
    },
    /// An error that occurs when NFA compilation exceeds a configured heap
    /// limit.
    ExceededSizeLimit {
        /// The configured limit, in bytes.
limit: usize, }, /// An error that occurs when an invalid capture group index is added to /// the NFA. An "invalid" index can be one that would otherwise overflow /// a `usize` on the current target. InvalidCaptureIndex { /// The invalid index that was given. index: u32, }, /// An error that occurs when one tries to build a reverse NFA with /// captures enabled. Currently, this isn't supported, but we probably /// should support it at some point. #[cfg(feature = "syntax")] UnsupportedCaptures, } impl BuildError { /// If this error occurred because the NFA exceeded the configured size /// limit before being built, then this returns the configured size limit. /// /// The limit returned is what was configured, and corresponds to the /// maximum amount of heap usage in bytes. pub fn size_limit(&self) -> Option<usize> { match self.kind { BuildErrorKind::ExceededSizeLimit { limit } => Some(limit), _ => None, } } fn kind(&self) -> &BuildErrorKind { &self.kind } #[cfg(feature = "syntax")] pub(crate) fn syntax(err: regex_syntax::Error) -> BuildError { BuildError { kind: BuildErrorKind::Syntax(err) } } pub(crate) fn captures(err: captures::GroupInfoError) -> BuildError { BuildError { kind: BuildErrorKind::Captures(err) } } pub(crate) fn word(err: look::UnicodeWordBoundaryError) -> BuildError { BuildError { kind: BuildErrorKind::Word(err) } } pub(crate) fn too_many_patterns(given: usize) -> BuildError { let limit = PatternID::LIMIT; BuildError { kind: BuildErrorKind::TooManyPatterns { given, limit } } } pub(crate) fn too_many_states(given: usize) -> BuildError { let limit = StateID::LIMIT; BuildError { kind: BuildErrorKind::TooManyStates { given, limit } } } pub(crate) fn exceeded_size_limit(limit: usize) -> BuildError { BuildError { kind: BuildErrorKind::ExceededSizeLimit { limit } } } pub(crate) fn invalid_capture_index(index: u32) -> BuildError { BuildError { kind: BuildErrorKind::InvalidCaptureIndex { index } } } #[cfg(feature = "syntax")] pub(crate) fn unsupported_captures() -> BuildError { BuildError { kind: BuildErrorKind::UnsupportedCaptures } } } #[cfg(feature = "std")] impl std::error::Error for BuildError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match self.kind() { #[cfg(feature = "syntax")] BuildErrorKind::Syntax(ref err) => Some(err), BuildErrorKind::Captures(ref err) => Some(err), _ => None, } } } impl core::fmt::Display for BuildError { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { match self.kind() { #[cfg(feature = "syntax")] BuildErrorKind::Syntax(_) => write!(f, "error parsing regex"), BuildErrorKind::Captures(_) => { write!(f, "error with capture groups") } BuildErrorKind::Word(_) => { write!(f, "NFA contains Unicode word boundary") } BuildErrorKind::TooManyPatterns { given, limit } => write!( f, "attempted to compile {} patterns, \ which exceeds the limit of {}", given, limit, ), BuildErrorKind::TooManyStates { given, limit } => write!( f, "attempted to compile {} NFA states, \ which exceeds the limit of {}", given, limit, ), BuildErrorKind::ExceededSizeLimit { limit } => write!( f, "heap usage during NFA compilation exceeded limit of {}", limit, ), BuildErrorKind::InvalidCaptureIndex { index } => write!( f, "capture group index {} is invalid (too big or discontinuous)", index, ), #[cfg(feature = "syntax")] BuildErrorKind::UnsupportedCaptures => write!( f, "currently captures must be disabled when compiling \ a reverse NFA", ), } } } <file_sep>/regex-automata/src/meta/regex.rs use core::{ borrow::Borrow, panic::{RefUnwindSafe, 
    UnwindSafe},
};

use alloc::{boxed::Box, sync::Arc, vec, vec::Vec};

use regex_syntax::{
    ast,
    hir::{self, Hir},
};

use crate::{
    meta::{
        error::BuildError,
        strategy::{self, Strategy},
        wrappers,
    },
    nfa::thompson::WhichCaptures,
    util::{
        captures::{Captures, GroupInfo},
        iter,
        pool::{Pool, PoolGuard},
        prefilter::Prefilter,
        primitives::{NonMaxUsize, PatternID},
        search::{HalfMatch, Input, Match, MatchKind, PatternSet, Span},
    },
};

/// A type alias for our pool of meta::Cache that fixes the type parameters to
/// what we use for the meta regex below.
type CachePool = Pool<Cache, CachePoolFn>;

/// Same as above, but for the guard returned by a pool.
type CachePoolGuard<'a> = PoolGuard<'a, Cache, CachePoolFn>;

/// The type of the closure we use to create new caches. We need to spell out
/// all of the marker traits or else we risk leaking !MARKER impls.
type CachePoolFn =
    Box<dyn Fn() -> Cache + Send + Sync + UnwindSafe + RefUnwindSafe>;

/// A regex matcher that works by composing several other regex matchers
/// automatically.
///
/// In effect, a meta regex papers over a lot of the quirks or performance
/// problems in each of the regex engines in this crate. Its goal is to provide
/// an infallible and simple API that "just does the right thing" in the common
/// case.
///
/// A meta regex is the implementation of a `Regex` in the `regex` crate.
/// Indeed, the `regex` crate API is essentially just a light wrapper over
/// this type. This includes the `regex` crate's `RegexSet` API!
///
/// # Composition
///
/// This is called a "meta" matcher precisely because it uses other regex
/// matchers to provide a convenient high level regex API. Here are some
/// examples of how other regex matchers are composed:
///
/// * When calling [`Regex::captures`], instead of immediately
/// running a slower but more capable regex engine like the
/// [`PikeVM`](crate::nfa::thompson::pikevm::PikeVM), the meta regex engine
/// will usually first look for the bounds of a match with a higher throughput
/// regex engine like a [lazy DFA](crate::hybrid). Only when a match is found
/// is a slower engine like `PikeVM` used to find the matching span for each
/// capture group.
/// * While higher throughput engines like the lazy DFA cannot handle
/// Unicode word boundaries in general, they can still be used on pure ASCII
/// haystacks by pretending that Unicode word boundaries are just plain ASCII
/// word boundaries. However, if a haystack is not ASCII, the meta regex engine
/// will automatically switch to a (possibly slower) regex engine that supports
/// Unicode word boundaries in general.
/// * In some cases where a regex pattern is just a simple literal or a small
/// set of literals, an actual regex engine won't be used at all. Instead,
/// substring or multi-substring search algorithms will be employed.
///
/// There are many other forms of composition happening too, but the above
/// should give a general idea. In particular, it may perhaps be surprising
/// that *multiple* regex engines might get executed for a single search. That
/// is, the decision of what regex engine to use is not _just_ based on the
/// pattern, but also based on the dynamic execution of the search itself.
///
/// The primary reason for this composition is performance. The fundamental
/// tension is that the faster engines tend to be less capable, and the more
/// capable engines tend to be slower.
///
/// Note that the forms of composition that are allowed are determined by
/// compile time crate features and configuration.
For example, if the `hybrid` /// feature isn't enabled, or if [`Config::hybrid`] has been disabled, then the /// meta regex engine will never use a lazy DFA. /// /// # Synchronization and cloning /// /// Most of the regex engines in this crate require some kind of mutable /// "scratch" space to read and write from while performing a search. Since /// a meta regex composes these regex engines, a meta regex also requires /// mutable scratch space. This scratch space is called a [`Cache`]. /// /// Most regex engines _also_ usually have a read-only component, typically /// a [Thompson `NFA`](crate::nfa::thompson::NFA). /// /// In order to make the `Regex` API convenient, most of the routines hide /// the fact that a `Cache` is needed at all. To achieve this, a [memory /// pool](crate::util::pool::Pool) is used internally to retrieve `Cache` /// values in a thread safe way that also permits reuse. This in turn implies /// that every such search call requires some form of synchronization. Usually /// this synchronization is fast enough to not notice, but in some cases, it /// can be a bottleneck. This typically occurs when all of the following are /// true: /// /// * The same `Regex` is shared across multiple threads simultaneously, /// usually via a [`util::lazy::Lazy`](crate::util::lazy::Lazy) or something /// similar from the `once_cell` or `lazy_static` crates. /// * The primary unit of work in each thread is a regex search. /// * Searches are run on very short haystacks. /// /// This particular case can lead to high contention on the pool used by a /// `Regex` internally, which can in turn increase latency to a noticeable /// effect. This cost can be mitigated in one of the following ways: /// /// * Use a distinct copy of a `Regex` in each thread, usually by cloning it. /// Cloning a `Regex` _does not_ do a deep copy of its read-only component. /// But it does lead to each `Regex` having its own memory pool, which in /// turn eliminates the problem of contention. In general, this technique should /// not result in any additional memory usage when compared to sharing the same /// `Regex` across multiple threads simultaneously. /// * Use lower level APIs, like [`Regex::search_with`], which permit passing /// a `Cache` explicitly. In this case, it is up to you to determine how best /// to provide a `Cache`. For example, you might put a `Cache` in thread-local /// storage if your use case allows for it. /// /// Overall, this is an issue that happens rarely in practice, but it can /// happen. /// /// # Warning: spin-locks may be used in alloc-only mode /// /// When this crate is built without the `std` feature and the high level APIs /// on a `Regex` are used, then a spin-lock will be used to synchronize access /// to an internal pool of `Cache` values. This may be undesirable because /// a spin-lock is [effectively impossible to implement correctly in user /// space][spinlocks-are-bad]. That is, more concretely, the spin-lock could /// result in a deadlock. /// /// [spinlocks-are-bad]: https://matklad.github.io/2020/01/02/spinlocks-considered-harmful.html /// /// If one wants to avoid the use of spin-locks when the `std` feature is /// disabled, then you must use APIs that accept a `Cache` value explicitly. /// For example, [`Regex::search_with`]. 
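///
/// A minimal sketch of that explicit-`Cache` pattern (illustrative only; it
/// assumes the `create_cache` and `search_with` APIs named here):
///
/// ```
/// use regex_automata::{meta::Regex, Input, Match};
///
/// let re = Regex::new(r"foo[0-9]+")?;
/// let mut cache = re.create_cache();
/// let input = Input::new("foo123");
/// assert_eq!(Some(Match::must(0, 0..6)), re.search_with(&mut cache, &input));
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```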
/// /// # Example /// /// ``` /// use regex_automata::meta::Regex; /// /// let re = Regex::new(r"^[0-9]{4}-[0-9]{2}-[0-9]{2}$")?; /// assert!(re.is_match("2010-03-14")); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// # Example: anchored search /// /// This example shows how to use [`Input::anchored`] to run an anchored /// search, even when the regex pattern itself isn't anchored. An anchored /// search guarantees that if a match is found, then the start offset of the /// match corresponds to the offset at which the search was started. /// /// ``` /// use regex_automata::{meta::Regex, Anchored, Input, Match}; /// /// let re = Regex::new(r"\bfoo\b")?; /// let input = Input::new("xx foo xx").range(3..).anchored(Anchored::Yes); /// // The offsets are in terms of the original haystack. /// assert_eq!(Some(Match::must(0, 3..6)), re.find(input)); /// /// // Notice that no match occurs here, because \b still takes the /// // surrounding context into account, even if it means looking back /// // before the start of your search. /// let hay = "xxfoo xx"; /// let input = Input::new(hay).range(2..).anchored(Anchored::Yes); /// assert_eq!(None, re.find(input)); /// // Indeed, you cannot achieve the above by simply slicing the /// // haystack itself, since the regex engine can't see the /// // surrounding context. This is why 'Input' permits setting /// // the bounds of a search! /// let input = Input::new(&hay[2..]).anchored(Anchored::Yes); /// // WRONG! /// assert_eq!(Some(Match::must(0, 0..3)), re.find(input)); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// # Example: earliest search /// /// This example shows how to use [`Input::earliest`] to run a search that /// might stop before finding the typical leftmost match. /// /// ``` /// use regex_automata::{meta::Regex, Anchored, Input, Match}; /// /// let re = Regex::new(r"[a-z]{3}|b")?; /// let input = Input::new("abc").earliest(true); /// assert_eq!(Some(Match::must(0, 1..2)), re.find(input)); /// /// // Note that "earliest" isn't really a match semantic unto itself. /// // Instead, it is merely an instruction to whatever regex engine /// // gets used internally to quit as soon as it can. For example, /// // this regex uses a different search technique, and winds up /// // producing a different (but valid) match! /// let re = Regex::new(r"abc|b")?; /// let input = Input::new("abc").earliest(true); /// assert_eq!(Some(Match::must(0, 0..3)), re.find(input)); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// # Example: change the line terminator /// /// This example shows how to enable multi-line mode by default and change /// the line terminator to the NUL byte: /// /// ``` /// use regex_automata::{meta::Regex, util::syntax, Match}; /// /// let re = Regex::builder() /// .syntax(syntax::Config::new().multi_line(true)) /// .configure(Regex::config().line_terminator(b'\x00')) /// .build(r"^foo$")?; /// let hay = "\x00foo\x00"; /// assert_eq!(Some(Match::must(0, 1..4)), re.find(hay)); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[derive(Debug)] pub struct Regex { /// The actual regex implementation. imp: Arc<RegexI>, /// A thread safe pool of caches. /// /// For the higher level search APIs, a `Cache` is automatically plucked /// from this pool before running a search. The lower level `with` methods /// permit the caller to provide their own cache, thereby bypassing /// accesses to this pool. 
/// /// Note that we put this outside the `Arc` so that cloning a `Regex` /// results in creating a fresh `CachePool`. This in turn permits callers /// to clone regexes into separate threads where each such regex gets /// the pool's "thread owner" optimization. Otherwise, if one shares the /// `Regex` directly, then the pool will go through a slower mutex path for /// all threads except for the "owner." pool: CachePool, } /// The internal implementation of `Regex`, split out so that it can be wrapped /// in an `Arc`. #[derive(Debug)] struct RegexI { /// The core matching engine. /// /// Why is this reference counted when RegexI is already wrapped in an Arc? /// Well, we need to capture this in a closure to our `Pool` below in order /// to create new `Cache` values when needed. So since it needs to be in /// two places, we make it reference counted. /// /// We make `RegexI` itself reference counted too so that `Regex` itself /// stays extremely small and very cheap to clone. strat: Arc<dyn Strategy>, /// Metadata about the regexes driving the strategy. The metadata is also /// usually stored inside the strategy too, but we put it here as well /// so that we can get quick access to it (without virtual calls) before /// executing the regex engine. For example, we use this metadata to /// detect a subset of cases where we know a match is impossible, and can /// thus avoid calling into the strategy at all. /// /// Since `RegexInfo` is stored in multiple places, it is also reference /// counted. info: RegexInfo, } /// Convenience constructors for a `Regex` using the default configuration. impl Regex { /// Builds a `Regex` from a single pattern string using the default /// configuration. /// /// If there was a problem parsing the pattern or a problem turning it into /// a regex matcher, then an error is returned. /// /// If you want to change the configuration of a `Regex`, use a [`Builder`] /// with a [`Config`]. /// /// # Example /// /// ``` /// use regex_automata::{meta::Regex, Match}; /// /// let re = Regex::new(r"(?Rm)^foo$")?; /// let hay = "\r\nfoo\r\n"; /// assert_eq!(Some(Match::must(0, 2..5)), re.find(hay)); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn new(pattern: &str) -> Result<Regex, BuildError> { Regex::builder().build(pattern) } /// Builds a `Regex` from many pattern strings using the default /// configuration. /// /// If there was a problem parsing any of the patterns or a problem turning /// them into a regex matcher, then an error is returned. /// /// If you want to change the configuration of a `Regex`, use a [`Builder`] /// with a [`Config`]. /// /// # Example: simple lexer /// /// This simplistic example leverages the multi-pattern support to build a /// simple little lexer. The pattern ID in the match tells you which regex /// matched, which in turn might be used to map back to the "type" of the /// token returned by the lexer. 
/// /// ``` /// use regex_automata::{meta::Regex, Match}; /// /// let re = Regex::new_many(&[ /// r"[[:space:]]", /// r"[A-Za-z0-9][A-Za-z0-9_]+", /// r"->", /// r".", /// ])?; /// let haystack = "fn is_boss(bruce: i32, springsteen: String) -> bool;"; /// let matches: Vec<Match> = re.find_iter(haystack).collect(); /// assert_eq!(matches, vec![ /// Match::must(1, 0..2), // 'fn' /// Match::must(0, 2..3), // ' ' /// Match::must(1, 3..10), // 'is_boss' /// Match::must(3, 10..11), // '(' /// Match::must(1, 11..16), // 'bruce' /// Match::must(3, 16..17), // ':' /// Match::must(0, 17..18), // ' ' /// Match::must(1, 18..21), // 'i32' /// Match::must(3, 21..22), // ',' /// Match::must(0, 22..23), // ' ' /// Match::must(1, 23..34), // 'springsteen' /// Match::must(3, 34..35), // ':' /// Match::must(0, 35..36), // ' ' /// Match::must(1, 36..42), // 'String' /// Match::must(3, 42..43), // ')' /// Match::must(0, 43..44), // ' ' /// Match::must(2, 44..46), // '->' /// Match::must(0, 46..47), // ' ' /// Match::must(1, 47..51), // 'bool' /// Match::must(3, 51..52), // ';' /// ]); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// One can write a lexer like the above using a regex like /// `(?P<space>[[:space:]])|(?P<ident>[A-Za-z0-9][A-Za-z0-9_]+)|...`, /// but then you need to ask whether capture group matched to determine /// which branch in the regex matched, and thus, which token the match /// corresponds to. In contrast, the above example includes the pattern ID /// in the match. There's no need to use capture groups at all. /// /// # Example: finding the pattern that caused an error /// /// When a syntax error occurs, it is possible to ask which pattern /// caused the syntax error. /// /// ``` /// use regex_automata::{meta::Regex, PatternID}; /// /// let err = Regex::new_many(&["a", "b", r"\p{Foo}", "c"]).unwrap_err(); /// assert_eq!(Some(PatternID::must(2)), err.pattern()); /// ``` /// /// # Example: zero patterns is valid /// /// Building a regex with zero patterns results in a regex that never /// matches anything. Because this routine is generic, passing an empty /// slice usually requires a turbo-fish (or something else to help type /// inference). /// /// ``` /// use regex_automata::{meta::Regex, util::syntax, Match}; /// /// let re = Regex::new_many::<&str>(&[])?; /// assert_eq!(None, re.find("")); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn new_many<P: AsRef<str>>( patterns: &[P], ) -> Result<Regex, BuildError> { Regex::builder().build_many(patterns) } /// Return a default configuration for a `Regex`. /// /// This is a convenience routine to avoid needing to import the [`Config`] /// type when customizing the construction of a `Regex`. /// /// # Example: lower the NFA size limit /// /// In some cases, the default size limit might be too big. The size limit /// can be lowered, which will prevent large regex patterns from compiling. /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::meta::Regex; /// /// let result = Regex::builder() /// .configure(Regex::config().nfa_size_limit(Some(20 * (1<<10)))) /// // Not even 20KB is enough to build a single large Unicode class! /// .build(r"\pL"); /// assert!(result.is_err()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn config() -> Config { Config::new() } /// Return a builder for configuring the construction of a `Regex`. /// /// This is a convenience routine to avoid needing to import the /// [`Builder`] type in common cases. 
/// /// # Example: change the line terminator /// /// This example shows how to enable multi-line mode by default and change /// the line terminator to the NUL byte: /// /// ``` /// use regex_automata::{meta::Regex, util::syntax, Match}; /// /// let re = Regex::builder() /// .syntax(syntax::Config::new().multi_line(true)) /// .configure(Regex::config().line_terminator(b'\x00')) /// .build(r"^foo$")?; /// let hay = "\x00foo\x00"; /// assert_eq!(Some(Match::must(0, 1..4)), re.find(hay)); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn builder() -> Builder { Builder::new() } } /// High level convenience routines for using a regex to search a haystack. impl Regex { /// Returns true if and only if this regex matches the given haystack. /// /// This routine may short circuit if it knows that scanning future input /// will never lead to a different result. (Consider how this might make /// a difference given the regex `a+` on the haystack `aaaaaaaaaaaaaaa`. /// This routine _may_ stop after it sees the first `a`, but routines like /// `find` need to continue searching because `+` is greedy by default.) /// /// # Example /// /// ``` /// use regex_automata::meta::Regex; /// /// let re = Regex::new("foo[0-9]+bar")?; /// /// assert!(re.is_match("foo12345bar")); /// assert!(!re.is_match("foobar")); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// # Example: consistency with search APIs /// /// `is_match` is guaranteed to return `true` whenever `find` returns a /// match. This includes searches that are executed entirely within a /// codepoint: /// /// ``` /// use regex_automata::{meta::Regex, Input}; /// /// let re = Regex::new("a*")?; /// /// // This doesn't match because the default configuration bans empty /// // matches from splitting a codepoint. /// assert!(!re.is_match(Input::new("☃").span(1..2))); /// assert_eq!(None, re.find(Input::new("☃").span(1..2))); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// Notice that when UTF-8 mode is disabled, then the above reports a /// match because the restriction against zero-width matches that split a /// codepoint has been lifted: /// /// ``` /// use regex_automata::{meta::Regex, Input, Match}; /// /// let re = Regex::builder() /// .configure(Regex::config().utf8_empty(false)) /// .build("a*")?; /// /// assert!(re.is_match(Input::new("☃").span(1..2))); /// assert_eq!( /// Some(Match::must(0, 1..1)), /// re.find(Input::new("☃").span(1..2)), /// ); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// A similar idea applies when using line anchors with CRLF mode enabled, /// which prevents them from matching between a `\r` and a `\n`. /// /// ``` /// use regex_automata::{meta::Regex, Input, Match}; /// /// let re = Regex::new(r"(?Rm:$)")?; /// assert!(!re.is_match(Input::new("\r\n").span(1..1))); /// // A regular line anchor, which only considers \n as a /// // line terminator, will match. /// let re = Regex::new(r"(?m:$)")?; /// assert!(re.is_match(Input::new("\r\n").span(1..1))); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn is_match<'h, I: Into<Input<'h>>>(&self, input: I) -> bool { let input = input.into().earliest(true); if self.imp.info.is_impossible(&input) { return false; } let mut guard = self.pool.get(); let result = self.imp.strat.is_match(&mut guard, &input); // See 'Regex::search' for why we put the guard back explicitly. PoolGuard::put(guard); result } /// Executes a leftmost search and returns the first match that is found, /// if one exists. 
/// /// # Example /// /// ``` /// use regex_automata::{meta::Regex, Match}; /// /// let re = Regex::new("foo[0-9]+")?; /// assert_eq!(Some(Match::must(0, 0..8)), re.find("foo12345")); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn find<'h, I: Into<Input<'h>>>(&self, input: I) -> Option<Match> { self.search(&input.into()) } /// Executes a leftmost forward search and writes the spans of capturing /// groups that participated in a match into the provided [`Captures`] /// value. If no match was found, then [`Captures::is_match`] is guaranteed /// to return `false`. /// /// # Example /// /// ``` /// use regex_automata::{meta::Regex, Span}; /// /// let re = Regex::new(r"^([0-9]{4})-([0-9]{2})-([0-9]{2})$")?; /// let mut caps = re.create_captures(); /// /// re.captures("2010-03-14", &mut caps); /// assert!(caps.is_match()); /// assert_eq!(Some(Span::from(0..4)), caps.get_group(1)); /// assert_eq!(Some(Span::from(5..7)), caps.get_group(2)); /// assert_eq!(Some(Span::from(8..10)), caps.get_group(3)); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn captures<'h, I: Into<Input<'h>>>( &self, input: I, caps: &mut Captures, ) { self.search_captures(&input.into(), caps) } /// Returns an iterator over all non-overlapping leftmost matches in /// the given haystack. If no match exists, then the iterator yields no /// elements. /// /// # Example /// /// ``` /// use regex_automata::{meta::Regex, Match}; /// /// let re = Regex::new("foo[0-9]+")?; /// let haystack = "foo1 foo12 foo123"; /// let matches: Vec<Match> = re.find_iter(haystack).collect(); /// assert_eq!(matches, vec![ /// Match::must(0, 0..4), /// Match::must(0, 5..10), /// Match::must(0, 11..17), /// ]); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn find_iter<'r, 'h, I: Into<Input<'h>>>( &'r self, input: I, ) -> FindMatches<'r, 'h> { let cache = self.pool.get(); let it = iter::Searcher::new(input.into()); FindMatches { re: self, cache, it } } /// Returns an iterator over all non-overlapping `Captures` values. If no /// match exists, then the iterator yields no elements. /// /// This yields the same matches as [`Regex::find_iter`], but it includes /// the spans of all capturing groups that participate in each match. /// /// **Tip:** See [`util::iter::Searcher`](crate::util::iter::Searcher) for /// how to correctly iterate over all matches in a haystack while avoiding /// the creation of a new `Captures` value for every match. (Which you are /// forced to do with an `Iterator`.) /// /// # Example /// /// ``` /// use regex_automata::{meta::Regex, Span}; /// /// let re = Regex::new("foo(?P<numbers>[0-9]+)")?; /// /// let haystack = "foo1 foo12 foo123"; /// let matches: Vec<Span> = re /// .captures_iter(haystack) /// // The unwrap is OK since 'numbers' matches if the pattern matches. /// .map(|caps| caps.get_group_by_name("numbers").unwrap()) /// .collect(); /// assert_eq!(matches, vec![ /// Span::from(3..4), /// Span::from(8..10), /// Span::from(14..17), /// ]); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn captures_iter<'r, 'h, I: Into<Input<'h>>>( &'r self, input: I, ) -> CapturesMatches<'r, 'h> { let cache = self.pool.get(); let caps = self.create_captures(); let it = iter::Searcher::new(input.into()); CapturesMatches { re: self, cache, caps, it } } /// Returns an iterator of spans of the haystack given, delimited by a /// match of the regex. 
Namely, each element of the iterator corresponds to /// a part of the haystack that *isn't* matched by the regular expression. /// /// # Example /// /// To split a string delimited by arbitrary amounts of spaces or tabs: /// /// ``` /// use regex_automata::meta::Regex; /// /// let re = Regex::new(r"[ \t]+")?; /// let hay = "a b \t c\td e"; /// let fields: Vec<&str> = re.split(hay).map(|span| &hay[span]).collect(); /// assert_eq!(fields, vec!["a", "b", "c", "d", "e"]); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// # Example: more cases /// /// Basic usage: /// /// ``` /// use regex_automata::meta::Regex; /// /// let re = Regex::new(r" ")?; /// let hay = "Mary had a little lamb"; /// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect(); /// assert_eq!(got, vec!["Mary", "had", "a", "little", "lamb"]); /// /// let re = Regex::new(r"X")?; /// let hay = ""; /// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect(); /// assert_eq!(got, vec![""]); /// /// let re = Regex::new(r"X")?; /// let hay = "lionXXtigerXleopard"; /// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect(); /// assert_eq!(got, vec!["lion", "", "tiger", "leopard"]); /// /// let re = Regex::new(r"::")?; /// let hay = "lion::tiger::leopard"; /// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect(); /// assert_eq!(got, vec!["lion", "tiger", "leopard"]); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// If a haystack contains multiple contiguous matches, you will end up /// with empty spans yielded by the iterator: /// /// ``` /// use regex_automata::meta::Regex; /// /// let re = Regex::new(r"X")?; /// let hay = "XXXXaXXbXc"; /// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect(); /// assert_eq!(got, vec!["", "", "", "", "a", "", "b", "c"]); /// /// let re = Regex::new(r"/")?; /// let hay = "(///)"; /// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect(); /// assert_eq!(got, vec!["(", "", "", ")"]); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// Separators at the start or end of a haystack are neighbored by empty /// spans. /// /// ``` /// use regex_automata::meta::Regex; /// /// let re = Regex::new(r"0")?; /// let hay = "010"; /// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect(); /// assert_eq!(got, vec!["", "1", ""]); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// When the empty string is used as a regex, it splits at every valid /// UTF-8 boundary by default (which includes the beginning and end of the /// haystack): /// /// ``` /// use regex_automata::meta::Regex; /// /// let re = Regex::new(r"")?; /// let hay = "rust"; /// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect(); /// assert_eq!(got, vec!["", "r", "u", "s", "t", ""]); /// /// // Splitting by an empty string is UTF-8 aware by default! /// let re = Regex::new(r"")?; /// let hay = "☃"; /// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect(); /// assert_eq!(got, vec!["", "☃", ""]); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// But note that UTF-8 mode for empty strings can be disabled, which will /// then result in a match at every byte offset in the haystack, /// including between every UTF-8 code unit. 
/// /// ``` /// use regex_automata::meta::Regex; /// /// let re = Regex::builder() /// .configure(Regex::config().utf8_empty(false)) /// .build(r"")?; /// let hay = "☃".as_bytes(); /// let got: Vec<&[u8]> = re.split(hay).map(|sp| &hay[sp]).collect(); /// assert_eq!(got, vec![ /// // Writing byte string slices is just brutal. The problem is that /// // b"foo" has type &[u8; 3] instead of &[u8]. /// &[][..], &[b'\xE2'][..], &[b'\x98'][..], &[b'\x83'][..], &[][..], /// ]); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// Contiguous separators (commonly shows up with whitespace), can lead to /// possibly surprising behavior. For example, this code is correct: /// /// ``` /// use regex_automata::meta::Regex; /// /// let re = Regex::new(r" ")?; /// let hay = " a b c"; /// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect(); /// assert_eq!(got, vec!["", "", "", "", "a", "", "b", "c"]); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// It does *not* give you `["a", "b", "c"]`. For that behavior, you'd want /// to match contiguous space characters: /// /// ``` /// use regex_automata::meta::Regex; /// /// let re = Regex::new(r" +")?; /// let hay = " a b c"; /// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect(); /// // N.B. This does still include a leading empty span because ' +' /// // matches at the beginning of the haystack. /// assert_eq!(got, vec!["", "a", "b", "c"]); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn split<'r, 'h, I: Into<Input<'h>>>( &'r self, input: I, ) -> Split<'r, 'h> { Split { finder: self.find_iter(input), last: 0 } } /// Returns an iterator of at most `limit` spans of the haystack given, /// delimited by a match of the regex. (A `limit` of `0` will return no /// spans.) Namely, each element of the iterator corresponds to a part /// of the haystack that *isn't* matched by the regular expression. The /// remainder of the haystack that is not split will be the last element in /// the iterator. /// /// # Example /// /// Get the first two words in some haystack: /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::meta::Regex; /// /// let re = Regex::new(r"\W+").unwrap(); /// let hay = "Hey! 
How are you?"; /// let fields: Vec<&str> = /// re.splitn(hay, 3).map(|span| &hay[span]).collect(); /// assert_eq!(fields, vec!["Hey", "How", "are you?"]); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// # Examples: more cases /// /// ``` /// use regex_automata::meta::Regex; /// /// let re = Regex::new(r" ")?; /// let hay = "Mary had a little lamb"; /// let got: Vec<&str> = re.splitn(hay, 3).map(|sp| &hay[sp]).collect(); /// assert_eq!(got, vec!["Mary", "had", "a little lamb"]); /// /// let re = Regex::new(r"X")?; /// let hay = ""; /// let got: Vec<&str> = re.splitn(hay, 3).map(|sp| &hay[sp]).collect(); /// assert_eq!(got, vec![""]); /// /// let re = Regex::new(r"X")?; /// let hay = "lionXXtigerXleopard"; /// let got: Vec<&str> = re.splitn(hay, 3).map(|sp| &hay[sp]).collect(); /// assert_eq!(got, vec!["lion", "", "tigerXleopard"]); /// /// let re = Regex::new(r"::")?; /// let hay = "lion::tiger::leopard"; /// let got: Vec<&str> = re.splitn(hay, 2).map(|sp| &hay[sp]).collect(); /// assert_eq!(got, vec!["lion", "tiger::leopard"]); /// /// let re = Regex::new(r"X")?; /// let hay = "abcXdef"; /// let got: Vec<&str> = re.splitn(hay, 1).map(|sp| &hay[sp]).collect(); /// assert_eq!(got, vec!["abcXdef"]); /// /// let re = Regex::new(r"X")?; /// let hay = "abcdef"; /// let got: Vec<&str> = re.splitn(hay, 2).map(|sp| &hay[sp]).collect(); /// assert_eq!(got, vec!["abcdef"]); /// /// let re = Regex::new(r"X")?; /// let hay = "abcXdef"; /// let got: Vec<&str> = re.splitn(hay, 0).map(|sp| &hay[sp]).collect(); /// assert!(got.is_empty()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn splitn<'r, 'h, I: Into<Input<'h>>>( &'r self, input: I, limit: usize, ) -> SplitN<'r, 'h> { SplitN { splits: self.split(input), limit } } } /// Lower level search routines that give more control. impl Regex { /// Returns the start and end offset of the leftmost match. If no match /// exists, then `None` is returned. /// /// This is like [`Regex::find`] but, but it accepts a concrete `&Input` /// instead of an `Into<Input>`. /// /// # Example /// /// ``` /// use regex_automata::{meta::Regex, Input, Match}; /// /// let re = Regex::new(r"Samwise|Sam")?; /// let input = Input::new( /// "one of the chief characters, Samwise the Brave", /// ); /// assert_eq!(Some(Match::must(0, 29..36)), re.search(&input)); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn search(&self, input: &Input<'_>) -> Option<Match> { if self.imp.info.is_impossible(input) { return None; } let mut guard = self.pool.get(); let result = self.imp.strat.search(&mut guard, input); // We do this dance with the guard and explicitly put it back in the // pool because it seems to result in better codegen. If we let the // guard's Drop impl put it back in the pool, then functions like // ptr::drop_in_place get called and they *don't* get inlined. This // isn't usually a big deal, but in latency sensitive benchmarks the // extra function call can matter. // // I used `rebar measure -f '^grep/every-line$' -e meta` to measure // the effects here. // // Note that this doesn't eliminate the latency effects of using the // pool. There is still some (minor) cost for the "thread owner" of the // pool. (i.e., The thread that first calls a regex search routine.) // However, for other threads using the regex, the pool access can be // quite expensive as it goes through a mutex. 
Callers can avoid this // by either cloning the Regex (which creates a distinct copy of the // pool), or callers can use the lower level APIs that accept a 'Cache' // directly and do their own handling. PoolGuard::put(guard); result } /// Returns the end offset of the leftmost match. If no match exists, then /// `None` is returned. /// /// This is distinct from [`Regex::search`] in that it only returns the end /// of a match and not the start of the match. Depending on a variety of /// implementation details, this _may_ permit the regex engine to do less /// overall work. For example, if a DFA is being used to execute a search, /// then the start of a match usually requires running a separate DFA in /// reverse to find the start of a match. If one only needs the end of /// a match, then the separate reverse scan to find the start of a match /// can be skipped. (Note that the reverse scan is avoided even when using /// `Regex::search` when possible, for example, in the case of an anchored /// search.) /// /// # Example /// /// ``` /// use regex_automata::{meta::Regex, Input, HalfMatch}; /// /// let re = Regex::new(r"Samwise|Sam")?; /// let input = Input::new( /// "one of the chief characters, Samwise the Brave", /// ); /// assert_eq!(Some(HalfMatch::must(0, 36)), re.search_half(&input)); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn search_half(&self, input: &Input<'_>) -> Option<HalfMatch> { if self.imp.info.is_impossible(input) { return None; } let mut guard = self.pool.get(); let result = self.imp.strat.search_half(&mut guard, input); // See 'Regex::search' for why we put the guard back explicitly. PoolGuard::put(guard); result } /// Executes a leftmost forward search and writes the spans of capturing /// groups that participated in a match into the provided [`Captures`] /// value. If no match was found, then [`Captures::is_match`] is guaranteed /// to return `false`. /// /// This is like [`Regex::captures`], but it accepts a concrete `&Input` /// instead of an `Into<Input>`. /// /// # Example: specific pattern search /// /// This example shows how to build a multi-pattern `Regex` that permits /// searching for specific patterns. /// /// ``` /// use regex_automata::{ /// meta::Regex, /// Anchored, Match, PatternID, Input, /// }; /// /// let re = Regex::new_many(&["[a-z0-9]{6}", "[a-z][a-z0-9]{5}"])?; /// let mut caps = re.create_captures(); /// let haystack = "foo123"; /// /// // Since we are using the default leftmost-first match and both /// // patterns match at the same starting position, only the first pattern /// // will be returned in this case when doing a search for any of the /// // patterns. /// let expected = Some(Match::must(0, 0..6)); /// re.search_captures(&Input::new(haystack), &mut caps); /// assert_eq!(expected, caps.get_match()); /// /// // But if we want to check whether some other pattern matches, then we /// // can provide its pattern ID. /// let expected = Some(Match::must(1, 0..6)); /// let input = Input::new(haystack) /// .anchored(Anchored::Pattern(PatternID::must(1))); /// re.search_captures(&input, &mut caps); /// assert_eq!(expected, caps.get_match()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// # Example: specifying the bounds of a search /// /// This example shows how providing the bounds of a search can produce /// different results than simply sub-slicing the haystack.
/// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{meta::Regex, Match, Input}; /// /// let re = Regex::new(r"\b[0-9]{3}\b")?; /// let mut caps = re.create_captures(); /// let haystack = "foo123bar"; /// /// // Since we sub-slice the haystack, the search doesn't know about /// // the larger context and assumes that `123` is surrounded by word /// // boundaries. And of course, the match position is reported relative /// // to the sub-slice as well, which means we get `0..3` instead of /// // `3..6`. /// let expected = Some(Match::must(0, 0..3)); /// let input = Input::new(&haystack[3..6]); /// re.search_captures(&input, &mut caps); /// assert_eq!(expected, caps.get_match()); /// /// // But if we provide the bounds of the search within the context of the /// // entire haystack, then the search can take the surrounding context /// // into account. (And if we did find a match, it would be reported /// // as a valid offset into `haystack` instead of its sub-slice.) /// let expected = None; /// let input = Input::new(haystack).range(3..6); /// re.search_captures(&input, &mut caps); /// assert_eq!(expected, caps.get_match()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn search_captures(&self, input: &Input<'_>, caps: &mut Captures) { caps.set_pattern(None); let pid = self.search_slots(input, caps.slots_mut()); caps.set_pattern(pid); } /// Executes a leftmost forward search and writes the spans of capturing /// groups that participated in a match into the provided `slots`, and /// returns the matching pattern ID. The contents of the slots for patterns /// other than the matching pattern are unspecified. If no match was found, /// then `None` is returned and the contents of `slots` is unspecified. /// /// This is like [`Regex::search`], but it accepts a raw slots slice /// instead of a `Captures` value. This is useful in contexts where you /// don't want or need to allocate a `Captures`. /// /// It is legal to pass _any_ number of slots to this routine. If the regex /// engine would otherwise write a slot offset that doesn't fit in the /// provided slice, then it is simply skipped. In general though, there are /// usually three slice lengths you might want to use: /// /// * An empty slice, if you only care about which pattern matched. /// * A slice with [`pattern_len() * 2`](Regex::pattern_len) slots, if you /// only care about the overall match spans for each matching pattern. /// * A slice with /// [`slot_len()`](crate::util::captures::GroupInfo::slot_len) slots, which /// permits recording match offsets for every capturing group in every /// pattern. /// /// # Example /// /// This example shows how to find the overall match offsets in a /// multi-pattern search without allocating a `Captures` value. Indeed, we /// can put our slots right on the stack. /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{meta::Regex, PatternID, Input}; /// /// let re = Regex::new_many(&[ /// r"\pL+", /// r"\d+", /// ])?; /// let input = Input::new("!@#123"); /// /// // We only care about the overall match offsets here, so we just /// // allocate two slots for each pattern. Each slot records the start /// // and end of the match. /// let mut slots = [None; 4]; /// let pid = re.search_slots(&input, &mut slots); /// assert_eq!(Some(PatternID::must(1)), pid); /// /// // The overall match offsets are always at 'pid * 2' and 'pid * 2 + 1'. 
/// // See 'GroupInfo' for more details on the mapping between groups and /// // slot indices. /// let slot_start = pid.unwrap().as_usize() * 2; /// let slot_end = slot_start + 1; /// assert_eq!(Some(3), slots[slot_start].map(|s| s.get())); /// assert_eq!(Some(6), slots[slot_end].map(|s| s.get())); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn search_slots( &self, input: &Input<'_>, slots: &mut [Option<NonMaxUsize>], ) -> Option<PatternID> { if self.imp.info.is_impossible(input) { return None; } let mut guard = self.pool.get(); let result = self.imp.strat.search_slots(&mut guard, input, slots); // See 'Regex::search' for why we put the guard back explicitly. PoolGuard::put(guard); result } /// Writes the set of patterns that match anywhere in the given search /// configuration to `patset`. If multiple patterns match at the same /// position and this `Regex` was configured with [`MatchKind::All`] /// semantics, then all matching patterns are written to the given set. /// /// Unless all of the patterns in this `Regex` are anchored, then generally /// speaking, this will scan the entire haystack. /// /// This search routine *does not* clear the pattern set. This gives some /// flexibility to the caller (e.g., running multiple searches with the /// same pattern set), but does make the API bug-prone if you're reusing /// the same pattern set for multiple searches but intended them to be /// independent. /// /// If a pattern ID matched but the given `PatternSet` does not have /// sufficient capacity to store it, then it is not inserted and silently /// dropped. /// /// # Example /// /// This example shows how to find all matching patterns in a haystack, /// even when some patterns match at the same position as other patterns. /// It is important that we configure the `Regex` with [`MatchKind::All`] /// semantics here, or else overlapping matches will not be reported. /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{meta::Regex, Input, MatchKind, PatternSet}; /// /// let patterns = &[ /// r"\w+", r"\d+", r"\pL+", r"foo", r"bar", r"barfoo", r"foobar", /// ]; /// let re = Regex::builder() /// .configure(Regex::config().match_kind(MatchKind::All)) /// .build_many(patterns)?; /// /// let input = Input::new("foobar"); /// let mut patset = PatternSet::new(re.pattern_len()); /// re.which_overlapping_matches(&input, &mut patset); /// let expected = vec![0, 2, 3, 4, 6]; /// let got: Vec<usize> = patset.iter().map(|p| p.as_usize()).collect(); /// assert_eq!(expected, got); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn which_overlapping_matches( &self, input: &Input<'_>, patset: &mut PatternSet, ) { if self.imp.info.is_impossible(input) { return; } let mut guard = self.pool.get(); let result = self .imp .strat .which_overlapping_matches(&mut guard, input, patset); // See 'Regex::search' for why we put the guard back explicitly. PoolGuard::put(guard); result } } /// Lower level search routines that give more control, and require the caller /// to provide an explicit [`Cache`] parameter. impl Regex { /// This is like [`Regex::search`], but requires the caller to /// explicitly pass a [`Cache`]. /// /// # Why pass a `Cache` explicitly? /// /// Passing a `Cache` explicitly will bypass the use of an internal memory /// pool used by `Regex` to get a `Cache` for a search. The use of this /// pool can be slower in some cases when a `Regex` is used from multiple /// threads simultaneously. 
Typically, performance only becomes an issue /// when there is heavy contention, which in turn usually only occurs /// when each thread's primary unit of work is a regex search on a small /// haystack. /// /// # Example /// /// ``` /// use regex_automata::{meta::Regex, Input, Match}; /// /// let re = Regex::new(r"Samwise|Sam")?; /// let mut cache = re.create_cache(); /// let input = Input::new( /// "one of the chief characters, Samwise the Brave", /// ); /// assert_eq!( /// Some(Match::must(0, 29..36)), /// re.search_with(&mut cache, &input), /// ); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn search_with( &self, cache: &mut Cache, input: &Input<'_>, ) -> Option<Match> { if self.imp.info.is_impossible(input) { return None; } self.imp.strat.search(cache, input) } /// This is like [`Regex::search_half`], but requires the caller to /// explicitly pass a [`Cache`]. /// /// # Why pass a `Cache` explicitly? /// /// Passing a `Cache` explicitly will bypass the use of an internal memory /// pool used by `Regex` to get a `Cache` for a search. The use of this /// pool can be slower in some cases when a `Regex` is used from multiple /// threads simultaneously. Typically, performance only becomes an issue /// when there is heavy contention, which in turn usually only occurs /// when each thread's primary unit of work is a regex search on a small /// haystack. /// /// # Example /// /// ``` /// use regex_automata::{meta::Regex, Input, HalfMatch}; /// /// let re = Regex::new(r"Samwise|Sam")?; /// let mut cache = re.create_cache(); /// let input = Input::new( /// "one of the chief characters, Samwise the Brave", /// ); /// assert_eq!( /// Some(HalfMatch::must(0, 36)), /// re.search_half_with(&mut cache, &input), /// ); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn search_half_with( &self, cache: &mut Cache, input: &Input<'_>, ) -> Option<HalfMatch> { if self.imp.info.is_impossible(input) { return None; } self.imp.strat.search_half(cache, input) } /// This is like [`Regex::search_captures`], but requires the caller to /// explicitly pass a [`Cache`]. /// /// # Why pass a `Cache` explicitly? /// /// Passing a `Cache` explicitly will bypass the use of an internal memory /// pool used by `Regex` to get a `Cache` for a search. The use of this /// pool can be slower in some cases when a `Regex` is used from multiple /// threads simultaneously. Typically, performance only becomes an issue /// when there is heavy contention, which in turn usually only occurs /// when each thread's primary unit of work is a regex search on a small /// haystack. /// /// # Example: specific pattern search /// /// This example shows how to build a multi-pattern `Regex` that permits /// searching for specific patterns. /// /// ``` /// use regex_automata::{ /// meta::Regex, /// Anchored, Match, PatternID, Input, /// }; /// /// let re = Regex::new_many(&["[a-z0-9]{6}", "[a-z][a-z0-9]{5}"])?; /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); /// let haystack = "foo123"; /// /// // Since we are using the default leftmost-first match and both /// // patterns match at the same starting position, only the first pattern /// // will be returned in this case when doing a search for any of the /// // patterns. 
/// let expected = Some(Match::must(0, 0..6)); /// re.search_captures_with(&mut cache, &Input::new(haystack), &mut caps); /// assert_eq!(expected, caps.get_match()); /// /// // But if we want to check whether some other pattern matches, then we /// // can provide its pattern ID. /// let expected = Some(Match::must(1, 0..6)); /// let input = Input::new(haystack) /// .anchored(Anchored::Pattern(PatternID::must(1))); /// re.search_captures_with(&mut cache, &input, &mut caps); /// assert_eq!(expected, caps.get_match()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// # Example: specifying the bounds of a search /// /// This example shows how providing the bounds of a search can produce /// different results than simply sub-slicing the haystack. /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{meta::Regex, Match, Input}; /// /// let re = Regex::new(r"\b[0-9]{3}\b")?; /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); /// let haystack = "foo123bar"; /// /// // Since we sub-slice the haystack, the search doesn't know about /// // the larger context and assumes that `123` is surrounded by word /// // boundaries. And of course, the match position is reported relative /// // to the sub-slice as well, which means we get `0..3` instead of /// // `3..6`. /// let expected = Some(Match::must(0, 0..3)); /// let input = Input::new(&haystack[3..6]); /// re.search_captures_with(&mut cache, &input, &mut caps); /// assert_eq!(expected, caps.get_match()); /// /// // But if we provide the bounds of the search within the context of the /// // entire haystack, then the search can take the surrounding context /// // into account. (And if we did find a match, it would be reported /// // as a valid offset into `haystack` instead of its sub-slice.) /// let expected = None; /// let input = Input::new(haystack).range(3..6); /// re.search_captures_with(&mut cache, &input, &mut caps); /// assert_eq!(expected, caps.get_match()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn search_captures_with( &self, cache: &mut Cache, input: &Input<'_>, caps: &mut Captures, ) { caps.set_pattern(None); let pid = self.search_slots_with(cache, input, caps.slots_mut()); caps.set_pattern(pid); } /// This is like [`Regex::search_slots`], but requires the caller to /// explicitly pass a [`Cache`]. /// /// # Why pass a `Cache` explicitly? /// /// Passing a `Cache` explicitly will bypass the use of an internal memory /// pool used by `Regex` to get a `Cache` for a search. The use of this /// pool can be slower in some cases when a `Regex` is used from multiple /// threads simultaneously. Typically, performance only becomes an issue /// when there is heavy contention, which in turn usually only occurs /// when each thread's primary unit of work is a regex search on a small /// haystack. /// /// # Example /// /// This example shows how to find the overall match offsets in a /// multi-pattern search without allocating a `Captures` value. Indeed, we /// can put our slots right on the stack. /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{meta::Regex, PatternID, Input}; /// /// let re = Regex::new_many(&[ /// r"\pL+", /// r"\d+", /// ])?; /// let mut cache = re.create_cache(); /// let input = Input::new("!@#123"); /// /// // We only care about the overall match offsets here, so we just /// // allocate two slots for each pattern. 
Each slot records the start /// // and end of the match. /// let mut slots = [None; 4]; /// let pid = re.search_slots_with(&mut cache, &input, &mut slots); /// assert_eq!(Some(PatternID::must(1)), pid); /// /// // The overall match offsets are always at 'pid * 2' and 'pid * 2 + 1'. /// // See 'GroupInfo' for more details on the mapping between groups and /// // slot indices. /// let slot_start = pid.unwrap().as_usize() * 2; /// let slot_end = slot_start + 1; /// assert_eq!(Some(3), slots[slot_start].map(|s| s.get())); /// assert_eq!(Some(6), slots[slot_end].map(|s| s.get())); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn search_slots_with( &self, cache: &mut Cache, input: &Input<'_>, slots: &mut [Option<NonMaxUsize>], ) -> Option<PatternID> { if self.imp.info.is_impossible(input) { return None; } self.imp.strat.search_slots(cache, input, slots) } /// This is like [`Regex::which_overlapping_matches`], but requires the /// caller to explicitly pass a [`Cache`]. /// /// Passing a `Cache` explicitly will bypass the use of an internal memory /// pool used by `Regex` to get a `Cache` for a search. The use of this /// pool can be slower in some cases when a `Regex` is used from multiple /// threads simultaneously. Typically, performance only becomes an issue /// when there is heavy contention, which in turn usually only occurs /// when each thread's primary unit of work is a regex search on a small /// haystack. /// /// # Why pass a `Cache` explicitly? /// /// # Example /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{meta::Regex, Input, MatchKind, PatternSet}; /// /// let patterns = &[ /// r"\w+", r"\d+", r"\pL+", r"foo", r"bar", r"barfoo", r"foobar", /// ]; /// let re = Regex::builder() /// .configure(Regex::config().match_kind(MatchKind::All)) /// .build_many(patterns)?; /// let mut cache = re.create_cache(); /// /// let input = Input::new("foobar"); /// let mut patset = PatternSet::new(re.pattern_len()); /// re.which_overlapping_matches_with(&mut cache, &input, &mut patset); /// let expected = vec![0, 2, 3, 4, 6]; /// let got: Vec<usize> = patset.iter().map(|p| p.as_usize()).collect(); /// assert_eq!(expected, got); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn which_overlapping_matches_with( &self, cache: &mut Cache, input: &Input<'_>, patset: &mut PatternSet, ) { if self.imp.info.is_impossible(input) { return; } self.imp.strat.which_overlapping_matches(cache, input, patset) } } /// Various non-search routines for querying properties of a `Regex` and /// convenience routines for creating [`Captures`] and [`Cache`] values. impl Regex { /// Creates a new object for recording capture group offsets. This is used /// in search APIs like [`Regex::captures`] and [`Regex::search_captures`]. /// /// This is a convenience routine for /// `Captures::all(re.group_info().clone())`. Callers may build other types /// of `Captures` values that record less information (and thus require /// less work from the regex engine) using [`Captures::matches`] and /// [`Captures::empty`]. /// /// # Example /// /// This shows some alternatives to [`Regex::create_captures`]: /// /// ``` /// use regex_automata::{ /// meta::Regex, /// util::captures::Captures, /// Match, PatternID, Span, /// }; /// /// let re = Regex::new(r"(?<first>[A-Z][a-z]+) (?<last>[A-Z][a-z]+)")?; /// /// // This is equivalent to Regex::create_captures. It stores matching /// // offsets for all groups in the regex. 
/// let mut all = Captures::all(re.group_info().clone()); /// re.captures("<NAME>", &mut all); /// assert_eq!(Some(Match::must(0, 0..17)), all.get_match()); /// assert_eq!(Some(Span::from(0..5)), all.get_group_by_name("first")); /// assert_eq!(Some(Span::from(6..17)), all.get_group_by_name("last")); /// /// // In this version, we only care about the implicit groups, which /// // means offsets for the explicit groups will be unavailable. It can /// // sometimes be faster to ask for fewer groups, since the underlying /// // regex engine needs to do less work to keep track of them. /// let mut matches = Captures::matches(re.group_info().clone()); /// re.captures("<NAME>", &mut matches); /// // We still get the overall match info. /// assert_eq!(Some(Match::must(0, 0..17)), matches.get_match()); /// // But now the explicit groups are unavailable. /// assert_eq!(None, matches.get_group_by_name("first")); /// assert_eq!(None, matches.get_group_by_name("last")); /// /// // Finally, in this version, we don't ask to keep track of offsets for /// // *any* groups. All we get back is whether a match occurred, and if /// // so, the ID of the pattern that matched. /// let mut empty = Captures::empty(re.group_info().clone()); /// re.captures("<NAME>", &mut empty); /// // it's a match! /// assert!(empty.is_match()); /// // for pattern ID 0 /// assert_eq!(Some(PatternID::ZERO), empty.pattern()); /// // Match offsets are unavailable. /// assert_eq!(None, empty.get_match()); /// // And of course, explicit groups are unavailable too. /// assert_eq!(None, empty.get_group_by_name("first")); /// assert_eq!(None, empty.get_group_by_name("last")); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn create_captures(&self) -> Captures { Captures::all(self.group_info().clone()) } /// Creates a new cache for use with lower level search APIs like /// [`Regex::search_with`]. /// /// The cache returned should only be used for searches for this `Regex`. /// If you want to reuse the cache for another `Regex`, then you must call /// [`Cache::reset`] with that `Regex`. /// /// This is a convenience routine for [`Cache::new`]. /// /// # Example /// /// ``` /// use regex_automata::{meta::Regex, Input, Match}; /// /// let re = Regex::new(r"(?-u)m\w+\s+m\w+")?; /// let mut cache = re.create_cache(); /// let input = Input::new("crazy janey and her mission man"); /// assert_eq!( /// Some(Match::must(0, 20..31)), /// re.search_with(&mut cache, &input), /// ); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn create_cache(&self) -> Cache { self.imp.strat.create_cache() } /// Returns the total number of patterns in this regex. /// /// The standard [`Regex::new`] constructor always results in a `Regex` /// with a single pattern, but [`Regex::new_many`] permits building a /// multi-pattern regex. /// /// A `Regex` guarantees that the maximum possible `PatternID` returned in /// any match is `Regex::pattern_len() - 1`. In the case where the number /// of patterns is `0`, a match is impossible. /// /// # Example /// /// ``` /// use regex_automata::meta::Regex; /// /// let re = Regex::new(r"(?m)^[a-z]$")?; /// assert_eq!(1, re.pattern_len()); /// /// let re = Regex::new_many::<&str>(&[])?; /// assert_eq!(0, re.pattern_len()); /// /// let re = Regex::new_many(&["a", "b", "c"])?; /// assert_eq!(3, re.pattern_len()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn pattern_len(&self) -> usize { self.imp.info.pattern_len() } /// Returns the total number of capturing groups. 
/// /// This includes the implicit capturing group corresponding to the /// entire match. Therefore, the minimum value returned is `1`. /// /// # Example /// /// This shows a few patterns and how many capture groups they have. /// /// ``` /// use regex_automata::meta::Regex; /// /// let len = |pattern| { /// Regex::new(pattern).map(|re| re.captures_len()) /// }; /// /// assert_eq!(1, len("a")?); /// assert_eq!(2, len("(a)")?); /// assert_eq!(3, len("(a)|(b)")?); /// assert_eq!(5, len("(a)(b)|(c)(d)")?); /// assert_eq!(2, len("(a)|b")?); /// assert_eq!(2, len("a|(b)")?); /// assert_eq!(2, len("(b)*")?); /// assert_eq!(2, len("(b)+")?); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// # Example: multiple patterns /// /// This routine also works for multiple patterns. The total number is /// the sum of the capture groups of each pattern. /// /// ``` /// use regex_automata::meta::Regex; /// /// let len = |patterns| { /// Regex::new_many(patterns).map(|re| re.captures_len()) /// }; /// /// assert_eq!(2, len(&["a", "b"])?); /// assert_eq!(4, len(&["(a)", "(b)"])?); /// assert_eq!(6, len(&["(a)|(b)", "(c)|(d)"])?); /// assert_eq!(8, len(&["(a)(b)|(c)(d)", "(x)(y)"])?); /// assert_eq!(3, len(&["(a)", "b"])?); /// assert_eq!(3, len(&["a", "(b)"])?); /// assert_eq!(4, len(&["(a)", "(b)*"])?); /// assert_eq!(4, len(&["(a)+", "(b)+"])?); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn captures_len(&self) -> usize { self.imp .info .props_union() .explicit_captures_len() .saturating_add(self.pattern_len()) } /// Returns the total number of capturing groups that appear in every /// possible match. /// /// If the number of capture groups can vary depending on the match, then /// this returns `None`. That is, a value is only returned when the number /// of matching groups is invariant or "static." /// /// Note that like [`Regex::captures_len`], this **does** include the /// implicit capturing group corresponding to the entire match. Therefore, /// when a non-None value is returned, it is guaranteed to be at least `1`. /// Stated differently, a return value of `Some(0)` is impossible. /// /// # Example /// /// This shows a few cases where a static number of capture groups is /// available and a few cases where it is not. /// /// ``` /// use regex_automata::meta::Regex; /// /// let len = |pattern| { /// Regex::new(pattern).map(|re| re.static_captures_len()) /// }; /// /// assert_eq!(Some(1), len("a")?); /// assert_eq!(Some(2), len("(a)")?); /// assert_eq!(Some(2), len("(a)|(b)")?); /// assert_eq!(Some(3), len("(a)(b)|(c)(d)")?); /// assert_eq!(None, len("(a)|b")?); /// assert_eq!(None, len("a|(b)")?); /// assert_eq!(None, len("(b)*")?); /// assert_eq!(Some(2), len("(b)+")?); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// # Example: multiple patterns /// /// This property extends to regexes with multiple patterns as well. In /// order for there to be a static number of capture groups in this case, /// every pattern must have the same static number.
/// /// ``` /// use regex_automata::meta::Regex; /// /// let len = |patterns| { /// Regex::new_many(patterns).map(|re| re.static_captures_len()) /// }; /// /// assert_eq!(Some(1), len(&["a", "b"])?); /// assert_eq!(Some(2), len(&["(a)", "(b)"])?); /// assert_eq!(Some(2), len(&["(a)|(b)", "(c)|(d)"])?); /// assert_eq!(Some(3), len(&["(a)(b)|(c)(d)", "(x)(y)"])?); /// assert_eq!(None, len(&["(a)", "b"])?); /// assert_eq!(None, len(&["a", "(b)"])?); /// assert_eq!(None, len(&["(a)", "(b)*"])?); /// assert_eq!(Some(2), len(&["(a)+", "(b)+"])?); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn static_captures_len(&self) -> Option<usize> { self.imp .info .props_union() .static_explicit_captures_len() .map(|len| len.saturating_add(1)) } /// Return information about the capture groups in this `Regex`. /// /// A `GroupInfo` is an immutable object that can be cheaply cloned. It /// is responsible for maintaining a mapping between the capture groups /// in the concrete syntax of zero or more regex patterns and their /// internal representation used by some of the regex matchers. It is also /// responsible for maintaining a mapping between the name of each group /// (if one exists) and its corresponding group index. /// /// A `GroupInfo` is ultimately what is used to build a [`Captures`] value, /// which is some mutable space where group offsets are stored as a result /// of a search. /// /// # Example /// /// This shows some alternatives to [`Regex::create_captures`]: /// /// ``` /// use regex_automata::{ /// meta::Regex, /// util::captures::Captures, /// Match, PatternID, Span, /// }; /// /// let re = Regex::new(r"(?<first>[A-Z][a-z]+) (?<last>[A-Z][a-z]+)")?; /// /// // This is equivalent to Regex::create_captures. It stores matching /// // offsets for all groups in the regex. /// let mut all = Captures::all(re.group_info().clone()); /// re.captures("<NAME>", &mut all); /// assert_eq!(Some(Match::must(0, 0..17)), all.get_match()); /// assert_eq!(Some(Span::from(0..5)), all.get_group_by_name("first")); /// assert_eq!(Some(Span::from(6..17)), all.get_group_by_name("last")); /// /// // In this version, we only care about the implicit groups, which /// // means offsets for the explicit groups will be unavailable. It can /// // sometimes be faster to ask for fewer groups, since the underlying /// // regex engine needs to do less work to keep track of them. /// let mut matches = Captures::matches(re.group_info().clone()); /// re.captures("<NAME>", &mut matches); /// // We still get the overall match info. /// assert_eq!(Some(Match::must(0, 0..17)), matches.get_match()); /// // But now the explicit groups are unavailable. /// assert_eq!(None, matches.get_group_by_name("first")); /// assert_eq!(None, matches.get_group_by_name("last")); /// /// // Finally, in this version, we don't ask to keep track of offsets for /// // *any* groups. All we get back is whether a match occurred, and if /// // so, the ID of the pattern that matched. /// let mut empty = Captures::empty(re.group_info().clone()); /// re.captures("<NAME>", &mut empty); /// // it's a match! /// assert!(empty.is_match()); /// // for pattern ID 0 /// assert_eq!(Some(PatternID::ZERO), empty.pattern()); /// // Match offsets are unavailable. /// assert_eq!(None, empty.get_match()); /// // And of course, explicit groups are unavailable too. 
/// assert_eq!(None, empty.get_group_by_name("first")); /// assert_eq!(None, empty.get_group_by_name("last")); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn group_info(&self) -> &GroupInfo { self.imp.strat.group_info() } /// Returns the configuration object used to build this `Regex`. /// /// If no configuration object was explicitly passed, then the /// configuration returned represents the default. #[inline] pub fn get_config(&self) -> &Config { self.imp.info.config() } /// Returns true if this regex has a high chance of being "accelerated." /// /// The precise meaning of "accelerated" is specifically left unspecified, /// but the general meaning is that the search is a high likelihood of /// running faster than than a character-at-a-time loop inside a standard /// regex engine. /// /// When a regex is accelerated, it is only a *probabilistic* claim. That /// is, just because the regex is believed to be accelerated, that doesn't /// mean it will definitely execute searches very fast. Similarly, if a /// regex is *not* accelerated, that is also a probabilistic claim. That /// is, a regex for which `is_accelerated` returns `false` could still run /// searches more quickly than a regex for which `is_accelerated` returns /// `true`. /// /// Whether a regex is marked as accelerated or not is dependent on /// implementations details that may change in a semver compatible release. /// That is, a regex that is accelerated in a `x.y.1` release might not be /// accelerated in a `x.y.2` release. /// /// Basically, the value of acceleration boils down to a hedge: a hodge /// podge of internal heuristics combine to make a probabilistic guess /// that this regex search may run "fast." The value in knowing this from /// a caller's perspective is that it may act as a signal that no further /// work should be done to accelerate a search. For example, a grep-like /// tool might try to do some extra work extracting literals from a regex /// to create its own heuristic acceleration strategies. But it might /// choose to defer to this crate's acceleration strategy if one exists. /// This routine permits querying whether such a strategy is active for a /// particular regex. /// /// # Example /// /// ``` /// use regex_automata::meta::Regex; /// /// // A simple literal is very likely to be accelerated. /// let re = Regex::new(r"foo")?; /// assert!(re.is_accelerated()); /// /// // A regex with no literals is likely to not be accelerated. /// let re = Regex::new(r"\w")?; /// assert!(!re.is_accelerated()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn is_accelerated(&self) -> bool { self.imp.strat.is_accelerated() } /// Return the total approximate heap memory, in bytes, used by this `Regex`. /// /// Note that currently, there is no high level configuration for setting /// a limit on the specific value returned by this routine. Instead, the /// following routines can be used to control heap memory at a bit of a /// lower level: /// /// * [`Config::nfa_size_limit`] controls how big _any_ of the NFAs are /// allowed to be. /// * [`Config::onepass_size_limit`] controls how big the one-pass DFA is /// allowed to be. /// * [`Config::hybrid_cache_capacity`] controls how much memory the lazy /// DFA is permitted to allocate to store its transition table. /// * [`Config::dfa_size_limit`] controls how big a fully compiled DFA is /// allowed to be. 
/// * [`Config::dfa_state_limit`] controls the conditions under which the /// meta regex engine will even attempt to build a fully compiled DFA. #[inline] pub fn memory_usage(&self) -> usize { self.imp.strat.memory_usage() } } impl Clone for Regex { fn clone(&self) -> Regex { let imp = Arc::clone(&self.imp); let pool = { let strat = Arc::clone(&imp.strat); let create: CachePoolFn = Box::new(move || strat.create_cache()); Pool::new(create) }; Regex { imp, pool } } } #[derive(Clone, Debug)] pub(crate) struct RegexInfo(Arc<RegexInfoI>); #[derive(Clone, Debug)] struct RegexInfoI { config: Config, props: Vec<hir::Properties>, props_union: hir::Properties, } impl RegexInfo { fn new(config: Config, hirs: &[&Hir]) -> RegexInfo { // Collect all of the properties from each of the HIRs, and also // union them into one big set of properties representing all HIRs // as if they were in one big alternation. let mut props = vec![]; for hir in hirs.iter() { props.push(hir.properties().clone()); } let props_union = hir::Properties::union(&props); RegexInfo(Arc::new(RegexInfoI { config, props, props_union })) } pub(crate) fn config(&self) -> &Config { &self.0.config } pub(crate) fn props(&self) -> &[hir::Properties] { &self.0.props } pub(crate) fn props_union(&self) -> &hir::Properties { &self.0.props_union } pub(crate) fn pattern_len(&self) -> usize { self.props().len() } pub(crate) fn memory_usage(&self) -> usize { self.props().iter().map(|p| p.memory_usage()).sum::<usize>() + self.props_union().memory_usage() } /// Returns true when the search is guaranteed to be anchored. That is, /// when a match is reported, its offset is guaranteed to correspond to /// the start of the search. /// /// This includes returning true when `input` _isn't_ anchored but the /// underlying regex is. #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn is_anchored_start(&self, input: &Input<'_>) -> bool { input.get_anchored().is_anchored() || self.is_always_anchored_start() } /// Returns true when this regex is always anchored to the start of a /// search. And in particular, that regardless of an `Input` configuration, /// if any match is reported it must start at `0`. #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn is_always_anchored_start(&self) -> bool { use regex_syntax::hir::Look; self.props_union().look_set_prefix().contains(Look::Start) } /// Returns true when this regex is always anchored to the end of a /// search. And in particular, that regardless of an `Input` configuration, /// if any match is reported it must end at the end of the haystack. #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn is_always_anchored_end(&self) -> bool { use regex_syntax::hir::Look; self.props_union().look_set_suffix().contains(Look::End) } /// Returns true if and only if it is known that a match is impossible /// for the given input. This is useful for short-circuiting and avoiding /// running the regex engine if it's known no match can be reported. /// /// Note that this doesn't necessarily detect every possible case. For /// example, when `pattern_len() == 0`, a match is impossible, but that /// case is so rare that it's fine to be handled by the regex engine /// itself. That is, it's not worth the cost of adding it here in order to /// make it a little faster. The reason is that this is called for every /// search. so there is some cost to adding checks here. Arguably, some of /// the checks that are here already probably shouldn't be here... 
#[cfg_attr(feature = "perf-inline", inline(always))] fn is_impossible(&self, input: &Input<'_>) -> bool { // The underlying regex is anchored, so if we don't start the search // at position 0, a match is impossible, because the anchor can only // match at position 0. if input.start() > 0 && self.is_always_anchored_start() { return true; } // Same idea, but for the end anchor. if input.end() < input.haystack().len() && self.is_always_anchored_end() { return true; } // If the haystack is smaller than the minimum length required, then // we know there can be no match. let minlen = match self.props_union().minimum_len() { None => return false, Some(minlen) => minlen, }; if input.get_span().len() < minlen { return true; } // Same idea as minimum, but for maximum. This is trickier. We can // only apply the maximum when we know the entire span that we're // searching *has* to match according to the regex (and possibly the // input configuration). If we know there is too much for the regex // to match, we can bail early. // // I don't think we can apply the maximum otherwise unfortunately. if self.is_anchored_start(input) && self.is_always_anchored_end() { let maxlen = match self.props_union().maximum_len() { None => return false, Some(maxlen) => maxlen, }; if input.get_span().len() > maxlen { return true; } } false } } /// An iterator over all non-overlapping matches. /// /// The iterator yields a [`Match`] value until no more matches could be found. /// /// The lifetime parameters are as follows: /// /// * `'r` represents the lifetime of the `Regex` that produced this iterator. /// * `'h` represents the lifetime of the haystack being searched. /// /// This iterator can be created with the [`Regex::find_iter`] method. #[derive(Debug)] pub struct FindMatches<'r, 'h> { re: &'r Regex, cache: CachePoolGuard<'r>, it: iter::Searcher<'h>, } impl<'r, 'h> FindMatches<'r, 'h> { /// Returns the `Regex` value that created this iterator. #[inline] pub fn regex(&self) -> &'r Regex { self.re } /// Returns the current `Input` associated with this iterator. /// /// The `start` position on the given `Input` may change during iteration, /// but all other values are guaranteed to remain invariant. #[inline] pub fn input<'s>(&'s self) -> &'s Input<'h> { self.it.input() } } impl<'r, 'h> Iterator for FindMatches<'r, 'h> { type Item = Match; #[inline] fn next(&mut self) -> Option<Match> { let FindMatches { re, ref mut cache, ref mut it } = *self; it.advance(|input| Ok(re.search_with(cache, input))) } #[inline] fn count(self) -> usize { // If all we care about is a count of matches, then we only need to // find the end position of each match. This can give us a 2x perf // boost in some cases, because it avoids needing to do a reverse scan // to find the start of a match. let FindMatches { re, mut cache, it } = self; // This does the deref for PoolGuard once instead of every iter. let cache = &mut *cache; it.into_half_matches_iter( |input| Ok(re.search_half_with(cache, input)), ) .count() } } impl<'r, 'h> core::iter::FusedIterator for FindMatches<'r, 'h> {} /// An iterator over all non-overlapping leftmost matches with their capturing /// groups. /// /// The iterator yields a [`Captures`] value until no more matches could be /// found. /// /// The lifetime parameters are as follows: /// /// * `'r` represents the lifetime of the `Regex` that produced this iterator. /// * `'h` represents the lifetime of the haystack being searched. /// /// This iterator can be created with the [`Regex::captures_iter`] method. 
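///
/// # Example
///
/// As a quick sketch of typical usage (the pattern, haystack and exact spans
/// shown here are just illustrative and assume the default configuration):
///
/// ```
/// use regex_automata::{meta::Regex, Span};
///
/// let re = Regex::new(r"[0-9]{4}-[0-9]{2}-[0-9]{2}")?;
/// let hay = "1973-01-05, 1975-08-25 and 1980-10-18";
/// let spans: Vec<Span> = re.captures_iter(hay)
///     .filter_map(|caps| caps.get_match())
///     .map(|m| m.span())
///     .collect();
/// assert_eq!(spans, vec![
///     Span::from(0..10),
///     Span::from(12..22),
///     Span::from(27..37),
/// ]);
///
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```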
#[derive(Debug)] pub struct CapturesMatches<'r, 'h> { re: &'r Regex, cache: CachePoolGuard<'r>, caps: Captures, it: iter::Searcher<'h>, } impl<'r, 'h> CapturesMatches<'r, 'h> { /// Returns the `Regex` value that created this iterator. #[inline] pub fn regex(&self) -> &'r Regex { self.re } /// Returns the current `Input` associated with this iterator. /// /// The `start` position on the given `Input` may change during iteration, /// but all other values are guaranteed to remain invariant. #[inline] pub fn input<'s>(&'s self) -> &'s Input<'h> { self.it.input() } } impl<'r, 'h> Iterator for CapturesMatches<'r, 'h> { type Item = Captures; #[inline] fn next(&mut self) -> Option<Captures> { // Splitting 'self' apart seems necessary to appease borrowck. let CapturesMatches { re, ref mut cache, ref mut caps, ref mut it } = *self; let _ = it.advance(|input| { re.search_captures_with(cache, input, caps); Ok(caps.get_match()) }); if caps.is_match() { Some(caps.clone()) } else { None } } #[inline] fn count(self) -> usize { let CapturesMatches { re, mut cache, it, .. } = self; // This does the deref for PoolGuard once instead of every iter. let cache = &mut *cache; it.into_half_matches_iter( |input| Ok(re.search_half_with(cache, input)), ) .count() } } impl<'r, 'h> core::iter::FusedIterator for CapturesMatches<'r, 'h> {} /// Yields all substrings delimited by a regular expression match. /// /// The spans correspond to the offsets between matches. /// /// The lifetime parameters are as follows: /// /// * `'r` represents the lifetime of the `Regex` that produced this iterator. /// * `'h` represents the lifetime of the haystack being searched. /// /// This iterator can be created with the [`Regex::split`] method. #[derive(Debug)] pub struct Split<'r, 'h> { finder: FindMatches<'r, 'h>, last: usize, } impl<'r, 'h> Split<'r, 'h> { /// Returns the current `Input` associated with this iterator. /// /// The `start` position on the given `Input` may change during iteration, /// but all other values are guaranteed to remain invariant. #[inline] pub fn input<'s>(&'s self) -> &'s Input<'h> { self.finder.input() } } impl<'r, 'h> Iterator for Split<'r, 'h> { type Item = Span; fn next(&mut self) -> Option<Span> { match self.finder.next() { None => { let len = self.finder.it.input().haystack().len(); if self.last > len { None } else { let span = Span::from(self.last..len); self.last = len + 1; // Next call will return None Some(span) } } Some(m) => { let span = Span::from(self.last..m.start()); self.last = m.end(); Some(span) } } } } impl<'r, 'h> core::iter::FusedIterator for Split<'r, 'h> {} /// Yields at most `N` spans delimited by a regular expression match. /// /// The spans correspond to the offsets between matches. The last span will be /// whatever remains after splitting. /// /// The lifetime parameters are as follows: /// /// * `'r` represents the lifetime of the `Regex` that produced this iterator. /// * `'h` represents the lifetime of the haystack being searched. /// /// This iterator can be created with the [`Regex::splitn`] method. #[derive(Debug)] pub struct SplitN<'r, 'h> { splits: Split<'r, 'h>, limit: usize, } impl<'r, 'h> SplitN<'r, 'h> { /// Returns the current `Input` associated with this iterator. /// /// The `start` position on the given `Input` may change during iteration, /// but all other values are guaranteed to remain invariant. 
    #[inline]
    pub fn input<'s>(&'s self) -> &'s Input<'h> {
        self.splits.input()
    }
}

impl<'r, 'h> Iterator for SplitN<'r, 'h> {
    type Item = Span;

    fn next(&mut self) -> Option<Span> {
        if self.limit == 0 {
            return None;
        }

        self.limit -= 1;
        if self.limit > 0 {
            return self.splits.next();
        }

        let len = self.splits.finder.it.input().haystack().len();
        if self.splits.last > len {
            // We've already returned all substrings.
            None
        } else {
            // self.limit == 0, so future calls will return None immediately
            Some(Span::from(self.splits.last..len))
        }
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        (0, Some(self.limit))
    }
}

impl<'r, 'h> core::iter::FusedIterator for SplitN<'r, 'h> {}

/// Represents mutable scratch space used by regex engines during a search.
///
/// Most of the regex engines in this crate require some kind of
/// mutable state in order to execute a search. This mutable state is
/// explicitly separated from the core regex object (such as a
/// [`thompson::NFA`](crate::nfa::thompson::NFA)) so that the read-only regex
/// object can be shared across multiple threads simultaneously without any
/// synchronization. Conversely, a `Cache` must either be duplicated if using
/// the same `Regex` from multiple threads, or else there must be some kind of
/// synchronization that guarantees exclusive access while it's in use by one
/// thread.
///
/// A `Regex` attempts to do this synchronization for you by using a thread
/// pool internally. Its size scales roughly with the number of simultaneous
/// regex searches.
///
/// For cases where one does not want to rely on a `Regex`'s internal thread
/// pool, lower level routines such as [`Regex::search_with`] are provided
/// that permit callers to pass a `Cache` into the search routine explicitly.
///
/// General advice is that the thread pool is often more than good enough.
/// However, it may be possible to observe the effects of its latency,
/// especially when searching many small haystacks from many threads
/// simultaneously.
///
/// Caches can be created from their corresponding `Regex` via
/// [`Regex::create_cache`]. A cache can only be used with either the `Regex`
/// that created it, or the `Regex` that was most recently used to reset it
/// with [`Cache::reset`]. Using a cache with any other `Regex` may result in
/// panics or incorrect results.
///
/// # Example
///
/// ```
/// use regex_automata::{meta::Regex, Input, Match};
///
/// let re = Regex::new(r"(?-u)m\w+\s+m\w+")?;
/// let mut cache = re.create_cache();
/// let input = Input::new("crazy janey and her mission man");
/// assert_eq!(
///     Some(Match::must(0, 20..31)),
///     re.search_with(&mut cache, &input),
/// );
///
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
#[derive(Debug, Clone)]
pub struct Cache {
    pub(crate) capmatches: Captures,
    pub(crate) pikevm: wrappers::PikeVMCache,
    pub(crate) backtrack: wrappers::BoundedBacktrackerCache,
    pub(crate) onepass: wrappers::OnePassCache,
    pub(crate) hybrid: wrappers::HybridCache,
    pub(crate) revhybrid: wrappers::ReverseHybridCache,
}

impl Cache {
    /// Creates a new `Cache` for use with this regex.
    ///
    /// The cache returned should only be used for searches for the given
    /// `Regex`. If you want to reuse the cache for another `Regex`, then you
    /// must call [`Cache::reset`] with that `Regex`.
    pub fn new(re: &Regex) -> Cache {
        re.create_cache()
    }

    /// Reset this cache such that it can be used for searching with the given
    /// `Regex` (and only that `Regex`).
/// /// A cache reset permits potentially reusing memory already allocated in /// this cache with a different `Regex`. /// /// # Example /// /// This shows how to re-purpose a cache for use with a different `Regex`. /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{meta::Regex, Match, Input}; /// /// let re1 = Regex::new(r"\w")?; /// let re2 = Regex::new(r"\W")?; /// /// let mut cache = re1.create_cache(); /// assert_eq!( /// Some(Match::must(0, 0..2)), /// re1.search_with(&mut cache, &Input::new("Δ")), /// ); /// /// // Using 'cache' with re2 is not allowed. It may result in panics or /// // incorrect results. In order to re-purpose the cache, we must reset /// // it with the Regex we'd like to use it with. /// // /// // Similarly, after this reset, using the cache with 're1' is also not /// // allowed. /// cache.reset(&re2); /// assert_eq!( /// Some(Match::must(0, 0..3)), /// re2.search_with(&mut cache, &Input::new("☃")), /// ); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn reset(&mut self, re: &Regex) { re.imp.strat.reset_cache(self) } /// Returns the heap memory usage, in bytes, of this cache. /// /// This does **not** include the stack size used up by this cache. To /// compute that, use `std::mem::size_of::<Cache>()`. pub fn memory_usage(&self) -> usize { let mut bytes = 0; bytes += self.pikevm.memory_usage(); bytes += self.backtrack.memory_usage(); bytes += self.onepass.memory_usage(); bytes += self.hybrid.memory_usage(); bytes += self.revhybrid.memory_usage(); bytes } } /// An object describing the configuration of a `Regex`. /// /// This configuration only includes options for the /// non-syntax behavior of a `Regex`, and can be applied via the /// [`Builder::configure`] method. For configuring the syntax options, see /// [`util::syntax::Config`](crate::util::syntax::Config). /// /// # Example: lower the NFA size limit /// /// In some cases, the default size limit might be too big. The size limit can /// be lowered, which will prevent large regex patterns from compiling. /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::meta::Regex; /// /// let result = Regex::builder() /// .configure(Regex::config().nfa_size_limit(Some(20 * (1<<10)))) /// // Not even 20KB is enough to build a single large Unicode class! /// .build(r"\pL"); /// assert!(result.is_err()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[derive(Clone, Debug, Default)] pub struct Config { // As with other configuration types in this crate, we put all our knobs // in options so that we can distinguish between "default" and "not set." // This makes it possible to easily combine multiple configurations // without default values overwriting explicitly specified values. See the // 'overwrite' method. // // For docs on the fields below, see the corresponding method setters. match_kind: Option<MatchKind>, utf8_empty: Option<bool>, autopre: Option<bool>, pre: Option<Option<Prefilter>>, which_captures: Option<WhichCaptures>, nfa_size_limit: Option<Option<usize>>, onepass_size_limit: Option<Option<usize>>, hybrid_cache_capacity: Option<usize>, hybrid: Option<bool>, dfa: Option<bool>, dfa_size_limit: Option<Option<usize>>, dfa_state_limit: Option<Option<usize>>, onepass: Option<bool>, backtrack: Option<bool>, byte_classes: Option<bool>, line_terminator: Option<u8>, } impl Config { /// Create a new configuration object for a `Regex`. 
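    ///
    /// # Example
    ///
    /// A small sketch of how a fresh configuration is typically built up and
    /// then applied via [`Builder::configure`] (the pattern and the specific
    /// options chosen here are just illustrative):
    ///
    /// ```
    /// use regex_automata::{meta::{Config, Regex}, MatchKind};
    ///
    /// let config = Config::new()
    ///     .match_kind(MatchKind::All)
    ///     .utf8_empty(false);
    /// let re = Regex::builder().configure(config).build(r"sam|samwise")?;
    /// assert!(re.is_match("samwise"));
    ///
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```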
    pub fn new() -> Config {
        Config::default()
    }

    /// Set the match semantics for a `Regex`.
    ///
    /// The default value is [`MatchKind::LeftmostFirst`].
    ///
    /// # Example
    ///
    /// ```
    /// use regex_automata::{meta::Regex, Match, MatchKind};
    ///
    /// // By default, leftmost-first semantics are used, which
    /// // disambiguates matches at the same position by selecting
    /// // the one that corresponds earlier in the pattern.
    /// let re = Regex::new("sam|samwise")?;
    /// assert_eq!(Some(Match::must(0, 0..3)), re.find("samwise"));
    ///
    /// // But with 'all' semantics, match priority is ignored
    /// // and all match states are included. When coupled with
    /// // a leftmost search, the search will report the last
    /// // possible match.
    /// let re = Regex::builder()
    ///     .configure(Regex::config().match_kind(MatchKind::All))
    ///     .build("sam|samwise")?;
    /// assert_eq!(Some(Match::must(0, 0..7)), re.find("samwise"));
    /// // Beware that this can lead to skipping matches!
    /// // Usually 'all' is used for anchored reverse searches
    /// // only, or for overlapping searches.
    /// assert_eq!(Some(Match::must(0, 4..11)), re.find("sam samwise"));
    ///
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```
    pub fn match_kind(self, kind: MatchKind) -> Config {
        Config { match_kind: Some(kind), ..self }
    }

    /// Toggles whether empty matches are permitted to occur between the code
    /// units of a UTF-8 encoded codepoint.
    ///
    /// This should generally be enabled when searching a `&str` or anything
    /// that you otherwise know is valid UTF-8. It should be disabled in all
    /// other cases. Namely, if the haystack is not valid UTF-8 and this is
    /// enabled, then behavior is unspecified.
    ///
    /// By default, this is enabled.
    ///
    /// # Example
    ///
    /// ```
    /// use regex_automata::{meta::Regex, Match};
    ///
    /// let re = Regex::new("")?;
    /// let got: Vec<Match> = re.find_iter("☃").collect();
    /// // Matches only occur at the beginning and end of the snowman.
    /// assert_eq!(got, vec![
    ///     Match::must(0, 0..0),
    ///     Match::must(0, 3..3),
    /// ]);
    ///
    /// let re = Regex::builder()
    ///     .configure(Regex::config().utf8_empty(false))
    ///     .build("")?;
    /// let got: Vec<Match> = re.find_iter("☃").collect();
    /// // Matches now occur at every position!
    /// assert_eq!(got, vec![
    ///     Match::must(0, 0..0),
    ///     Match::must(0, 1..1),
    ///     Match::must(0, 2..2),
    ///     Match::must(0, 3..3),
    /// ]);
    ///
    /// Ok::<(), Box<dyn std::error::Error>>(())
    /// ```
    pub fn utf8_empty(self, yes: bool) -> Config {
        Config { utf8_empty: Some(yes), ..self }
    }

    /// Toggles whether automatic prefilter support is enabled.
    ///
    /// If this is disabled and [`Config::prefilter`] is not set, then the
    /// meta regex engine will not use any prefilters. This can sometimes
    /// be beneficial in cases where you know (or have measured) that the
    /// prefilter leads to overall worse search performance.
    ///
    /// By default, this is enabled.
    ///
    /// # Example
    ///
    /// ```
    /// # if cfg!(miri) { return Ok(()); } // miri takes too long
    /// use regex_automata::{meta::Regex, Match};
    ///
    /// let re = Regex::builder()
    ///     .configure(Regex::config().auto_prefilter(false))
    ///     .build(r"Bruce \w+")?;
    /// let hay = "Hello <NAME>!";
    /// assert_eq!(Some(Match::must(0, 6..23)), re.find(hay));
    ///
    /// Ok::<(), Box<dyn std::error::Error>>(())
    /// ```
    pub fn auto_prefilter(self, yes: bool) -> Config {
        Config { autopre: Some(yes), ..self }
    }

    /// Overrides and sets the prefilter to use inside a `Regex`.
/// /// This permits one to forcefully set a prefilter in cases where the /// caller knows better than whatever the automatic prefilter logic is /// capable of. /// /// By default, this is set to `None` and an automatic prefilter will be /// used if one could be built. (Assuming [`Config::auto_prefilter`] is /// enabled, which it is by default.) /// /// # Example /// /// This example shows how to set your own prefilter. In the case of a /// pattern like `Bruce \w+`, the automatic prefilter is likely to be /// constructed in a way that it will look for occurrences of `Bruce `. /// In most cases, this is the best choice. But in some cases, it may be /// the case that running `memchr` on `B` is the best choice. One can /// achieve that behavior by overriding the automatic prefilter logic /// and providing a prefilter that just matches `B`. /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{ /// meta::Regex, /// util::prefilter::Prefilter, /// Match, MatchKind, /// }; /// /// let pre = Prefilter::new(MatchKind::LeftmostFirst, &["B"]) /// .expect("a prefilter"); /// let re = Regex::builder() /// .configure(Regex::config().prefilter(Some(pre))) /// .build(r"Bruce \w+")?; /// let hay = "Hello <NAME>!"; /// assert_eq!(Some(Match::must(0, 6..23)), re.find(hay)); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// # Example: incorrect prefilters can lead to incorrect results! /// /// Be warned that setting an incorrect prefilter can lead to missed /// matches. So if you use this option, ensure your prefilter can _never_ /// report false negatives. (A false positive is, on the other hand, quite /// okay and generally unavoidable.) /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{ /// meta::Regex, /// util::prefilter::Prefilter, /// Match, MatchKind, /// }; /// /// let pre = Prefilter::new(MatchKind::LeftmostFirst, &["Z"]) /// .expect("a prefilter"); /// let re = Regex::builder() /// .configure(Regex::config().prefilter(Some(pre))) /// .build(r"Bruce \w+")?; /// let hay = "Hello <NAME>!"; /// // Oops! No match found, but there should be one! /// assert_eq!(None, re.find(hay)); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn prefilter(self, pre: Option<Prefilter>) -> Config { Config { pre: Some(pre), ..self } } /// Configures what kinds of groups are compiled as "capturing" in the /// underlying regex engine. /// /// This is set to [`WhichCaptures::All`] by default. Callers may wish to /// use [`WhichCaptures::Implicit`] in cases where one wants avoid the /// overhead of capture states for explicit groups. /// /// Note that another approach to avoiding the overhead of capture groups /// is by using non-capturing groups in the regex pattern. That is, /// `(?:a)` instead of `(a)`. This option is useful when you can't control /// the concrete syntax but know that you don't need the underlying capture /// states. For example, using `WhichCaptures::Implicit` will behave as if /// all explicit capturing groups in the pattern were non-capturing. /// /// Setting this to `WhichCaptures::None` is usually not the right thing to /// do. When no capture states are compiled, some regex engines (such as /// the `PikeVM`) won't be able to report match offsets. This will manifest /// as no match being found. /// /// # Example /// /// This example demonstrates how the results of capture groups can change /// based on this option. 
First we show the default (all capture groups in
    /// the pattern are capturing):
    ///
    /// ```
    /// use regex_automata::{meta::Regex, Match, Span};
    ///
    /// let re = Regex::new(r"foo([0-9]+)bar")?;
    /// let hay = "foo123bar";
    ///
    /// let mut caps = re.create_captures();
    /// re.captures(hay, &mut caps);
    /// assert_eq!(Some(Span::from(0..9)), caps.get_group(0));
    /// assert_eq!(Some(Span::from(3..6)), caps.get_group(1));
    ///
    /// Ok::<(), Box<dyn std::error::Error>>(())
    /// ```
    ///
    /// And now we show the behavior when we only include implicit capture
    /// groups. In this case, we can only find the overall match span, but the
    /// spans of any other explicit group don't exist because they are treated
    /// as non-capturing. (In effect, when `WhichCaptures::Implicit` is used,
    /// there is no real point in using [`Regex::captures`] since it will never
    /// be able to report more information than [`Regex::find`].)
    ///
    /// ```
    /// use regex_automata::{
    ///     meta::Regex,
    ///     nfa::thompson::WhichCaptures,
    ///     Match,
    ///     Span,
    /// };
    ///
    /// let re = Regex::builder()
    ///     .configure(Regex::config().which_captures(WhichCaptures::Implicit))
    ///     .build(r"foo([0-9]+)bar")?;
    /// let hay = "foo123bar";
    ///
    /// let mut caps = re.create_captures();
    /// re.captures(hay, &mut caps);
    /// assert_eq!(Some(Span::from(0..9)), caps.get_group(0));
    /// assert_eq!(None, caps.get_group(1));
    ///
    /// Ok::<(), Box<dyn std::error::Error>>(())
    /// ```
    pub fn which_captures(mut self, which_captures: WhichCaptures) -> Config {
        self.which_captures = Some(which_captures);
        self
    }

    /// Sets the size limit, in bytes, to enforce on the construction of every
    /// NFA built by the meta regex engine.
    ///
    /// Setting it to `None` disables the limit. This is not recommended if
    /// you're compiling untrusted patterns.
    ///
    /// Note that this limit is applied to _each_ NFA built, and if any of
    /// them exceeds the limit, then construction will fail. This limit does
    /// _not_ correspond to the total memory used by all NFAs in the meta
    /// regex engine.
    ///
    /// This defaults to some reasonable number that permits most reasonable
    /// patterns.
    ///
    /// # Example
    ///
    /// ```
    /// # if cfg!(miri) { return Ok(()); } // miri takes too long
    /// use regex_automata::meta::Regex;
    ///
    /// let result = Regex::builder()
    ///     .configure(Regex::config().nfa_size_limit(Some(20 * (1<<10))))
    ///     // Not even 20KB is enough to build a single large Unicode class!
    ///     .build(r"\pL");
    /// assert!(result.is_err());
    ///
    /// // But notice that building such a regex with the exact same limit
    /// // can succeed depending on other aspects of the configuration. For
    /// // example, a single *forward* NFA will (at time of writing) fit into
    /// // the 20KB limit, but a *reverse* NFA of the same pattern will not.
    /// // So if one configures a meta regex such that a reverse NFA is never
    /// // needed and thus never built, then the 20KB limit will be enough for
    /// // a pattern like \pL!
    /// let result = Regex::builder()
    ///     .configure(Regex::config()
    ///         .nfa_size_limit(Some(20 * (1<<10)))
    ///         // The DFAs are the only thing that (currently) need a reverse
    ///         // NFA. So if both are disabled, the meta regex engine will
    ///         // skip building the reverse NFA. Note that this isn't an API
    ///         // guarantee. A future semver compatible version may introduce
    ///         // new use cases for a reverse NFA.
    ///         .hybrid(false)
    ///         .dfa(false)
    ///     )
    ///     // Not even 20KB is enough to build a single large Unicode class!
    ///     .build(r"\pL");
    /// assert!(result.is_ok());
    ///
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```
    pub fn nfa_size_limit(self, limit: Option<usize>) -> Config {
        Config { nfa_size_limit: Some(limit), ..self }
    }

    /// Sets the size limit, in bytes, for the one-pass DFA.
    ///
    /// Setting it to `None` disables the limit. Disabling the limit is
    /// strongly discouraged when compiling untrusted patterns. Even if the
    /// patterns are trusted, it still may not be a good idea, since a one-pass
    /// DFA can use a lot of memory. With that said, as the size of a regex
    /// increases, the likelihood of it being one-pass likely decreases.
    ///
    /// This defaults to some reasonable number that permits most reasonable
    /// one-pass patterns.
    ///
    /// # Example
    ///
    /// This shows how to set the one-pass DFA size limit. Note that since
    /// a one-pass DFA is an optional component of the meta regex engine,
    /// this size limit only impacts what is built internally and will never
    /// determine whether a `Regex` itself fails to build.
    ///
    /// ```
    /// # if cfg!(miri) { return Ok(()); } // miri takes too long
    /// use regex_automata::meta::Regex;
    ///
    /// let result = Regex::builder()
    ///     .configure(Regex::config().onepass_size_limit(Some(2 * (1<<20))))
    ///     .build(r"\pL{5}");
    /// assert!(result.is_ok());
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```
    pub fn onepass_size_limit(self, limit: Option<usize>) -> Config {
        Config { onepass_size_limit: Some(limit), ..self }
    }

    /// Set the cache capacity, in bytes, for the lazy DFA.
    ///
    /// The cache capacity of the lazy DFA determines approximately how much
    /// heap memory it is allowed to use to store its state transitions. The
    /// state transitions are computed at search time, and if the cache fills
    /// up, it is cleared. At this point, any previously generated state
    /// transitions are lost and are re-generated if they're needed again.
    ///
    /// This sort of cache filling and clearing works quite well _so long as
    /// cache clearing happens infrequently_. If it happens too often, then the
    /// meta regex engine will stop using the lazy DFA and switch over to a
    /// different regex engine.
    ///
    /// In cases where the cache is cleared too often, it may be possible to
    /// give the cache more space and reduce (or eliminate) how often it is
    /// cleared. Similarly, sometimes a regex is so big that the lazy DFA isn't
    /// used at all if its cache capacity isn't big enough.
    ///
    /// The capacity set here is a _limit_ on how much memory is used. The
    /// actual memory used is only allocated as it's needed.
    ///
    /// Determining the right value for this is a little tricky and will likely
    /// require some profiling. Enabling the `logging` feature and setting the
    /// log level to `trace` will also tell you how often the cache is being
    /// cleared.
    ///
    /// # Example
    ///
    /// ```
    /// # if cfg!(miri) { return Ok(()); } // miri takes too long
    /// use regex_automata::meta::Regex;
    ///
    /// let result = Regex::builder()
    ///     .configure(Regex::config().hybrid_cache_capacity(20 * (1<<20)))
    ///     .build(r"\pL{5}");
    /// assert!(result.is_ok());
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```
    pub fn hybrid_cache_capacity(self, limit: usize) -> Config {
        Config { hybrid_cache_capacity: Some(limit), ..self }
    }

    /// Sets the size limit, in bytes, for heap memory used for a fully
    /// compiled DFA.
    ///
    /// **NOTE:** If you increase this, you'll likely also need to increase
    /// [`Config::dfa_state_limit`].
/// /// In contrast to the lazy DFA, building a full DFA requires computing /// all of its state transitions up front. This can be a very expensive /// process, and runs in worst case `2^n` time and space (where `n` is /// proportional to the size of the regex). However, a full DFA unlocks /// some additional optimization opportunities. /// /// Because full DFAs can be so expensive, the default limits for them are /// incredibly small. Generally speaking, if your regex is moderately big /// or if you're using Unicode features (`\w` is Unicode-aware by default /// for example), then you can expect that the meta regex engine won't even /// attempt to build a DFA for it. /// /// If this and [`Config::dfa_state_limit`] are set to `None`, then the /// meta regex will not use any sort of limits when deciding whether to /// build a DFA. This in turn makes construction of a `Regex` take /// worst case exponential time and space. Even short patterns can result /// in huge space blow ups. So it is strongly recommended to keep some kind /// of limit set! /// /// The default is set to a small number that permits some simple regexes /// to get compiled into DFAs in reasonable time. /// /// # Example /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::meta::Regex; /// /// let result = Regex::builder() /// // 100MB is much bigger than the default. /// .configure(Regex::config() /// .dfa_size_limit(Some(100 * (1<<20))) /// // We don't care about size too much here, so just /// // remove the NFA state limit altogether. /// .dfa_state_limit(None)) /// .build(r"\pL{5}"); /// assert!(result.is_ok()); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn dfa_size_limit(self, limit: Option<usize>) -> Config { Config { dfa_size_limit: Some(limit), ..self } } /// Sets a limit on the total number of NFA states, beyond which, a full /// DFA is not attempted to be compiled. /// /// This limit works in concert with [`Config::dfa_size_limit`]. Namely, /// where as `Config::dfa_size_limit` is applied by attempting to construct /// a DFA, this limit is used to avoid the attempt in the first place. This /// is useful to avoid hefty initialization costs associated with building /// a DFA for cases where it is obvious the DFA will ultimately be too big. /// /// By default, this is set to a very small number. /// /// # Example /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::meta::Regex; /// /// let result = Regex::builder() /// .configure(Regex::config() /// // Sometimes the default state limit rejects DFAs even /// // if they would fit in the size limit. Here, we disable /// // the check on the number of NFA states and just rely on /// // the size limit. /// .dfa_state_limit(None)) /// .build(r"(?-u)\w{30}"); /// assert!(result.is_ok()); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn dfa_state_limit(self, limit: Option<usize>) -> Config { Config { dfa_state_limit: Some(limit), ..self } } /// Whether to attempt to shrink the size of the alphabet for the regex /// pattern or not. When enabled, the alphabet is shrunk into a set of /// equivalence classes, where every byte in the same equivalence class /// cannot discriminate between a match or non-match. /// /// **WARNING:** This is only useful for debugging DFAs. Disabling this /// does not yield any speed advantages. Indeed, disabling it can result /// in much higher memory usage. 
Disabling byte classes is useful for /// debugging the actual generated transitions because it lets one see the /// transitions defined on actual bytes instead of the equivalence classes. /// /// This option is enabled by default and should never be disabled unless /// one is debugging the meta regex engine's internals. /// /// # Example /// /// ``` /// use regex_automata::{meta::Regex, Match}; /// /// let re = Regex::builder() /// .configure(Regex::config().byte_classes(false)) /// .build(r"[a-z]+")?; /// let hay = "!!quux!!"; /// assert_eq!(Some(Match::must(0, 2..6)), re.find(hay)); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn byte_classes(self, yes: bool) -> Config { Config { byte_classes: Some(yes), ..self } } /// Set the line terminator to be used by the `^` and `$` anchors in /// multi-line mode. /// /// This option has no effect when CRLF mode is enabled. That is, /// regardless of this setting, `(?Rm:^)` and `(?Rm:$)` will always treat /// `\r` and `\n` as line terminators (and will never match between a `\r` /// and a `\n`). /// /// By default, `\n` is the line terminator. /// /// **Warning**: This does not change the behavior of `.`. To do that, /// you'll need to configure the syntax option /// [`syntax::Config::line_terminator`](crate::util::syntax::Config::line_terminator) /// in addition to this. Otherwise, `.` will continue to match any /// character other than `\n`. /// /// # Example /// /// ``` /// use regex_automata::{meta::Regex, util::syntax, Match}; /// /// let re = Regex::builder() /// .syntax(syntax::Config::new().multi_line(true)) /// .configure(Regex::config().line_terminator(b'\x00')) /// .build(r"^foo$")?; /// let hay = "\x00foo\x00"; /// assert_eq!(Some(Match::must(0, 1..4)), re.find(hay)); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn line_terminator(self, byte: u8) -> Config { Config { line_terminator: Some(byte), ..self } } /// Toggle whether the hybrid NFA/DFA (also known as the "lazy DFA") should /// be available for use by the meta regex engine. /// /// Enabling this does not necessarily mean that the lazy DFA will /// definitely be used. It just means that it will be _available_ for use /// if the meta regex engine thinks it will be useful. /// /// When the `hybrid` crate feature is enabled, then this is enabled by /// default. Otherwise, if the crate feature is disabled, then this is /// always disabled, regardless of its setting by the caller. pub fn hybrid(self, yes: bool) -> Config { Config { hybrid: Some(yes), ..self } } /// Toggle whether a fully compiled DFA should be available for use by the /// meta regex engine. /// /// Enabling this does not necessarily mean that a DFA will definitely be /// used. It just means that it will be _available_ for use if the meta /// regex engine thinks it will be useful. /// /// When the `dfa-build` crate feature is enabled, then this is enabled by /// default. Otherwise, if the crate feature is disabled, then this is /// always disabled, regardless of its setting by the caller. pub fn dfa(self, yes: bool) -> Config { Config { dfa: Some(yes), ..self } } /// Toggle whether a one-pass DFA should be available for use by the meta /// regex engine. /// /// Enabling this does not necessarily mean that a one-pass DFA will /// definitely be used. It just means that it will be _available_ for /// use if the meta regex engine thinks it will be useful. (Indeed, a /// one-pass DFA can only be used when the regex is one-pass. 
See the /// [`dfa::onepass`](crate::dfa::onepass) module for more details.) /// /// When the `dfa-onepass` crate feature is enabled, then this is enabled /// by default. Otherwise, if the crate feature is disabled, then this is /// always disabled, regardless of its setting by the caller. pub fn onepass(self, yes: bool) -> Config { Config { onepass: Some(yes), ..self } } /// Toggle whether a bounded backtracking regex engine should be available /// for use by the meta regex engine. /// /// Enabling this does not necessarily mean that a bounded backtracker will /// definitely be used. It just means that it will be _available_ for use /// if the meta regex engine thinks it will be useful. /// /// When the `nfa-backtrack` crate feature is enabled, then this is enabled /// by default. Otherwise, if the crate feature is disabled, then this is /// always disabled, regardless of its setting by the caller. pub fn backtrack(self, yes: bool) -> Config { Config { backtrack: Some(yes), ..self } } /// Returns the match kind on this configuration, as set by /// [`Config::match_kind`]. /// /// If it was not explicitly set, then a default value is returned. pub fn get_match_kind(&self) -> MatchKind { self.match_kind.unwrap_or(MatchKind::LeftmostFirst) } /// Returns whether empty matches must fall on valid UTF-8 boundaries, as /// set by [`Config::utf8_empty`]. /// /// If it was not explicitly set, then a default value is returned. pub fn get_utf8_empty(&self) -> bool { self.utf8_empty.unwrap_or(true) } /// Returns whether automatic prefilters are enabled, as set by /// [`Config::auto_prefilter`]. /// /// If it was not explicitly set, then a default value is returned. pub fn get_auto_prefilter(&self) -> bool { self.autopre.unwrap_or(true) } /// Returns a manually set prefilter, if one was set by /// [`Config::prefilter`]. /// /// If it was not explicitly set, then a default value is returned. pub fn get_prefilter(&self) -> Option<&Prefilter> { self.pre.as_ref().unwrap_or(&None).as_ref() } /// Returns the capture configuration, as set by /// [`Config::which_captures`]. /// /// If it was not explicitly set, then a default value is returned. pub fn get_which_captures(&self) -> WhichCaptures { self.which_captures.unwrap_or(WhichCaptures::All) } /// Returns NFA size limit, as set by [`Config::nfa_size_limit`]. /// /// If it was not explicitly set, then a default value is returned. pub fn get_nfa_size_limit(&self) -> Option<usize> { self.nfa_size_limit.unwrap_or(Some(10 * (1 << 20))) } /// Returns one-pass DFA size limit, as set by /// [`Config::onepass_size_limit`]. /// /// If it was not explicitly set, then a default value is returned. pub fn get_onepass_size_limit(&self) -> Option<usize> { self.onepass_size_limit.unwrap_or(Some(1 * (1 << 20))) } /// Returns hybrid NFA/DFA cache capacity, as set by /// [`Config::hybrid_cache_capacity`]. /// /// If it was not explicitly set, then a default value is returned. pub fn get_hybrid_cache_capacity(&self) -> usize { self.hybrid_cache_capacity.unwrap_or(2 * (1 << 20)) } /// Returns DFA size limit, as set by [`Config::dfa_size_limit`]. /// /// If it was not explicitly set, then a default value is returned. pub fn get_dfa_size_limit(&self) -> Option<usize> { // The default for this is VERY small because building a full DFA is // ridiculously costly. But for regexes that are very small, it can be // beneficial to use a full DFA. In particular, a full DFA can enable // additional optimizations via something called "accelerated" states. 
// Namely, when there's a state with only a few outgoing transitions, // we can temporary suspend walking the transition table and use memchr // for just those outgoing transitions to skip ahead very quickly. // // Generally speaking, if Unicode is enabled in your regex and you're // using some kind of Unicode feature, then it's going to blow this // size limit. Moreover, Unicode tends to defeat the "accelerated" // state optimization too, so it's a double whammy. // // We also use a limit on the number of NFA states to avoid even // starting the DFA construction process. Namely, DFA construction // itself could make lots of initial allocs proportional to the size // of the NFA, and if the NFA is large, it doesn't make sense to pay // that cost if we know it's likely to be blown by a large margin. self.dfa_size_limit.unwrap_or(Some(40 * (1 << 10))) } /// Returns DFA size limit in terms of the number of states in the NFA, as /// set by [`Config::dfa_state_limit`]. /// /// If it was not explicitly set, then a default value is returned. pub fn get_dfa_state_limit(&self) -> Option<usize> { // Again, as with the size limit, we keep this very small. self.dfa_state_limit.unwrap_or(Some(30)) } /// Returns whether byte classes are enabled, as set by /// [`Config::byte_classes`]. /// /// If it was not explicitly set, then a default value is returned. pub fn get_byte_classes(&self) -> bool { self.byte_classes.unwrap_or(true) } /// Returns the line terminator for this configuration, as set by /// [`Config::line_terminator`]. /// /// If it was not explicitly set, then a default value is returned. pub fn get_line_terminator(&self) -> u8 { self.line_terminator.unwrap_or(b'\n') } /// Returns whether the hybrid NFA/DFA regex engine may be used, as set by /// [`Config::hybrid`]. /// /// If it was not explicitly set, then a default value is returned. pub fn get_hybrid(&self) -> bool { #[cfg(feature = "hybrid")] { self.hybrid.unwrap_or(true) } #[cfg(not(feature = "hybrid"))] { false } } /// Returns whether the DFA regex engine may be used, as set by /// [`Config::dfa`]. /// /// If it was not explicitly set, then a default value is returned. pub fn get_dfa(&self) -> bool { #[cfg(feature = "dfa-build")] { self.dfa.unwrap_or(true) } #[cfg(not(feature = "dfa-build"))] { false } } /// Returns whether the one-pass DFA regex engine may be used, as set by /// [`Config::onepass`]. /// /// If it was not explicitly set, then a default value is returned. pub fn get_onepass(&self) -> bool { #[cfg(feature = "dfa-onepass")] { self.onepass.unwrap_or(true) } #[cfg(not(feature = "dfa-onepass"))] { false } } /// Returns whether the bounded backtracking regex engine may be used, as /// set by [`Config::backtrack`]. /// /// If it was not explicitly set, then a default value is returned. pub fn get_backtrack(&self) -> bool { #[cfg(feature = "nfa-backtrack")] { self.backtrack.unwrap_or(true) } #[cfg(not(feature = "nfa-backtrack"))] { false } } /// Overwrite the default configuration such that the options in `o` are /// always used. If an option in `o` is not set, then the corresponding /// option in `self` is used. If it's not set in `self` either, then it /// remains not set. 
pub(crate) fn overwrite(&self, o: Config) -> Config { Config { match_kind: o.match_kind.or(self.match_kind), utf8_empty: o.utf8_empty.or(self.utf8_empty), autopre: o.autopre.or(self.autopre), pre: o.pre.or_else(|| self.pre.clone()), which_captures: o.which_captures.or(self.which_captures), nfa_size_limit: o.nfa_size_limit.or(self.nfa_size_limit), onepass_size_limit: o .onepass_size_limit .or(self.onepass_size_limit), hybrid_cache_capacity: o .hybrid_cache_capacity .or(self.hybrid_cache_capacity), hybrid: o.hybrid.or(self.hybrid), dfa: o.dfa.or(self.dfa), dfa_size_limit: o.dfa_size_limit.or(self.dfa_size_limit), dfa_state_limit: o.dfa_state_limit.or(self.dfa_state_limit), onepass: o.onepass.or(self.onepass), backtrack: o.backtrack.or(self.backtrack), byte_classes: o.byte_classes.or(self.byte_classes), line_terminator: o.line_terminator.or(self.line_terminator), } } } /// A builder for configuring and constructing a `Regex`. /// /// The builder permits configuring two different aspects of a `Regex`: /// /// * [`Builder::configure`] will set high-level configuration options as /// described by a [`Config`]. /// * [`Builder::syntax`] will set the syntax level configuration options /// as described by a [`util::syntax::Config`](crate::util::syntax::Config). /// This only applies when building a `Regex` from pattern strings. /// /// Once configured, the builder can then be used to construct a `Regex` from /// one of 4 different inputs: /// /// * [`Builder::build`] creates a regex from a single pattern string. /// * [`Builder::build_many`] creates a regex from many pattern strings. /// * [`Builder::build_from_hir`] creates a regex from a /// [`regex-syntax::Hir`](Hir) expression. /// * [`Builder::build_many_from_hir`] creates a regex from many /// [`regex-syntax::Hir`](Hir) expressions. /// /// The latter two methods in particular provide a way to construct a fully /// feature regular expression matcher directly from an `Hir` expression /// without having to first convert it to a string. (This is in contrast to the /// top-level `regex` crate which intentionally provides no such API in order /// to avoid making `regex-syntax` a public dependency.) /// /// As a convenience, this builder may be created via [`Regex::builder`], which /// may help avoid an extra import. /// /// # Example: change the line terminator /// /// This example shows how to enable multi-line mode by default and change the /// line terminator to the NUL byte: /// /// ``` /// use regex_automata::{meta::Regex, util::syntax, Match}; /// /// let re = Regex::builder() /// .syntax(syntax::Config::new().multi_line(true)) /// .configure(Regex::config().line_terminator(b'\x00')) /// .build(r"^foo$")?; /// let hay = "\x00foo\x00"; /// assert_eq!(Some(Match::must(0, 1..4)), re.find(hay)); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// # Example: disable UTF-8 requirement /// /// By default, regex patterns are required to match UTF-8. This includes /// regex patterns that can produce matches of length zero. In the case of an /// empty match, by default, matches will not appear between the code units of /// a UTF-8 encoded codepoint. /// /// However, it can be useful to disable this requirement, particularly if /// you're searching things like `&[u8]` that are not known to be valid UTF-8. /// /// ``` /// use regex_automata::{meta::Regex, util::syntax, Match}; /// /// let mut builder = Regex::builder(); /// // Disables the requirement that non-empty matches match UTF-8. 
/// builder.syntax(syntax::Config::new().utf8(false)); /// // Disables the requirement that empty matches match UTF-8 boundaries. /// builder.configure(Regex::config().utf8_empty(false)); /// /// // We can match raw bytes via \xZZ syntax, but we need to disable /// // Unicode mode to do that. We could disable it everywhere, or just /// // selectively, as shown here. /// let re = builder.build(r"(?-u:\xFF)foo(?-u:\xFF)")?; /// let hay = b"\xFFfoo\xFF"; /// assert_eq!(Some(Match::must(0, 0..5)), re.find(hay)); /// /// // We can also match between code units. /// let re = builder.build(r"")?; /// let hay = "☃"; /// assert_eq!(re.find_iter(hay).collect::<Vec<Match>>(), vec![ /// Match::must(0, 0..0), /// Match::must(0, 1..1), /// Match::must(0, 2..2), /// Match::must(0, 3..3), /// ]); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[derive(Clone, Debug)] pub struct Builder { config: Config, ast: ast::parse::ParserBuilder, hir: hir::translate::TranslatorBuilder, } impl Builder { /// Creates a new builder for configuring and constructing a [`Regex`]. pub fn new() -> Builder { Builder { config: Config::default(), ast: ast::parse::ParserBuilder::new(), hir: hir::translate::TranslatorBuilder::new(), } } /// Builds a `Regex` from a single pattern string. /// /// If there was a problem parsing the pattern or a problem turning it into /// a regex matcher, then an error is returned. /// /// # Example /// /// This example shows how to configure syntax options. /// /// ``` /// use regex_automata::{meta::Regex, util::syntax, Match}; /// /// let re = Regex::builder() /// .syntax(syntax::Config::new().crlf(true).multi_line(true)) /// .build(r"^foo$")?; /// let hay = "\r\nfoo\r\n"; /// assert_eq!(Some(Match::must(0, 2..5)), re.find(hay)); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn build(&self, pattern: &str) -> Result<Regex, BuildError> { self.build_many(&[pattern]) } /// Builds a `Regex` from many pattern strings. /// /// If there was a problem parsing any of the patterns or a problem turning /// them into a regex matcher, then an error is returned. /// /// # Example: finding the pattern that caused an error /// /// When a syntax error occurs, it is possible to ask which pattern /// caused the syntax error. /// /// ``` /// use regex_automata::{meta::Regex, PatternID}; /// /// let err = Regex::builder() /// .build_many(&["a", "b", r"\p{Foo}", "c"]) /// .unwrap_err(); /// assert_eq!(Some(PatternID::must(2)), err.pattern()); /// ``` /// /// # Example: zero patterns is valid /// /// Building a regex with zero patterns results in a regex that never /// matches anything. Because this routine is generic, passing an empty /// slice usually requires a turbo-fish (or something else to help type /// inference). /// /// ``` /// use regex_automata::{meta::Regex, util::syntax, Match}; /// /// let re = Regex::builder() /// .build_many::<&str>(&[])?; /// assert_eq!(None, re.find("")); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn build_many<P: AsRef<str>>( &self, patterns: &[P], ) -> Result<Regex, BuildError> { use crate::util::primitives::IteratorIndexExt; log! { debug!("building meta regex with {} patterns:", patterns.len()); for (pid, p) in patterns.iter().with_pattern_ids() { let p = p.as_ref(); // We might split a grapheme with this truncation logic, but // that's fine. We at least avoid splitting a codepoint. let maxoff = p .char_indices() .map(|(i, ch)| i + ch.len_utf8()) .take(1000) .last() .unwrap_or(0); if maxoff < p.len() { debug!("{:?}: {}[... 
snip ...]", pid, &p[..maxoff]); } else { debug!("{:?}: {}", pid, p); } } } let (mut asts, mut hirs) = (vec![], vec![]); for (pid, p) in patterns.iter().with_pattern_ids() { let ast = self .ast .build() .parse(p.as_ref()) .map_err(|err| BuildError::ast(pid, err))?; asts.push(ast); } for ((pid, p), ast) in patterns.iter().with_pattern_ids().zip(asts.iter()) { let hir = self .hir .build() .translate(p.as_ref(), ast) .map_err(|err| BuildError::hir(pid, err))?; hirs.push(hir); } self.build_many_from_hir(&hirs) } /// Builds a `Regex` directly from an `Hir` expression. /// /// This is useful if you needed to parse a pattern string into an `Hir` /// for other reasons (such as analysis or transformations). This routine /// permits building a `Regex` directly from the `Hir` expression instead /// of first converting the `Hir` back to a pattern string. /// /// When using this method, any options set via [`Builder::syntax`] are /// ignored. Namely, the syntax options only apply when parsing a pattern /// string, which isn't relevant here. /// /// If there was a problem building the underlying regex matcher for the /// given `Hir`, then an error is returned. /// /// # Example /// /// This example shows how one can hand-construct an `Hir` expression and /// build a regex from it without doing any parsing at all. /// /// ``` /// use { /// regex_automata::{meta::Regex, Match}, /// regex_syntax::hir::{Hir, Look}, /// }; /// /// // (?Rm)^foo$ /// let hir = Hir::concat(vec![ /// Hir::look(Look::StartCRLF), /// Hir::literal("foo".as_bytes()), /// Hir::look(Look::EndCRLF), /// ]); /// let re = Regex::builder() /// .build_from_hir(&hir)?; /// let hay = "\r\nfoo\r\n"; /// assert_eq!(Some(Match::must(0, 2..5)), re.find(hay)); /// /// Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn build_from_hir(&self, hir: &Hir) -> Result<Regex, BuildError> { self.build_many_from_hir(&[hir]) } /// Builds a `Regex` directly from many `Hir` expressions. /// /// This is useful if you needed to parse pattern strings into `Hir` /// expressions for other reasons (such as analysis or transformations). /// This routine permits building a `Regex` directly from the `Hir` /// expressions instead of first converting the `Hir` expressions back to /// pattern strings. /// /// When using this method, any options set via [`Builder::syntax`] are /// ignored. Namely, the syntax options only apply when parsing a pattern /// string, which isn't relevant here. /// /// If there was a problem building the underlying regex matcher for the /// given `Hir` expressions, then an error is returned. /// /// Note that unlike [`Builder::build_many`], this can only fail as a /// result of building the underlying matcher. In that case, there is /// no single `Hir` expression that can be isolated as a reason for the /// failure. So if this routine fails, it's not possible to determine which /// `Hir` expression caused the failure. /// /// # Example /// /// This example shows how one can hand-construct multiple `Hir` /// expressions and build a single regex from them without doing any /// parsing at all. 
/// /// ``` /// use { /// regex_automata::{meta::Regex, Match}, /// regex_syntax::hir::{Hir, Look}, /// }; /// /// // (?Rm)^foo$ /// let hir1 = Hir::concat(vec![ /// Hir::look(Look::StartCRLF), /// Hir::literal("foo".as_bytes()), /// Hir::look(Look::EndCRLF), /// ]); /// // (?Rm)^bar$ /// let hir2 = Hir::concat(vec![ /// Hir::look(Look::StartCRLF), /// Hir::literal("bar".as_bytes()), /// Hir::look(Look::EndCRLF), /// ]); /// let re = Regex::builder() /// .build_many_from_hir(&[&hir1, &hir2])?; /// let hay = "\r\nfoo\r\nbar"; /// let got: Vec<Match> = re.find_iter(hay).collect(); /// let expected = vec![ /// Match::must(0, 2..5), /// Match::must(1, 7..10), /// ]; /// assert_eq!(expected, got); /// /// Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn build_many_from_hir<H: Borrow<Hir>>( &self, hirs: &[H], ) -> Result<Regex, BuildError> { let config = self.config.clone(); // We collect the HIRs into a vec so we can write internal routines // with '&[&Hir]'. i.e., Don't use generics everywhere to keep code // bloat down.. let hirs: Vec<&Hir> = hirs.iter().map(|hir| hir.borrow()).collect(); let info = RegexInfo::new(config, &hirs); let strat = strategy::new(&info, &hirs)?; let pool = { let strat = Arc::clone(&strat); let create: CachePoolFn = Box::new(move || strat.create_cache()); Pool::new(create) }; Ok(Regex { imp: Arc::new(RegexI { strat, info }), pool }) } /// Configure the behavior of a `Regex`. /// /// This configuration controls non-syntax options related to the behavior /// of a `Regex`. This includes things like whether empty matches can split /// a codepoint, prefilters, line terminators and a long list of options /// for configuring which regex engines the meta regex engine will be able /// to use internally. /// /// # Example /// /// This example shows how to disable UTF-8 empty mode. This will permit /// empty matches to occur between the UTF-8 encoding of a codepoint. /// /// ``` /// use regex_automata::{meta::Regex, Match}; /// /// let re = Regex::new("")?; /// let got: Vec<Match> = re.find_iter("☃").collect(); /// // Matches only occur at the beginning and end of the snowman. /// assert_eq!(got, vec![ /// Match::must(0, 0..0), /// Match::must(0, 3..3), /// ]); /// /// let re = Regex::builder() /// .configure(Regex::config().utf8_empty(false)) /// .build("")?; /// let got: Vec<Match> = re.find_iter("☃").collect(); /// // Matches now occur at every position! /// assert_eq!(got, vec![ /// Match::must(0, 0..0), /// Match::must(0, 1..1), /// Match::must(0, 2..2), /// Match::must(0, 3..3), /// ]); /// /// Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn configure(&mut self, config: Config) -> &mut Builder { self.config = self.config.overwrite(config); self } /// Configure the syntax options when parsing a pattern string while /// building a `Regex`. /// /// These options _only_ apply when [`Builder::build`] or [`Builder::build_many`] /// are used. The other build methods accept `Hir` values, which have /// already been parsed. /// /// # Example /// /// This example shows how to enable case insensitive mode. 
/// /// ``` /// use regex_automata::{meta::Regex, util::syntax, Match}; /// /// let re = Regex::builder() /// .syntax(syntax::Config::new().case_insensitive(true)) /// .build(r"δ")?; /// assert_eq!(Some(Match::must(0, 0..2)), re.find(r"Δ")); /// /// Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn syntax( &mut self, config: crate::util::syntax::Config, ) -> &mut Builder { config.apply_ast(&mut self.ast); config.apply_hir(&mut self.hir); self } } #[cfg(test)] mod tests { use super::*; // I found this in the course of building out the benchmark suite for // rebar. #[test] fn regression() { env_logger::init(); let re = Regex::new(r"[a-zA-Z]+ing").unwrap(); assert_eq!(1, re.find_iter("tingling").count()); } } <file_sep>/tests/misc.rs use regex::Regex; macro_rules! regex { ($pattern:expr) => { regex::Regex::new($pattern).unwrap() }; } #[test] fn unclosed_group_error() { let err = Regex::new(r"(").unwrap_err(); let msg = err.to_string(); assert!(msg.contains("unclosed group"), "error message: {:?}", msg); } #[test] fn regex_string() { assert_eq!(r"[a-zA-Z0-9]+", regex!(r"[a-zA-Z0-9]+").as_str()); assert_eq!(r"[a-zA-Z0-9]+", &format!("{}", regex!(r"[a-zA-Z0-9]+"))); assert_eq!( r#"Regex("[a-zA-Z0-9]+")"#, &format!("{:?}", regex!(r"[a-zA-Z0-9]+")) ); } #[test] fn capture_names() { let re = regex!(r"(.)(?P<a>.)"); assert_eq!(3, re.captures_len()); assert_eq!((3, Some(3)), re.capture_names().size_hint()); assert_eq!( vec![None, None, Some("a")], re.capture_names().collect::<Vec<_>>() ); } #[test] fn capture_index() { let re = regex!(r"^(?P<name>.+)$"); let cap = re.captures("abc").unwrap(); assert_eq!(&cap[0], "abc"); assert_eq!(&cap[1], "abc"); assert_eq!(&cap["name"], "abc"); } #[test] #[should_panic] fn capture_index_panic_usize() { let re = regex!(r"^(?P<name>.+)$"); let cap = re.captures("abc").unwrap(); let _ = cap[2]; } #[test] #[should_panic] fn capture_index_panic_name() { let re = regex!(r"^(?P<name>.+)$"); let cap = re.captures("abc").unwrap(); let _ = cap["bad name"]; } #[test] fn capture_index_lifetime() { // This is a test of whether the types on `caps["..."]` are general // enough. If not, this will fail to typecheck. fn inner(s: &str) -> usize { let re = regex!(r"(?P<number>[0-9]+)"); let caps = re.captures(s).unwrap(); caps["number"].len() } assert_eq!(3, inner("123")); } #[test] fn capture_misc() { let re = regex!(r"(.)(?P<a>a)?(.)(?P<b>.)"); let cap = re.captures("abc").unwrap(); assert_eq!(5, cap.len()); assert_eq!((0, 3), { let m = cap.get(0).unwrap(); (m.start(), m.end()) }); assert_eq!(None, cap.get(2)); assert_eq!((2, 3), { let m = cap.get(4).unwrap(); (m.start(), m.end()) }); assert_eq!("abc", cap.get(0).unwrap().as_str()); assert_eq!(None, cap.get(2)); assert_eq!("c", cap.get(4).unwrap().as_str()); assert_eq!(None, cap.name("a")); assert_eq!("c", cap.name("b").unwrap().as_str()); } #[test] fn sub_capture_matches() { let re = regex!(r"([a-z])(([a-z])|([0-9]))"); let cap = re.captures("a5").unwrap(); let subs: Vec<_> = cap.iter().collect(); assert_eq!(5, subs.len()); assert!(subs[0].is_some()); assert!(subs[1].is_some()); assert!(subs[2].is_some()); assert!(subs[3].is_none()); assert!(subs[4].is_some()); assert_eq!("a5", subs[0].unwrap().as_str()); assert_eq!("a", subs[1].unwrap().as_str()); assert_eq!("5", subs[2].unwrap().as_str()); assert_eq!("5", subs[4].unwrap().as_str()); } // Test that the DFA can handle pathological cases. 
(This should result in the // DFA's cache being flushed too frequently, which should cause it to quit and // fall back to the NFA algorithm.) #[test] fn dfa_handles_pathological_case() { fn ones_and_zeroes(count: usize) -> String { let mut s = String::new(); for i in 0..count { if i % 3 == 0 { s.push('1'); } else { s.push('0'); } } s } let re = regex!(r"[01]*1[01]{20}$"); let text = { let mut pieces = ones_and_zeroes(100_000); pieces.push('1'); pieces.push_str(&ones_and_zeroes(20)); pieces }; assert!(re.is_match(&text)); } <file_sep>/regex-automata/src/util/search.rs /*! Types and routines that support the search APIs of most regex engines. This sub-module isn't exposed directly, but rather, its contents are exported at the crate root due to the universality of most of the types and routines in this module. */ use core::ops::{Range, RangeBounds}; use crate::util::{escape::DebugByte, primitives::PatternID, utf8}; /// The parameters for a regex search including the haystack to search. /// /// It turns out that regex searches have a few parameters, and in most cases, /// those parameters have defaults that work in the vast majority of cases. /// This `Input` type exists to make that common case seamnless while also /// providing an avenue for changing the parameters of a search. In particular, /// this type enables doing so without a combinatorial explosion of different /// methods and/or superfluous parameters in the common cases. /// /// An `Input` permits configuring the following things: /// /// * Search only a substring of a haystack, while taking the broader context /// into account for resolving look-around assertions. /// * Indicating whether to search for all patterns in a regex, or to /// only search for one pattern in particular. /// * Whether to perform an anchored on unanchored search. /// * Whether to report a match as early as possible. /// /// All of these parameters, except for the haystack, have sensible default /// values. This means that the minimal search configuration is simply a call /// to [`Input::new`] with your haystack. Setting any other parameter is /// optional. /// /// Moreover, for any `H` that implements `AsRef<[u8]>`, there exists a /// `From<H> for Input` implementation. This is useful because many of the /// search APIs in this crate accept an `Into<Input>`. This means you can /// provide string or byte strings to these routines directly, and they'll /// automatically get converted into an `Input` for you. /// /// The lifetime parameter `'h` refers to the lifetime of the haystack. /// /// # Organization /// /// The API of `Input` is split into a few different parts: /// /// * A builder-like API that transforms a `Input` by value. Examples: /// [`Input::span`] and [`Input::anchored`]. /// * A setter API that permits mutating parameters in place. Examples: /// [`Input::set_span`] and [`Input::set_anchored`]. /// * A getter API that permits retrieving any of the search parameters. /// Examples: [`Input::get_span`] and [`Input::get_anchored`]. /// * A few convenience getter routines that don't conform to the above naming /// pattern due to how common they are. Examples: [`Input::haystack`], /// [`Input::start`] and [`Input::end`]. /// * Miscellaneous predicates and other helper routines that are useful /// in some contexts. Examples: [`Input::is_char_boundary`]. /// /// A `Input` exposes so much because it is meant to be used by both callers of /// regex engines _and_ implementors of regex engines. 
A constraining factor is /// that regex engines should accept a `&Input` as its lowest level API, which /// means that implementors should only use the "getter" APIs of a `Input`. /// /// # Valid bounds and search termination /// /// An `Input` permits setting the bounds of a search via either /// [`Input::span`] or [`Input::range`]. The bounds set must be valid, or /// else a panic will occur. Bounds are valid if and only if: /// /// * The bounds represent a valid range into the input's haystack. /// * **or** the end bound is a valid ending bound for the haystack *and* /// the start bound is exactly one greater than the start bound. /// /// In the latter case, [`Input::is_done`] will return true and indicates any /// search receiving such an input should immediately return with no match. /// /// Note that while `Input` is used for reverse searches in this crate, the /// `Input::is_done` predicate assumes a forward search. Because unsigned /// offsets are used internally, there is no way to tell from only the offsets /// whether a reverse search is done or not. /// /// # Regex engine support /// /// Any regex engine accepting an `Input` must support at least the following /// things: /// /// * Searching a `&[u8]` for matches. /// * Searching a substring of `&[u8]` for a match, such that any match /// reported must appear entirely within that substring. /// * For a forwards search, a match should never be reported when /// [`Input::is_done`] returns true. (For reverse searches, termination should /// be handled outside of `Input`.) /// /// Supporting other aspects of an `Input` are optional, but regex engines /// should handle aspects they don't support gracefully. How this is done is /// generally up to the regex engine. This crate generally treats unsupported /// anchored modes as an error to report for example, but for simplicity, in /// the meta regex engine, trying to search with an invalid pattern ID just /// results in no match being reported. #[derive(Clone)] pub struct Input<'h> { haystack: &'h [u8], span: Span, anchored: Anchored, earliest: bool, } impl<'h> Input<'h> { /// Create a new search configuration for the given haystack. #[inline] pub fn new<H: ?Sized + AsRef<[u8]>>(haystack: &'h H) -> Input<'h> { Input { haystack: haystack.as_ref(), span: Span { start: 0, end: haystack.as_ref().len() }, anchored: Anchored::No, earliest: false, } } /// Set the span for this search. /// /// This routine does not panic if the span given is not a valid range for /// this search's haystack. If this search is run with an invalid range, /// then the most likely outcome is that the actual search execution will /// panic. /// /// This routine is generic over how a span is provided. While /// a [`Span`] may be given directly, one may also provide a /// `std::ops::Range<usize>`. To provide anything supported by range /// syntax, use the [`Input::range`] method. /// /// The default span is the entire haystack. /// /// Note that [`Input::range`] overrides this method and vice versa. /// /// # Panics /// /// This panics if the given span does not correspond to valid bounds in /// the haystack or the termination of a search. /// /// # Example /// /// This example shows how the span of the search can impact whether a /// match is reported or not. This is particularly relevant for look-around /// operators, which might take things outside of the span into account /// when determining whether they match. 
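    ///
    /// First, a small sketch that focuses only on the offsets reported (the
    /// pattern and haystack are arbitrary and chosen just for illustration):
    ///
    /// ```
    /// # if cfg!(miri) { return Ok(()); } // miri takes too long
    /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Match, Input};
    ///
    /// let re = PikeVM::new(r"ba")?;
    /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures());
    /// let hay = "foobar";
    ///
    /// // Slicing the haystack reports offsets relative to the slice...
    /// re.search(&mut cache, &Input::new(&hay[3..]), &mut caps);
    /// assert_eq!(Some(Match::must(0, 0..2)), caps.get_match());
    ///
    /// // ...while setting the span reports offsets into the original haystack.
    /// re.search(&mut cache, &Input::new(hay).span(3..6), &mut caps);
    /// assert_eq!(Some(Match::must(0, 3..5)), caps.get_match());
    ///
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```
    ///
    /// The next example shows why this also matters for look-around operators: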
/// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{ /// nfa::thompson::pikevm::PikeVM, /// Match, Input, /// }; /// /// // Look for 'at', but as a distinct word. /// let re = PikeVM::new(r"\bat\b")?; /// let mut cache = re.create_cache(); /// let mut caps = re.create_captures(); /// /// // Our haystack contains 'at', but not as a distinct word. /// let haystack = "batter"; /// /// // A standard search finds nothing, as expected. /// let input = Input::new(haystack); /// re.search(&mut cache, &input, &mut caps); /// assert_eq!(None, caps.get_match()); /// /// // But if we wanted to search starting at position '1', we might /// // slice the haystack. If we do this, it's impossible for the \b /// // anchors to take the surrounding context into account! And thus, /// // a match is produced. /// let input = Input::new(&haystack[1..3]); /// re.search(&mut cache, &input, &mut caps); /// assert_eq!(Some(Match::must(0, 0..2)), caps.get_match()); /// /// // But if we specify the span of the search instead of slicing the /// // haystack, then the regex engine can "see" outside of the span /// // and resolve the anchors correctly. /// let input = Input::new(haystack).span(1..3); /// re.search(&mut cache, &input, &mut caps); /// assert_eq!(None, caps.get_match()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// This may seem a little ham-fisted, but this scenario tends to come up /// if some other regex engine found the match span and now you need to /// re-process that span to look for capturing groups. (e.g., Run a faster /// DFA first, find a match, then run the PikeVM on just the match span to /// resolve capturing groups.) In order to implement that sort of logic /// correctly, you need to set the span on the search instead of slicing /// the haystack directly. /// /// The other advantage of using this routine to specify the bounds of the /// search is that the match offsets are still reported in terms of the /// original haystack. For example, the second search in the example above /// reported a match at position `0`, even though `at` starts at offset /// `1` because we sliced the haystack. #[inline] pub fn span<S: Into<Span>>(mut self, span: S) -> Input<'h> { self.set_span(span); self } /// Like `Input::span`, but accepts any range instead. /// /// This routine does not panic if the range given is not a valid range for /// this search's haystack. If this search is run with an invalid range, /// then the most likely outcome is that the actual search execution will /// panic. /// /// The default range is the entire haystack. /// /// Note that [`Input::span`] overrides this method and vice versa. /// /// # Panics /// /// This routine will panic if the given range could not be converted /// to a valid [`Range`]. For example, this would panic when given /// `0..=usize::MAX` since it cannot be represented using a half-open /// interval in terms of `usize`. /// /// This also panics if the given range does not correspond to valid bounds /// in the haystack or the termination of a search. /// /// # Example /// /// ``` /// use regex_automata::Input; /// /// let input = Input::new("foobar"); /// assert_eq!(0..6, input.get_range()); /// /// let input = Input::new("foobar").range(2..=4); /// assert_eq!(2..5, input.get_range()); /// ``` #[inline] pub fn range<R: RangeBounds<usize>>(mut self, range: R) -> Input<'h> { self.set_range(range); self } /// Sets the anchor mode of a search. 
/// /// When a search is anchored (so that's [`Anchored::Yes`] or /// [`Anchored::Pattern`]), a match must begin at the start of a search. /// When a search is not anchored (that's [`Anchored::No`]), regex engines /// will behave as if the pattern started with a `(?s-u:.)*?`. This prefix /// permits a match to appear anywhere. /// /// By default, the anchored mode is [`Anchored::No`]. /// /// **WARNING:** this is subtly different than using a `^` at the start of /// your regex. A `^` forces a regex to match exclusively at the start of /// a haystack, regardless of where you begin your search. In contrast, /// anchoring a search will allow your regex to match anywhere in your /// haystack, but the match must start at the beginning of a search. /// /// For example, consider the haystack `aba` and the following searches: /// /// 1. The regex `^a` is compiled with `Anchored::No` and searches `aba` /// starting at position `2`. Since `^` requires the match to start at /// the beginning of the haystack and `2 > 0`, no match is found. /// 2. The regex `a` is compiled with `Anchored::Yes` and searches `aba` /// starting at position `2`. This reports a match at `[2, 3]` since /// the match starts where the search started. Since there is no `^`, /// there is no requirement for the match to start at the beginning of /// the haystack. /// 3. The regex `a` is compiled with `Anchored::Yes` and searches `aba` /// starting at position `1`. Since `b` corresponds to position `1` and /// since the search is anchored, it finds no match. While the regex /// matches at other positions, configuring the search to be anchored /// requires that it only report a match that begins at the same offset /// as the beginning of the search. /// 4. The regex `a` is compiled with `Anchored::No` and searches `aba` /// starting at position `1`. Since the search is not anchored and /// the regex does not start with `^`, the search executes as if there /// is a `(?s:.)*?` prefix that permits it to match anywhere. Thus, it /// reports a match at `[2, 3]`. /// /// Note that the [`Anchored::Pattern`] mode is like `Anchored::Yes`, /// except it only reports matches for a particular pattern. /// /// # Example /// /// This demonstrates the differences between an anchored search and /// a pattern that begins with `^` (as described in the above warning /// message). /// /// ``` /// use regex_automata::{ /// nfa::thompson::pikevm::PikeVM, /// Anchored, Match, Input, /// }; /// /// let haystack = "aba"; /// /// let re = PikeVM::new(r"^a")?; /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); /// let input = Input::new(haystack).span(2..3).anchored(Anchored::No); /// re.search(&mut cache, &input, &mut caps); /// // No match is found because 2 is not the beginning of the haystack, /// // which is what ^ requires. /// assert_eq!(None, caps.get_match()); /// /// let re = PikeVM::new(r"a")?; /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); /// let input = Input::new(haystack).span(2..3).anchored(Anchored::Yes); /// re.search(&mut cache, &input, &mut caps); /// // An anchored search can still match anywhere in the haystack, it just /// // must begin at the start of the search which is '2' in this case. 
/// assert_eq!(Some(Match::must(0, 2..3)), caps.get_match()); /// /// let re = PikeVM::new(r"a")?; /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); /// let input = Input::new(haystack).span(1..3).anchored(Anchored::Yes); /// re.search(&mut cache, &input, &mut caps); /// // No match is found since we start searching at offset 1 which /// // corresponds to 'b'. Since there is no '(?s:.)*?' prefix, no match /// // is found. /// assert_eq!(None, caps.get_match()); /// /// let re = PikeVM::new(r"a")?; /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); /// let input = Input::new(haystack).span(1..3).anchored(Anchored::No); /// re.search(&mut cache, &input, &mut caps); /// // Since anchored=no, an implicit '(?s:.)*?' prefix was added to the /// // pattern. Even though the search starts at 'b', the 'match anything' /// // prefix allows the search to match 'a'. /// let expected = Some(Match::must(0, 2..3)); /// assert_eq!(expected, caps.get_match()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn anchored(mut self, mode: Anchored) -> Input<'h> { self.set_anchored(mode); self } /// Whether to execute an "earliest" search or not. /// /// When running a non-overlapping search, an "earliest" search will return /// the match location as early as possible. For example, given a pattern /// of `foo[0-9]+` and a haystack of `foo12345`, a normal leftmost search /// will return `foo12345` as a match. But an "earliest" search for regex /// engines that support "earliest" semantics will return `foo1` as a /// match, since as soon as the first digit following `foo` is seen, it is /// known to have found a match. /// /// Note that "earliest" semantics generally depend on the regex engine. /// Different regex engines may determine there is a match at different /// points. So there is no guarantee that "earliest" matches will always /// return the same offsets for all regex engines. The "earliest" notion /// is really about when the particular regex engine determines there is /// a match rather than a consistent semantic unto itself. This is often /// useful for implementing "did a match occur or not" predicates, but /// sometimes the offset is useful as well. /// /// This is disabled by default. /// /// # Example /// /// This example shows the difference between "earliest" searching and /// normal searching. /// /// ``` /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Match, Input}; /// /// let re = PikeVM::new(r"foo[0-9]+")?; /// let mut cache = re.create_cache(); /// let mut caps = re.create_captures(); /// /// // A normal search implements greediness like you expect. /// let input = Input::new("foo12345"); /// re.search(&mut cache, &input, &mut caps); /// assert_eq!(Some(Match::must(0, 0..8)), caps.get_match()); /// /// // When 'earliest' is enabled and the regex engine supports /// // it, the search will bail once it knows a match has been /// // found. /// let input = Input::new("foo12345").earliest(true); /// re.search(&mut cache, &input, &mut caps); /// assert_eq!(Some(Match::must(0, 0..4)), caps.get_match()); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn earliest(mut self, yes: bool) -> Input<'h> { self.set_earliest(yes); self } /// Set the span for this search configuration. /// /// This is like the [`Input::span`] method, except this mutates the /// span in place. /// /// This routine is generic over how a span is provided. 
While /// a [`Span`] may be given directly, one may also provide a /// `std::ops::Range<usize>`. /// /// # Panics /// /// This panics if the given span does not correspond to valid bounds in /// the haystack or the termination of a search. /// /// # Example /// /// ``` /// use regex_automata::Input; /// /// let mut input = Input::new("foobar"); /// assert_eq!(0..6, input.get_range()); /// input.set_span(2..4); /// assert_eq!(2..4, input.get_range()); /// ``` #[inline] pub fn set_span<S: Into<Span>>(&mut self, span: S) { let span = span.into(); assert!( span.end <= self.haystack.len() && span.start <= span.end.wrapping_add(1), "invalid span {:?} for haystack of length {}", span, self.haystack.len(), ); self.span = span; } /// Set the span for this search configuration given any range. /// /// This is like the [`Input::range`] method, except this mutates the /// span in place. /// /// This routine does not panic if the range given is not a valid range for /// this search's haystack. If this search is run with an invalid range, /// then the most likely outcome is that the actual search execution will /// panic. /// /// # Panics /// /// This routine will panic if the given range could not be converted /// to a valid [`Range`]. For example, this would panic when given /// `0..=usize::MAX` since it cannot be represented using a half-open /// interval in terms of `usize`. /// /// This also panics if the given span does not correspond to valid bounds /// in the haystack or the termination of a search. /// /// # Example /// /// ``` /// use regex_automata::Input; /// /// let mut input = Input::new("foobar"); /// assert_eq!(0..6, input.get_range()); /// input.set_range(2..=4); /// assert_eq!(2..5, input.get_range()); /// ``` #[inline] pub fn set_range<R: RangeBounds<usize>>(&mut self, range: R) { use core::ops::Bound; // It's a little weird to convert ranges into spans, and then spans // back into ranges when we actually slice the haystack. Because // of that process, we always represent everything as a half-open // internal. Therefore, handling things like m..=n is a little awkward. let start = match range.start_bound() { Bound::Included(&i) => i, // Can this case ever happen? Range syntax doesn't support it... Bound::Excluded(&i) => i.checked_add(1).unwrap(), Bound::Unbounded => 0, }; let end = match range.end_bound() { Bound::Included(&i) => i.checked_add(1).unwrap(), Bound::Excluded(&i) => i, Bound::Unbounded => self.haystack().len(), }; self.set_span(Span { start, end }); } /// Set the starting offset for the span for this search configuration. /// /// This is a convenience routine for only mutating the start of a span /// without having to set the entire span. /// /// # Panics /// /// This panics if the span resulting from the new start position does not /// correspond to valid bounds in the haystack or the termination of a /// search. /// /// # Example /// /// ``` /// use regex_automata::Input; /// /// let mut input = Input::new("foobar"); /// assert_eq!(0..6, input.get_range()); /// input.set_start(5); /// assert_eq!(5..6, input.get_range()); /// ``` #[inline] pub fn set_start(&mut self, start: usize) { self.set_span(Span { start, ..self.get_span() }); } /// Set the ending offset for the span for this search configuration. /// /// This is a convenience routine for only mutating the end of a span /// without having to set the entire span. 
/// /// # Panics /// /// This panics if the span resulting from the new end position does not /// correspond to valid bounds in the haystack or the termination of a /// search. /// /// # Example /// /// ``` /// use regex_automata::Input; /// /// let mut input = Input::new("foobar"); /// assert_eq!(0..6, input.get_range()); /// input.set_end(5); /// assert_eq!(0..5, input.get_range()); /// ``` #[inline] pub fn set_end(&mut self, end: usize) { self.set_span(Span { end, ..self.get_span() }); } /// Set the anchor mode of a search. /// /// This is like [`Input::anchored`], except it mutates the search /// configuration in place. /// /// # Example /// /// ``` /// use regex_automata::{Anchored, Input, PatternID}; /// /// let mut input = Input::new("foobar"); /// assert_eq!(Anchored::No, input.get_anchored()); /// /// let pid = PatternID::must(5); /// input.set_anchored(Anchored::Pattern(pid)); /// assert_eq!(Anchored::Pattern(pid), input.get_anchored()); /// ``` #[inline] pub fn set_anchored(&mut self, mode: Anchored) { self.anchored = mode; } /// Set whether the search should execute in "earliest" mode or not. /// /// This is like [`Input::earliest`], except it mutates the search /// configuration in place. /// /// # Example /// /// ``` /// use regex_automata::Input; /// /// let mut input = Input::new("foobar"); /// assert!(!input.get_earliest()); /// input.set_earliest(true); /// assert!(input.get_earliest()); /// ``` #[inline] pub fn set_earliest(&mut self, yes: bool) { self.earliest = yes; } /// Return a borrow of the underlying haystack as a slice of bytes. /// /// # Example /// /// ``` /// use regex_automata::Input; /// /// let input = Input::new("foobar"); /// assert_eq!(b"foobar", input.haystack()); /// ``` #[inline] pub fn haystack(&self) -> &[u8] { self.haystack } /// Return the start position of this search. /// /// This is a convenience routine for `search.get_span().start()`. /// /// When [`Input::is_done`] is `false`, this is guaranteed to return /// an offset that is less than or equal to [`Input::end`]. Otherwise, /// the offset is one greater than [`Input::end`]. /// /// # Example /// /// ``` /// use regex_automata::Input; /// /// let input = Input::new("foobar"); /// assert_eq!(0, input.start()); /// /// let input = Input::new("foobar").span(2..4); /// assert_eq!(2, input.start()); /// ``` #[inline] pub fn start(&self) -> usize { self.get_span().start } /// Return the end position of this search. /// /// This is a convenience routine for `search.get_span().end()`. /// /// This is guaranteed to return an offset that is a valid exclusive end /// bound for this input's haystack. /// /// # Example /// /// ``` /// use regex_automata::Input; /// /// let input = Input::new("foobar"); /// assert_eq!(6, input.end()); /// /// let input = Input::new("foobar").span(2..4); /// assert_eq!(4, input.end()); /// ``` #[inline] pub fn end(&self) -> usize { self.get_span().end } /// Return the span for this search configuration. /// /// If one was not explicitly set, then the span corresponds to the entire /// range of the haystack. /// /// When [`Input::is_done`] is `false`, the span returned is guaranteed /// to correspond to valid bounds for this input's haystack. /// /// # Example /// /// ``` /// use regex_automata::{Input, Span}; /// /// let input = Input::new("foobar"); /// assert_eq!(Span { start: 0, end: 6 }, input.get_span()); /// ``` #[inline] pub fn get_span(&self) -> Span { self.span } /// Return the span as a range for this search configuration. 
/// /// If one was not explicitly set, then the span corresponds to the entire /// range of the haystack. /// /// When [`Input::is_done`] is `false`, the range returned is guaranteed /// to correspond to valid bounds for this input's haystack. /// /// # Example /// /// ``` /// use regex_automata::Input; /// /// let input = Input::new("foobar"); /// assert_eq!(0..6, input.get_range()); /// ``` #[inline] pub fn get_range(&self) -> Range<usize> { self.get_span().range() } /// Return the anchored mode for this search configuration. /// /// If no anchored mode was set, then it defaults to [`Anchored::No`]. /// /// # Example /// /// ``` /// use regex_automata::{Anchored, Input, PatternID}; /// /// let mut input = Input::new("foobar"); /// assert_eq!(Anchored::No, input.get_anchored()); /// /// let pid = PatternID::must(5); /// input.set_anchored(Anchored::Pattern(pid)); /// assert_eq!(Anchored::Pattern(pid), input.get_anchored()); /// ``` #[inline] pub fn get_anchored(&self) -> Anchored { self.anchored } /// Return whether this search should execute in "earliest" mode. /// /// # Example /// /// ``` /// use regex_automata::Input; /// /// let input = Input::new("foobar"); /// assert!(!input.get_earliest()); /// ``` #[inline] pub fn get_earliest(&self) -> bool { self.earliest } /// Return true if and only if this search can never return any other /// matches. /// /// This occurs when the start position of this search is greater than the /// end position of the search. /// /// # Example /// /// ``` /// use regex_automata::Input; /// /// let mut input = Input::new("foobar"); /// assert!(!input.is_done()); /// input.set_start(6); /// assert!(!input.is_done()); /// input.set_start(7); /// assert!(input.is_done()); /// ``` #[inline] pub fn is_done(&self) -> bool { self.get_span().start > self.get_span().end } /// Returns true if and only if the given offset in this search's haystack /// falls on a valid UTF-8 encoded codepoint boundary. /// /// If the haystack is not valid UTF-8, then the behavior of this routine /// is unspecified. /// /// # Example /// /// This shows where codepoint boundaries do and don't exist in valid /// UTF-8. /// /// ``` /// use regex_automata::Input; /// /// let input = Input::new("☃"); /// assert!(input.is_char_boundary(0)); /// assert!(!input.is_char_boundary(1)); /// assert!(!input.is_char_boundary(2)); /// assert!(input.is_char_boundary(3)); /// assert!(!input.is_char_boundary(4)); /// ``` #[inline] pub fn is_char_boundary(&self, offset: usize) -> bool { utf8::is_boundary(self.haystack(), offset) } } impl<'h> core::fmt::Debug for Input<'h> { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { use crate::util::escape::DebugHaystack; f.debug_struct("Input") .field("haystack", &DebugHaystack(self.haystack())) .field("span", &self.span) .field("anchored", &self.anchored) .field("earliest", &self.earliest) .finish() } } impl<'h, H: ?Sized + AsRef<[u8]>> From<&'h H> for Input<'h> { fn from(haystack: &'h H) -> Input<'h> { Input::new(haystack) } } /// A representation of a span reported by a regex engine. /// /// A span corresponds to the starting and ending _byte offsets_ of a /// contiguous region of bytes. The starting offset is inclusive while the /// ending offset is exclusive. That is, a span is a half-open interval. /// /// A span is used to report the offsets of a match, but it is also used to /// convey which region of a haystack should be searched via routines like /// [`Input::span`]. 
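///
/// This example shows a few common ways of building and using a `Span` (the
/// offsets are arbitrary and chosen only for illustration):
///
/// ```
/// use regex_automata::Span;
///
/// // A span can be written out directly or converted from a range.
/// let span = Span::from(3..6);
/// assert_eq!(span, Span { start: 3, end: 6 });
/// assert_eq!(3, span.len());
/// assert!(span.contains(4));
///
/// // Like a range, a span can be used to slice a haystack.
/// let hay = "foobarbaz";
/// assert_eq!("bar", &hay[span]);
/// ```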
/// /// This is basically equivalent to a `std::ops::Range<usize>`, except this /// type implements `Copy` which makes it more ergonomic to use in the context /// of this crate. Like a range, this implements `Index` for `[u8]` and `str`, /// and `IndexMut` for `[u8]`. For convenience, this also impls `From<Range>`, /// which means things like `Span::from(5..10)` work. #[derive(Clone, Copy, Eq, Hash, PartialEq)] pub struct Span { /// The start offset of the span, inclusive. pub start: usize, /// The end offset of the span, exclusive. pub end: usize, } impl Span { /// Returns this span as a range. #[inline] pub fn range(&self) -> Range<usize> { Range::from(*self) } /// Returns true when this span is empty. That is, when `start >= end`. #[inline] pub fn is_empty(&self) -> bool { self.start >= self.end } /// Returns the length of this span. /// /// This returns `0` in precisely the cases that `is_empty` returns `true`. #[inline] pub fn len(&self) -> usize { self.end.saturating_sub(self.start) } /// Returns true when the given offset is contained within this span. /// /// Note that an empty span contains no offsets and will always return /// false. #[inline] pub fn contains(&self, offset: usize) -> bool { !self.is_empty() && self.start <= offset && offset <= self.end } /// Returns a new span with `offset` added to this span's `start` and `end` /// values. #[inline] pub fn offset(&self, offset: usize) -> Span { Span { start: self.start + offset, end: self.end + offset } } } impl core::fmt::Debug for Span { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { write!(f, "{}..{}", self.start, self.end) } } impl core::ops::Index<Span> for [u8] { type Output = [u8]; #[inline] fn index(&self, index: Span) -> &[u8] { &self[index.range()] } } impl core::ops::IndexMut<Span> for [u8] { #[inline] fn index_mut(&mut self, index: Span) -> &mut [u8] { &mut self[index.range()] } } impl core::ops::Index<Span> for str { type Output = str; #[inline] fn index(&self, index: Span) -> &str { &self[index.range()] } } impl From<Range<usize>> for Span { #[inline] fn from(range: Range<usize>) -> Span { Span { start: range.start, end: range.end } } } impl From<Span> for Range<usize> { #[inline] fn from(span: Span) -> Range<usize> { Range { start: span.start, end: span.end } } } impl PartialEq<Range<usize>> for Span { #[inline] fn eq(&self, range: &Range<usize>) -> bool { self.start == range.start && self.end == range.end } } impl PartialEq<Span> for Range<usize> { #[inline] fn eq(&self, span: &Span) -> bool { self.start == span.start && self.end == span.end } } /// A representation of "half" of a match reported by a DFA. /// /// This is called a "half" match because it only includes the end location (or /// start location for a reverse search) of a match. This corresponds to the /// information that a single DFA scan can report. Getting the other half of /// the match requires a second scan with a reversed DFA. /// /// A half match also includes the pattern that matched. The pattern is /// identified by an ID, which corresponds to its position (starting from `0`) /// relative to other patterns used to construct the corresponding DFA. If only /// a single pattern is provided to the DFA, then all matches are guaranteed to /// have a pattern ID of `0`. #[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)] pub struct HalfMatch { /// The pattern ID. pattern: PatternID, /// The offset of the match. /// /// For forward searches, the offset is exclusive. For reverse searches, /// the offset is inclusive. 
offset: usize, } impl HalfMatch { /// Create a new half match from a pattern ID and a byte offset. #[inline] pub fn new(pattern: PatternID, offset: usize) -> HalfMatch { HalfMatch { pattern, offset } } /// Create a new half match from a pattern ID and a byte offset. /// /// This is like [`HalfMatch::new`], but accepts a `usize` instead of a /// [`PatternID`]. This panics if the given `usize` is not representable /// as a `PatternID`. #[inline] pub fn must(pattern: usize, offset: usize) -> HalfMatch { HalfMatch::new(PatternID::new(pattern).unwrap(), offset) } /// Returns the ID of the pattern that matched. /// /// The ID of a pattern is derived from the position in which it was /// originally inserted into the corresponding DFA. The first pattern has /// identifier `0`, and each subsequent pattern is `1`, `2` and so on. #[inline] pub fn pattern(&self) -> PatternID { self.pattern } /// The position of the match. /// /// If this match was produced by a forward search, then the offset is /// exclusive. If this match was produced by a reverse search, then the /// offset is inclusive. #[inline] pub fn offset(&self) -> usize { self.offset } } /// A representation of a match reported by a regex engine. /// /// A match has two essential pieces of information: the [`PatternID`] that /// matches, and the [`Span`] of the match in a haystack. /// /// The pattern is identified by an ID, which corresponds to its position /// (starting from `0`) relative to other patterns used to construct the /// corresponding regex engine. If only a single pattern is provided, then all /// matches are guaranteed to have a pattern ID of `0`. /// /// Every match reported by a regex engine guarantees that its span has its /// start offset as less than or equal to its end offset. #[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)] pub struct Match { /// The pattern ID. pattern: PatternID, /// The underlying match span. span: Span, } impl Match { /// Create a new match from a pattern ID and a span. /// /// This constructor is generic over how a span is provided. While /// a [`Span`] may be given directly, one may also provide a /// `std::ops::Range<usize>`. /// /// # Panics /// /// This panics if `end < start`. /// /// # Example /// /// This shows how to create a match for the first pattern in a regex /// object using convenient range syntax. /// /// ``` /// use regex_automata::{Match, PatternID}; /// /// let m = Match::new(PatternID::ZERO, 5..10); /// assert_eq!(0, m.pattern().as_usize()); /// assert_eq!(5, m.start()); /// assert_eq!(10, m.end()); /// ``` #[inline] pub fn new<S: Into<Span>>(pattern: PatternID, span: S) -> Match { let span: Span = span.into(); assert!(span.start <= span.end, "invalid match span"); Match { pattern, span } } /// Create a new match from a pattern ID and a byte offset span. /// /// This constructor is generic over how a span is provided. While /// a [`Span`] may be given directly, one may also provide a /// `std::ops::Range<usize>`. /// /// This is like [`Match::new`], but accepts a `usize` instead of a /// [`PatternID`]. This panics if the given `usize` is not representable /// as a `PatternID`. /// /// # Panics /// /// This panics if `end < start` or if `pattern > PatternID::MAX`. /// /// # Example /// /// This shows how to create a match for the third pattern in a regex /// object using convenient range syntax. 
/// /// ``` /// use regex_automata::Match; /// /// let m = Match::must(3, 5..10); /// assert_eq!(3, m.pattern().as_usize()); /// assert_eq!(5, m.start()); /// assert_eq!(10, m.end()); /// ``` #[inline] pub fn must<S: Into<Span>>(pattern: usize, span: S) -> Match { Match::new(PatternID::must(pattern), span) } /// Returns the ID of the pattern that matched. /// /// The ID of a pattern is derived from the position in which it was /// originally inserted into the corresponding regex engine. The first /// pattern has identifier `0`, and each subsequent pattern is `1`, `2` and /// so on. #[inline] pub fn pattern(&self) -> PatternID { self.pattern } /// The starting position of the match. /// /// This is a convenience routine for `Match::span().start`. #[inline] pub fn start(&self) -> usize { self.span().start } /// The ending position of the match. /// /// This is a convenience routine for `Match::span().end`. #[inline] pub fn end(&self) -> usize { self.span().end } /// Returns the match span as a range. /// /// This is a convenience routine for `Match::span().range()`. #[inline] pub fn range(&self) -> core::ops::Range<usize> { self.span().range() } /// Returns the span for this match. #[inline] pub fn span(&self) -> Span { self.span } /// Returns true when the span in this match is empty. /// /// An empty match can only be returned when the regex itself can match /// the empty string. #[inline] pub fn is_empty(&self) -> bool { self.span().is_empty() } /// Returns the length of this match. /// /// This returns `0` in precisely the cases that `is_empty` returns `true`. #[inline] pub fn len(&self) -> usize { self.span().len() } } /// A set of `PatternID`s. /// /// A set of pattern identifiers is useful for recording which patterns have /// matched a particular haystack. A pattern set _only_ includes pattern /// identifiers. It does not include offset information. /// /// # Example /// /// This shows basic usage of a set. /// /// ``` /// use regex_automata::{PatternID, PatternSet}; /// /// let pid1 = PatternID::must(5); /// let pid2 = PatternID::must(8); /// // Create a new empty set. /// let mut set = PatternSet::new(10); /// // Insert pattern IDs. /// set.insert(pid1); /// set.insert(pid2); /// // Test membership. /// assert!(set.contains(pid1)); /// assert!(set.contains(pid2)); /// // Get all members. /// assert_eq!( /// vec![5, 8], /// set.iter().map(|p| p.as_usize()).collect::<Vec<usize>>(), /// ); /// // Clear the set. /// set.clear(); /// // Test that it is indeed empty. /// assert!(set.is_empty()); /// ``` #[cfg(feature = "alloc")] #[derive(Clone, Debug, Eq, PartialEq)] pub struct PatternSet { /// The number of patterns set to 'true' in this set. len: usize, /// A map from PatternID to boolean of whether a pattern matches or not. /// /// This should probably be a bitset, but it's probably unlikely to matter /// much in practice. /// /// The main downside of this representation (and similarly for a bitset) /// is that iteration scales with the capacity of the set instead of /// the length of the set. This doesn't seem likely to be a problem in /// practice. /// /// Another alternative is to just use a 'SparseSet' for this. It does use /// more memory (quite a bit more), but that seems fine I think compared /// to the memory being used by the regex engine. The real hiccup with /// it is that it yields pattern IDs in the order they were inserted. /// Which is actually kind of nice, but at the time of writing, pattern /// IDs are yielded in ascending order in the regex crate RegexSet API. 
/// If we did change to 'SparseSet', we could provide an additional /// 'iter_match_order' iterator, but keep the ascending order one for /// compatibility. which: alloc::boxed::Box<[bool]>, } #[cfg(feature = "alloc")] impl PatternSet { /// Create a new set of pattern identifiers with the given capacity. /// /// The given capacity typically corresponds to (at least) the number of /// patterns in a compiled regex object. /// /// # Panics /// /// This panics if the given capacity exceeds [`PatternID::LIMIT`]. This is /// impossible if you use the `pattern_len()` method as defined on any of /// the regex engines in this crate. Namely, a regex will fail to build by /// returning an error if the number of patterns given to it exceeds the /// limit. Therefore, the number of patterns in a valid regex is always /// a correct capacity to provide here. pub fn new(capacity: usize) -> PatternSet { assert!( capacity <= PatternID::LIMIT, "pattern set capacity exceeds limit of {}", PatternID::LIMIT, ); PatternSet { len: 0, which: alloc::vec![false; capacity].into_boxed_slice(), } } /// Clear this set such that it contains no pattern IDs. pub fn clear(&mut self) { self.len = 0; for matched in self.which.iter_mut() { *matched = false; } } /// Return true if and only if the given pattern identifier is in this set. pub fn contains(&self, pid: PatternID) -> bool { pid.as_usize() < self.capacity() && self.which[pid] } /// Insert the given pattern identifier into this set and return `true` if /// the given pattern ID was not previously in this set. /// /// If the pattern identifier is already in this set, then this is a no-op. /// /// Use [`PatternSet::try_insert`] for a fallible version of this routine. /// /// # Panics /// /// This panics if this pattern set has insufficient capacity to /// store the given pattern ID. pub fn insert(&mut self, pid: PatternID) -> bool { self.try_insert(pid) .expect("PatternSet should have sufficient capacity") } /// Insert the given pattern identifier into this set and return `true` if /// the given pattern ID was not previously in this set. /// /// If the pattern identifier is already in this set, then this is a no-op. /// /// # Errors /// /// This returns an error if this pattern set has insufficient capacity to /// store the given pattern ID. pub fn try_insert( &mut self, pid: PatternID, ) -> Result<bool, PatternSetInsertError> { if pid.as_usize() >= self.capacity() { return Err(PatternSetInsertError { attempted: pid, capacity: self.capacity(), }); } if self.which[pid] { return Ok(false); } self.len += 1; self.which[pid] = true; Ok(true) } /* // This is currently commented out because it is unused and it is unclear // whether it's useful or not. What's the harm in having it? When, if // we ever wanted to change our representation to a 'SparseSet', then // supporting this method would be a bit tricky. So in order to keep some // API evolution flexibility, we leave it out for now. /// Remove the given pattern identifier from this set. /// /// If the pattern identifier was not previously in this set, then this /// does not change the set and returns `false`. /// /// # Panics /// /// This panics if `pid` exceeds the capacity of this set. pub fn remove(&mut self, pid: PatternID) -> bool { if !self.which[pid] { return false; } self.len -= 1; self.which[pid] = false; true } */ /// Return true if and only if this set has no pattern identifiers in it. 
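    ///
    /// # Example
    ///
    /// A small example pairing [`PatternSet::insert`] with `is_empty`:
    ///
    /// ```
    /// use regex_automata::{PatternID, PatternSet};
    ///
    /// let mut set = PatternSet::new(10);
    /// assert!(set.is_empty());
    /// set.insert(PatternID::must(3));
    /// assert!(!set.is_empty());
    /// ```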
pub fn is_empty(&self) -> bool { self.len() == 0 } /// Return true if and only if this set has the maximum number of pattern /// identifiers in the set. This occurs precisely when `PatternSet::len() /// == PatternSet::capacity()`. /// /// This particular property is useful to test because it may allow one to /// stop a search earlier than you might otherwise. Namely, if a search is /// only reporting which patterns match a haystack and if you know all of /// the patterns match at a given point, then there's no new information /// that can be learned by continuing the search. (Because a pattern set /// does not keep track of offset information.) pub fn is_full(&self) -> bool { self.len() == self.capacity() } /// Returns the total number of pattern identifiers in this set. pub fn len(&self) -> usize { self.len } /// Returns the total number of pattern identifiers that may be stored /// in this set. /// /// This is guaranteed to be less than or equal to [`PatternID::LIMIT`]. /// /// Typically, the capacity of a pattern set matches the number of patterns /// in a regex object with which you are searching. pub fn capacity(&self) -> usize { self.which.len() } /// Returns an iterator over all pattern identifiers in this set. /// /// The iterator yields pattern identifiers in ascending order, starting /// at zero. pub fn iter(&self) -> PatternSetIter<'_> { PatternSetIter { it: self.which.iter().enumerate() } } } /// An error that occurs when a `PatternID` failed to insert into a /// `PatternSet`. /// /// An insert fails when the given `PatternID` exceeds the configured capacity /// of the `PatternSet`. /// /// This error is created by the [`PatternSet::try_insert`] routine. #[cfg(feature = "alloc")] #[derive(Clone, Debug)] pub struct PatternSetInsertError { attempted: PatternID, capacity: usize, } #[cfg(feature = "std")] impl std::error::Error for PatternSetInsertError {} #[cfg(feature = "alloc")] impl core::fmt::Display for PatternSetInsertError { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { write!( f, "failed to insert pattern ID {} into pattern set \ with insufficiet capacity of {}", self.attempted.as_usize(), self.capacity, ) } } /// An iterator over all pattern identifiers in a [`PatternSet`]. /// /// The lifetime parameter `'a` refers to the lifetime of the pattern set being /// iterated over. /// /// This iterator is created by the [`PatternSet::iter`] method. #[cfg(feature = "alloc")] #[derive(Clone, Debug)] pub struct PatternSetIter<'a> { it: core::iter::Enumerate<core::slice::Iter<'a, bool>>, } #[cfg(feature = "alloc")] impl<'a> Iterator for PatternSetIter<'a> { type Item = PatternID; fn next(&mut self) -> Option<PatternID> { while let Some((index, &yes)) = self.it.next() { if yes { // Only valid 'PatternID' values can be inserted into the set // and construction of the set panics if the capacity would // permit storing invalid pattern IDs. Thus, 'yes' is only true // precisely when 'index' corresponds to a valid 'PatternID'. return Some(PatternID::new_unchecked(index)); } } None } fn size_hint(&self) -> (usize, Option<usize>) { self.it.size_hint() } } #[cfg(feature = "alloc")] impl<'a> DoubleEndedIterator for PatternSetIter<'a> { fn next_back(&mut self) -> Option<PatternID> { while let Some((index, &yes)) = self.it.next_back() { if yes { // Only valid 'PatternID' values can be inserted into the set // and construction of the set panics if the capacity would // permit storing invalid pattern IDs. 
Thus, 'yes' is only true // precisely when 'index' corresponds to a valid 'PatternID'. return Some(PatternID::new_unchecked(index)); } } None } } /// The type of anchored search to perform. /// /// This is *almost* a boolean option. That is, you can either do an unanchored /// search for any pattern in a regex, or you can do an anchored search for any /// pattern in a regex. /// /// A third option exists that, assuming the regex engine supports it, permits /// you to do an anchored search for a specific pattern. /// /// Note that there is no way to run an unanchored search for a specific /// pattern. If you need that, you'll need to build separate regexes for each /// pattern. /// /// # Errors /// /// If a regex engine does not support the anchored mode selected, then the /// regex engine will return an error. While any non-trivial regex engine /// should support at least one of the available anchored modes, there is no /// singular mode that is guaranteed to be universally supported. Some regex /// engines might only support unanchored searches (DFAs compiled without /// anchored starting states) and some regex engines might only support /// anchored searches (like the one-pass DFA). /// /// The specific error returned is a [`MatchError`] with a /// [`MatchErrorKind::UnsupportedAnchored`] kind. The kind includes the /// `Anchored` value given that is unsupported. /// /// Note that regex engines should report "no match" if, for example, an /// `Anchored::Pattern` is provided with an invalid pattern ID _but_ where /// anchored searches for a specific pattern are supported. This is smooths out /// behavior such that it's possible to guarantee that an error never occurs /// based on how the regex engine is configured. All regex engines in this /// crate report "no match" when searching for an invalid pattern ID, but where /// searching for a valid pattern ID is otherwise supported. /// /// # Example /// /// This example shows how to use the various `Anchored` modes to run a /// search. We use the [`PikeVM`](crate::nfa::thompson::pikevm::PikeVM) /// because it supports all modes unconditionally. Some regex engines, like /// the [`onepass::DFA`](crate::dfa::onepass::DFA) cannot support unanchored /// searches. /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{ /// nfa::thompson::pikevm::PikeVM, /// Anchored, Input, Match, PatternID, /// }; /// /// let re = PikeVM::new_many(&[ /// r"Mrs. \w+", /// r"Miss \w+", /// r"Mr. \w+", /// r"Ms. \w+", /// ])?; /// let mut cache = re.create_cache(); /// let hay = "Hello Mr. Springsteen!"; /// /// // The default is to do an unanchored search. /// assert_eq!(Some(Match::must(2, 6..21)), re.find(&mut cache, hay)); /// // Explicitly ask for an unanchored search. Same as above. /// let input = Input::new(hay).anchored(Anchored::No); /// assert_eq!(Some(Match::must(2, 6..21)), re.find(&mut cache, hay)); /// /// // Now try an anchored search. Since the match doesn't start at the /// // beginning of the haystack, no match is found! /// let input = Input::new(hay).anchored(Anchored::Yes); /// assert_eq!(None, re.find(&mut cache, input)); /// /// // We can try an anchored search again, but move the location of where /// // we start the search. Note that the offsets reported are still in /// // terms of the overall haystack and not relative to where we started /// // the search. 
/// let input = Input::new(hay).anchored(Anchored::Yes).range(6..); /// assert_eq!(Some(Match::must(2, 6..21)), re.find(&mut cache, input)); /// /// // Now try an anchored search for a specific pattern. We specifically /// // choose a pattern that we know doesn't match to prove that the search /// // only looks for the pattern we provide. /// let input = Input::new(hay) /// .anchored(Anchored::Pattern(PatternID::must(1))) /// .range(6..); /// assert_eq!(None, re.find(&mut cache, input)); /// /// // But if we switch it to the pattern that we know matches, then we find /// // the match. /// let input = Input::new(hay) /// .anchored(Anchored::Pattern(PatternID::must(2))) /// .range(6..); /// assert_eq!(Some(Match::must(2, 6..21)), re.find(&mut cache, input)); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub enum Anchored { /// Run an unanchored search. This means a match may occur anywhere at or /// after the start position of the search. /// /// This search can return a match for any pattern in the regex. No, /// Run an anchored search. This means that a match must begin at the /// start position of the search. /// /// This search can return a match for any pattern in the regex. Yes, /// Run an anchored search for a specific pattern. This means that a match /// must be for the given pattern and must begin at the start position of /// the search. Pattern(PatternID), } impl Anchored { /// Returns true if and only if this anchor mode corresponds to any kind of /// anchored search. /// /// # Example /// /// This examples shows that both `Anchored::Yes` and `Anchored::Pattern` /// are considered anchored searches. /// /// ``` /// use regex_automata::{Anchored, PatternID}; /// /// assert!(!Anchored::No.is_anchored()); /// assert!(Anchored::Yes.is_anchored()); /// assert!(Anchored::Pattern(PatternID::ZERO).is_anchored()); /// ``` #[inline] pub fn is_anchored(&self) -> bool { matches!(*self, Anchored::Yes | Anchored::Pattern(_)) } /// Returns the pattern ID associated with this configuration if it is an /// anchored search for a specific pattern. Otherwise `None` is returned. /// /// # Example /// /// ``` /// use regex_automata::{Anchored, PatternID}; /// /// assert_eq!(None, Anchored::No.pattern()); /// assert_eq!(None, Anchored::Yes.pattern()); /// /// let pid = PatternID::must(5); /// assert_eq!(Some(pid), Anchored::Pattern(pid).pattern()); /// ``` #[inline] pub fn pattern(&self) -> Option<PatternID> { match *self { Anchored::Pattern(pid) => Some(pid), _ => None, } } } /// The kind of match semantics to use for a regex pattern. /// /// The default match kind is `LeftmostFirst`, and this corresponds to the /// match semantics used by most backtracking engines, such as Perl. /// /// # Leftmost first or "preference order" match semantics /// /// Leftmost-first semantics determine which match to report when there are /// multiple paths through a regex that match at the same position. The tie is /// essentially broken by how a backtracker would behave. For example, consider /// running the regex `foofoofoo|foofoo|foo` on the haystack `foofoo`. In this /// case, both the `foofoo` and `foo` branches match at position `0`. So should /// the end of the match be `3` or `6`? /// /// A backtracker will conceptually work by trying `foofoofoo` and failing. /// Then it will try `foofoo`, find the match and stop there. Thus, the /// leftmost-first match position is `6`. 
This is called "leftmost-first" or /// "preference order" because the order of the branches as written in the /// regex pattern is what determines how to break the tie. /// /// (Note that leftmost-longest match semantics, which break ties by always /// taking the longest matching string, are not currently supported by this /// crate. These match semantics tend to be found in POSIX regex engines.) /// /// This example shows how leftmost-first semantics work, and how it even /// applies to multi-pattern regexes: /// /// ``` /// use regex_automata::{ /// nfa::thompson::pikevm::PikeVM, /// Match, /// }; /// /// let re = PikeVM::new_many(&[ /// r"foofoofoo", /// r"foofoo", /// r"foo", /// ])?; /// let mut cache = re.create_cache(); /// let got: Vec<Match> = re.find_iter(&mut cache, "foofoo").collect(); /// let expected = vec![Match::must(1, 0..6)]; /// assert_eq!(expected, got); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// # All matches /// /// The `All` match semantics report any and all matches, and generally will /// attempt to match as much as possible. It doesn't respect any sort of match /// priority at all, so things like non-greedy matching don't work in this /// mode. /// /// The fact that non-greedy matching doesn't work generally makes most forms /// of unanchored non-overlapping searches have unintuitive behavior. Namely, /// unanchored searches behave as if there is a `(?s-u:.)*?` prefix at the /// beginning of the pattern, which is specifically non-greedy. Since it will /// be treated as greedy in `All` match semantics, this generally means that /// it will first attempt to consume all of the haystack and is likely to wind /// up skipping matches. /// /// Generally speaking, `All` should only be used in two circumstances: /// /// * When running an anchored search and there is a desire to match as much as /// possible. For example, when building a reverse regex matcher to find the /// start of a match after finding the end. In this case, the reverse search /// is anchored to the end of the match found by the forward search. /// * When running overlapping searches. Since `All` encodes all possible /// matches, this is generally what you want for an overlapping search. If you /// try to use leftmost-first in an overlapping search, it is likely to produce /// counter-intuitive results since leftmost-first specifically excludes some /// matches from its underlying finite state machine. /// /// This example demonstrates the counter-intuitive behavior of `All` semantics /// when using a standard leftmost unanchored search: /// /// ``` /// use regex_automata::{ /// nfa::thompson::pikevm::PikeVM, /// Match, MatchKind, /// }; /// /// let re = PikeVM::builder() /// .configure(PikeVM::config().match_kind(MatchKind::All)) /// .build("foo")?; /// let hay = "first foo second foo wat"; /// let mut cache = re.create_cache(); /// let got: Vec<Match> = re.find_iter(&mut cache, hay).collect(); /// // Notice that it completely skips the first 'foo'! /// let expected = vec![Match::must(0, 17..20)]; /// assert_eq!(expected, got); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// This second example shows how `All` semantics are useful for an overlapping /// search. Note that we use lower level lazy DFA APIs here since the NFA /// engines only currently support a very limited form of overlapping search. 
/// /// ``` /// use regex_automata::{ /// hybrid::dfa::{DFA, OverlappingState}, /// HalfMatch, Input, MatchKind, /// }; /// /// let re = DFA::builder() /// // If we didn't set 'All' semantics here, then the regex would only /// // match 'foo' at offset 3 and nothing else. Why? Because the state /// // machine implements preference order and knows that the 'foofoo' and /// // 'foofoofoo' branches can never match since 'foo' will always match /// // when they match and take priority. /// .configure(DFA::config().match_kind(MatchKind::All)) /// .build(r"foo|foofoo|foofoofoo")?; /// let mut cache = re.create_cache(); /// let mut state = OverlappingState::start(); /// let input = Input::new("foofoofoo"); /// let mut got = vec![]; /// loop { /// re.try_search_overlapping_fwd(&mut cache, &input, &mut state)?; /// let m = match state.get_match() { /// None => break, /// Some(m) => m, /// }; /// got.push(m); /// } /// let expected = vec![ /// HalfMatch::must(0, 3), /// HalfMatch::must(0, 6), /// HalfMatch::must(0, 9), /// ]; /// assert_eq!(expected, got); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[non_exhaustive] #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub enum MatchKind { /// Report all possible matches. All, /// Report only the leftmost matches. When multiple leftmost matches exist, /// report the match corresponding to the part of the regex that appears /// first in the syntax. LeftmostFirst, // There is prior art in RE2 that shows that we should be able to add // LeftmostLongest too. The tricky part of it is supporting ungreedy // repetitions. Instead of treating all NFA states as having equivalent // priority (as in 'All') or treating all NFA states as having distinct // priority based on order (as in 'LeftmostFirst'), we instead group NFA // states into sets, and treat members of each set as having equivalent // priority, but having greater priority than all following members // of different sets. // // However, it's not clear whether it's really worth adding this. After // all, leftmost-longest can be emulated when using literals by using // leftmost-first and sorting the literals by length in descending order. // However, this won't work for arbitrary regexes. e.g., `\w|\w\w` will // always match `a` in `ab` when using leftmost-first, but leftmost-longest // would match `ab`. } impl MatchKind { #[cfg(feature = "alloc")] pub(crate) fn continue_past_first_match(&self) -> bool { *self == MatchKind::All } } impl Default for MatchKind { fn default() -> MatchKind { MatchKind::LeftmostFirst } } /// An error indicating that a search stopped before reporting whether a /// match exists or not. /// /// To be very clear, this error type implies that one cannot assume that no /// matches occur, since the search stopped before completing. That is, if /// you're looking for information about where a search determined that no /// match can occur, then this error type does *not* give you that. (Indeed, at /// the time of writing, if you need such a thing, you have to write your own /// search routine.) /// /// Normally, when one searches for something, the response is either an /// affirmative "it was found at this location" or a negative "not found at /// all." However, in some cases, a regex engine can be configured to stop its /// search before concluding whether a match exists or not. When this happens, /// it may be important for the caller to know why the regex engine gave up and /// where in the input it gave up at. This error type exposes the 'why' and the /// 'where.' 
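///
/// When a regex engine hands back one of these errors, callers typically
/// branch on the underlying [`MatchErrorKind`] (via [`MatchError::kind`]) to
/// decide how to react. Below is a minimal sketch; the `describe` helper is
/// only an illustration and not part of this crate's API:
///
/// ```
/// use regex_automata::{MatchError, MatchErrorKind};
///
/// fn describe(err: &MatchError) -> String {
///     match *err.kind() {
///         MatchErrorKind::Quit { byte, offset } => {
///             format!("quit on byte {:?} at offset {}", byte, offset)
///         }
///         MatchErrorKind::GaveUp { offset } => {
///             format!("gave up at offset {}", offset)
///         }
///         // 'MatchErrorKind' is non-exhaustive, so always keep a fallback.
///         _ => err.to_string(),
///     }
/// }
///
/// assert_eq!("gave up at offset 5", describe(&MatchError::gave_up(5)));
/// ```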
/// /// For example, the DFAs provided by this library generally cannot correctly /// implement Unicode word boundaries. Instead, they provide an option to /// eagerly support them on ASCII text (since Unicode word boundaries are /// equivalent to ASCII word boundaries when searching ASCII text), but will /// "give up" if a non-ASCII byte is seen. In such cases, one is usually /// required to either report the failure to the caller (unergonomic) or /// otherwise fall back to some other regex engine (ergonomic, but potentially /// costly). /// /// More generally, some regex engines offer the ability for callers to specify /// certain bytes that will trigger the regex engine to automatically quit if /// they are seen. /// /// Still yet, there may be other reasons for a failed match. For example, /// the hybrid DFA provided by this crate can be configured to give up if it /// believes that it is not efficient. This in turn permits callers to choose a /// different regex engine. /// /// (Note that DFAs are configured by default to never quit or give up in this /// fashion. For example, by default, a DFA will fail to build if the regex /// pattern contains a Unicode word boundary. One needs to opt into the "quit" /// behavior via options, like /// [`hybrid::dfa::Config::unicode_word_boundary`](crate::hybrid::dfa::Config::unicode_word_boundary).) /// /// There are a couple other ways a search /// can fail. For example, when using the /// [`BoundedBacktracker`](crate::nfa::thompson::backtrack::BoundedBacktracker) /// with a haystack that is too long, or trying to run an unanchored search /// with a [one-pass DFA](crate::dfa::onepass). #[derive(Clone, Debug, Eq, PartialEq)] pub struct MatchError( #[cfg(feature = "alloc")] alloc::boxed::Box<MatchErrorKind>, #[cfg(not(feature = "alloc"))] MatchErrorKind, ); impl MatchError { /// Create a new error value with the given kind. /// /// This is a more verbose version of the kind-specific constructors, /// e.g., `MatchError::quit`. pub fn new(kind: MatchErrorKind) -> MatchError { #[cfg(feature = "alloc")] { MatchError(alloc::boxed::Box::new(kind)) } #[cfg(not(feature = "alloc"))] { MatchError(kind) } } /// Returns a reference to the underlying error kind. pub fn kind(&self) -> &MatchErrorKind { &self.0 } /// Create a new "quit" error. The given `byte` corresponds to the value /// that tripped a search's quit condition, and `offset` corresponds to the /// location in the haystack at which the search quit. /// /// This is the same as calling `MatchError::new` with a /// [`MatchErrorKind::Quit`] kind. pub fn quit(byte: u8, offset: usize) -> MatchError { MatchError::new(MatchErrorKind::Quit { byte, offset }) } /// Create a new "gave up" error. The given `offset` corresponds to the /// location in the haystack at which the search gave up. /// /// This is the same as calling `MatchError::new` with a /// [`MatchErrorKind::GaveUp`] kind. pub fn gave_up(offset: usize) -> MatchError { MatchError::new(MatchErrorKind::GaveUp { offset }) } /// Create a new "haystack too long" error. The given `len` corresponds to /// the length of the haystack that was problematic. /// /// This is the same as calling `MatchError::new` with a /// [`MatchErrorKind::HaystackTooLong`] kind. pub fn haystack_too_long(len: usize) -> MatchError { MatchError::new(MatchErrorKind::HaystackTooLong { len }) } /// Create a new "unsupported anchored" error. This occurs when the caller /// requests a search with an anchor mode that is not supported by the /// regex engine. 
/// /// This is the same as calling `MatchError::new` with a /// [`MatchErrorKind::UnsupportedAnchored`] kind. pub fn unsupported_anchored(mode: Anchored) -> MatchError { MatchError::new(MatchErrorKind::UnsupportedAnchored { mode }) } } /// The underlying kind of a [`MatchError`]. /// /// This is a **non-exhaustive** enum. That means new variants may be added in /// a semver-compatible release. #[non_exhaustive] #[derive(Clone, Debug, Eq, PartialEq)] pub enum MatchErrorKind { /// The search saw a "quit" byte at which it was instructed to stop /// searching. Quit { /// The "quit" byte that was observed that caused the search to stop. byte: u8, /// The offset at which the quit byte was observed. offset: usize, }, /// The search, based on heuristics, determined that it would be better /// to stop, typically to provide the caller an opportunity to use an /// alternative regex engine. /// /// Currently, the only way for this to occur is via the lazy DFA and /// only when it is configured to do so (it will not return this error by /// default). GaveUp { /// The offset at which the search stopped. This corresponds to the /// position immediately following the last byte scanned. offset: usize, }, /// This error occurs if the haystack given to the regex engine was too /// long to be searched. This occurs, for example, with regex engines /// like the bounded backtracker that have a configurable fixed amount of /// capacity that is tied to the length of the haystack. Anything beyond /// that configured limit will result in an error at search time. HaystackTooLong { /// The length of the haystack that exceeded the limit. len: usize, }, /// An error indicating that a particular type of anchored search was /// requested, but that the regex engine does not support it. /// /// Note that this error should not be returned by a regex engine simply /// because the pattern ID is invalid (i.e., equal to or exceeds the number /// of patterns in the regex). In that case, the regex engine should report /// a non-match. UnsupportedAnchored { /// The anchored mode given that is unsupported. mode: Anchored, }, } #[cfg(feature = "std")] impl std::error::Error for MatchError {} impl core::fmt::Display for MatchError { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { match *self.kind() { MatchErrorKind::Quit { byte, offset } => write!( f, "quit search after observing byte {:?} at offset {}", DebugByte(byte), offset, ), MatchErrorKind::GaveUp { offset } => { write!(f, "gave up searching at offset {}", offset) } MatchErrorKind::HaystackTooLong { len } => { write!(f, "haystack of length {} is too long", len) } MatchErrorKind::UnsupportedAnchored { mode: Anchored::Yes } => { write!(f, "anchored searches are not supported or enabled") } MatchErrorKind::UnsupportedAnchored { mode: Anchored::No } => { write!(f, "unanchored searches are not supported or enabled") } MatchErrorKind::UnsupportedAnchored { mode: Anchored::Pattern(pid), } => { write!( f, "anchored searches for a specific pattern ({}) are \ not supported or enabled", pid.as_usize(), ) } } } } #[cfg(test)] mod tests { use super::*; // We test that our 'MatchError' type is the size we expect. This isn't an // API guarantee, but if the size increases, we really want to make sure we // decide to do that intentionally. So this should be a speed bump. And in // general, we should not increase the size without a very good reason. // // Why? Because low level search APIs return Result<.., MatchError>. 
When // MatchError gets bigger, so to does the Result type. // // Now, when 'alloc' is enabled, we do box the error, which de-emphasizes // the importance of keeping a small error type. But without 'alloc', we // still want things to be small. #[test] fn match_error_size() { let expected_size = if cfg!(feature = "alloc") { core::mem::size_of::<usize>() } else { 2 * core::mem::size_of::<usize>() }; assert_eq!(expected_size, core::mem::size_of::<MatchError>()); } // Same as above, but for the underlying match error kind. #[cfg(target_pointer_width = "64")] #[test] fn match_error_kind_size() { let expected_size = 2 * core::mem::size_of::<usize>(); assert_eq!(expected_size, core::mem::size_of::<MatchErrorKind>()); } #[cfg(target_pointer_width = "32")] #[test] fn match_error_kind_size() { let expected_size = 3 * core::mem::size_of::<usize>(); assert_eq!(expected_size, core::mem::size_of::<MatchErrorKind>()); } } <file_sep>/regex-cli/args/input.rs use regex_automata::{Anchored, Input, PatternID}; use lexopt::{Arg, Parser}; use crate::args::{self, Configurable, Usage}; /// This exposes all of the configuration knobs on a regex_automata::Input via /// CLI flags. The only aspect of regex_automata::Input that this does not /// cover is the haystack, which should be provided by other means (usually /// with `Haystack`). #[derive(Debug, Default)] pub struct Config { start: Option<usize>, end: Option<usize>, anchored: bool, pattern_id: Option<PatternID>, earliest: bool, } impl Config { /// Return an `Input` given the haystack to search. The input configuration /// (other than the haystack) is drawn from this configuration. /// /// If an `Input` could not be constructed from this configuration (for /// example, invalid start/end bounds), then an error is returned. pub fn input<'h>(&self, haystack: &'h [u8]) -> anyhow::Result<Input<'h>> { let mut input = Input::new(haystack).earliest(self.earliest); if let Some(start) = self.start { anyhow::ensure!( start <= haystack.len(), "start bound {} exceeds haystack length {}", start, haystack.len(), ); input.set_start(start); } if let Some(end) = self.end { anyhow::ensure!( end <= haystack.len(), "end bound {} exceeds haystack length {}", end, haystack.len(), ); input.set_end(end); } if let Some(pid) = self.pattern_id { input.set_anchored(Anchored::Pattern(pid)); } else if self.anchored { input.set_anchored(Anchored::Yes) } else { // The default, but we set it explicitly anyway. input.set_anchored(Anchored::No) } Ok(input) } /// Pass the `Input` (derived from this configuration) to the closure /// given. Any error returned by the closure is returned by this routine. /// Similarly, an error is returned if an `Input` could not be constructed /// from this configuration. /// /// The `Input` is constructed with the given haystack. The intent of this /// routine is that if the haystack is specified via a file path, then this /// will memory map the haystack. 
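    ///
    /// A rough sketch of how this might be called (`input_args` and
    /// `haystack_args` below are hypothetical values of this `Config` and of
    /// `args::haystack::Config`, presumably parsed from CLI flags elsewhere):
    ///
    /// ```ignore
    /// // Report how many bytes the configured search span covers.
    /// let span_len = input_args.with(&haystack_args, |input| {
    ///     Ok(input.get_span().len())
    /// })?;
    /// ```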
pub fn with<T>( &self, haystack: &args::haystack::Config, mut f: impl FnMut(Input<'_>) -> anyhow::Result<T>, ) -> anyhow::Result<T> { haystack.with(|bytes| f(self.input(bytes)?)) } } impl Configurable for Config { fn configure( &mut self, p: &mut Parser, arg: &mut Arg, ) -> anyhow::Result<bool> { match *arg { Arg::Long("start") => { self.start = Some(args::parse(p, "--start")?); } Arg::Long("end") => { self.end = Some(args::parse(p, "--end")?); } Arg::Short('a') | Arg::Long("anchored") => { self.anchored = true; } Arg::Long("pattern-id") => { let pid = args::parse(p, "--pattern-id")?; self.pattern_id = Some(PatternID::new(pid)?); } Arg::Long("earliest") => { self.earliest = true; } _ => return Ok(false), } Ok(true) } fn usage(&self) -> &[Usage] { const USAGES: &'static [Usage] = &[ Usage::new( "--start <bound>", "Set the start of the search.", r#" This sets the start bound of a search. It must be a valid offset for the haystack, up to and including the length of the haystack. When not set, the start bound is 0. "#, ), Usage::new( "--end <bound>", "Set the end of the search.", r#" This sets the end bound of a search. It must be a valid offset for the haystack, up to and including the length of the haystack. When not set, the end bound is the length of the haystack. "#, ), Usage::new( "-a, --anchored", "Enable anchored mode for the search.", r#" Enabled anchored mode for the search. When enabled and if a match is found, the start of the match is guaranteed to be equivalent to the start bound of the search. "#, ), Usage::new( "--pattern-id <pid>", "Set pattern to search for.", r#" Set the pattern to search for. This automatically enables anchored mode for the search since regex engines for this crate only support anchored searches for specific patterns. When set and if a match is found, the start of the match is guaranteed to be equivalent to the start bound of the search and the pattern ID is guaranteed to be equivalent to the one set by this flag. When not set, a search may match any of the patterns given. "#, ), Usage::new( "--earliest", "Returns a match as soon as it is known.", r#" This enables "earliest" mode, which asks the regex engine to stop searching as soon as a match is found. The specific offset returned may vary depending on the regex engine since not all regex engines detect matches in the same way. "#, ), ]; USAGES } } <file_sep>/regex-lite/src/string.rs use alloc::{ borrow::Cow, boxed::Box, string::String, string::ToString, sync::Arc, vec, vec::Vec, }; use crate::{ error::Error, hir::{self, Hir}, int::NonMaxUsize, interpolate, nfa::{self, NFA}, pikevm::{self, Cache, PikeVM}, pool::CachePool, }; /// A compiled regular expression for searching Unicode haystacks. /// /// A `Regex` can be used to search haystacks, split haystacks into substrings /// or replace substrings in a haystack with a different substring. All /// searching is done with an implicit `(?s:.)*?` at the beginning and end of /// an pattern. To force an expression to match the whole string (or a prefix /// or a suffix), you must use an anchor like `^` or `$` (or `\A` and `\z`). /// /// While this crate will handle Unicode strings (whether in the regular /// expression or in the haystack), all positions returned are **byte /// offsets**. Every byte offset is guaranteed to be at a Unicode code point /// boundary. That is, all offsets returned by the `Regex` API are guaranteed /// to be ranges that can slice a `&str` without panicking. 
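///
/// For instance, here is a small (arbitrarily chosen) haystack containing a
/// multi-byte character; the offsets reported still slice it cleanly:
///
/// ```
/// use regex_lite::Regex;
///
/// let re = Regex::new(r"[0-9]+").unwrap();
/// let hay = "snowman: ☃ 2023";
/// let m = re.find(hay).unwrap();
/// // '☃' is three bytes long, so the digits begin at byte offset 13.
/// assert_eq!(13..17, m.range());
/// assert_eq!("2023", &hay[m.range()]);
/// ```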
/// /// The only methods that allocate new strings are the string replacement /// methods. All other methods (searching and splitting) return borrowed /// references into the haystack given. /// /// # Example /// /// Find the offsets of a US phone number: /// /// ``` /// use regex_lite::Regex; /// /// let re = Regex::new("[0-9]{3}-[0-9]{3}-[0-9]{4}").unwrap(); /// let m = re.find("phone: 111-222-3333").unwrap(); /// assert_eq!(7..19, m.range()); /// ``` /// /// # Example: extracting capture groups /// /// A common way to use regexes is with capture groups. That is, instead of /// just looking for matches of an entire regex, parentheses are used to create /// groups that represent part of the match. /// /// For example, consider a haystack with multiple lines, and each line has /// three whitespace delimited fields where the second field is expected to be /// a number and the third field a boolean. To make this convenient, we use /// the [`Captures::extract`] API to put the strings that match each group /// into a fixed size array: /// /// ``` /// use regex_lite::Regex; /// /// let hay = " /// rabbit 54 true /// groundhog 2 true /// does not match /// fox 109 false /// "; /// let re = Regex::new(r"(?m)^\s*(\S+)\s+([0-9]+)\s+(true|false)\s*$").unwrap(); /// let mut fields: Vec<(&str, i64, bool)> = vec![]; /// for (_, [f1, f2, f3]) in re.captures_iter(hay).map(|caps| caps.extract()) { /// fields.push((f1, f2.parse()?, f3.parse()?)); /// } /// assert_eq!(fields, vec![ /// ("rabbit", 54, true), /// ("groundhog", 2, true), /// ("fox", 109, false), /// ]); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub struct Regex { pikevm: Arc<PikeVM>, pool: CachePool, } impl Clone for Regex { fn clone(&self) -> Regex { let pikevm = Arc::clone(&self.pikevm); let pool = { let pikevm = Arc::clone(&self.pikevm); let create = Box::new(move || Cache::new(&pikevm)); CachePool::new(create) }; Regex { pikevm, pool } } } impl core::fmt::Display for Regex { /// Shows the original regular expression. fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { write!(f, "{}", self.as_str()) } } impl core::fmt::Debug for Regex { /// Shows the original regular expression. fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_tuple("Regex").field(&self.as_str()).finish() } } impl core::str::FromStr for Regex { type Err = Error; /// Attempts to parse a string into a regular expression fn from_str(s: &str) -> Result<Regex, Error> { Regex::new(s) } } impl TryFrom<&str> for Regex { type Error = Error; /// Attempts to parse a string into a regular expression fn try_from(s: &str) -> Result<Regex, Error> { Regex::new(s) } } impl TryFrom<String> for Regex { type Error = Error; /// Attempts to parse a string into a regular expression fn try_from(s: String) -> Result<Regex, Error> { Regex::new(&s) } } /// Core regular expression methods. impl Regex { /// Compiles a regular expression. Once compiled, it can be used repeatedly /// to search, split or replace substrings in a haystack. /// /// Note that regex compilation tends to be a somewhat expensive process, /// and unlike higher level environments, compilation is not automatically /// cached for you. One should endeavor to compile a regex once and then /// reuse it. For example, it's a bad idea to compile the same regex /// repeatedly in a loop. /// /// # Errors /// /// If an invalid pattern is given, then an error is returned. 
/// An error is also returned if the pattern is valid, but would /// produce a regex that is bigger than the configured size limit via /// [`RegexBuilder::size_limit`]. (A reasonable size limit is enabled by /// default.) /// /// # Example /// /// ``` /// use regex_lite::Regex; /// /// // An Invalid pattern because of an unclosed parenthesis /// assert!(Regex::new(r"foo(bar").is_err()); /// // An invalid pattern because the regex would be too big /// // because Unicode tends to inflate things. /// assert!(Regex::new(r"\w{1000000}").is_err()); /// ``` pub fn new(pattern: &str) -> Result<Regex, Error> { RegexBuilder::new(pattern).build() } /// Returns true if and only if there is a match for the regex anywhere /// in the haystack given. /// /// It is recommended to use this method if all you need to do is test /// whether a match exists, since the underlying matching engine may be /// able to do less work. /// /// # Example /// /// Test if some haystack contains at least one word with exactly 13 /// word characters: /// /// ``` /// use regex_lite::Regex; /// /// let re = Regex::new(r"\b\w{13}\b").unwrap(); /// let hay = "I categorically deny having triskaidekaphobia."; /// assert!(re.is_match(hay)); /// ``` #[inline] pub fn is_match(&self, haystack: &str) -> bool { self.is_match_at(haystack, 0) } /// This routine searches for the first match of this regex in the /// haystack given, and if found, returns a [`Match`]. The `Match` /// provides access to both the byte offsets of the match and the actual /// substring that matched. /// /// Note that this should only be used if you want to find the entire /// match. If instead you just want to test the existence of a match, /// it's potentially faster to use `Regex::is_match(hay)` instead of /// `Regex::find(hay).is_some()`. /// /// # Example /// /// Find the first word with exactly 13 word characters: /// /// ``` /// use regex_lite::Regex; /// /// let re = Regex::new(r"\b\w{13}\b").unwrap(); /// let hay = "I categorically deny having triskaidekaphobia."; /// let mat = re.find(hay).unwrap(); /// assert_eq!(2..15, mat.range()); /// assert_eq!("categorically", mat.as_str()); /// ``` #[inline] pub fn find<'h>(&self, haystack: &'h str) -> Option<Match<'h>> { self.find_at(haystack, 0) } /// Returns an iterator that yields successive non-overlapping matches in /// the given haystack. The iterator yields values of type [`Match`]. /// /// # Time complexity /// /// Note that since `find_iter` runs potentially many searches on the /// haystack and since each search has worst case `O(m * n)` time /// complexity, the overall worst case time complexity for iteration is /// `O(m * n^2)`. /// /// # Example /// /// Find every word with exactly 13 word characters: /// /// ``` /// use regex_lite::Regex; /// /// let re = Regex::new(r"\b\w{13}\b").unwrap(); /// let hay = "Retroactively relinquishing remunerations is reprehensible."; /// let matches: Vec<_> = re.find_iter(hay).map(|m| m.as_str()).collect(); /// assert_eq!(matches, vec![ /// "Retroactively", /// "relinquishing", /// "remunerations", /// "reprehensible", /// ]); /// ``` #[inline] pub fn find_iter<'r, 'h>(&'r self, haystack: &'h str) -> Matches<'r, 'h> { Matches { haystack, it: self.pikevm.find_iter(self.pool.get(), haystack.as_bytes()), } } /// This routine searches for the first match of this regex in the haystack /// given, and if found, returns not only the overall match but also the /// matches of each capture group in the regex. If no match is found, then /// `None` is returned. 
/// /// Capture group `0` always corresponds to an implicit unnamed group that /// includes the entire match. If a match is found, this group is always /// present. Subsequent groups may be named and are numbered, starting /// at 1, by the order in which the opening parenthesis appears in the /// pattern. For example, in the pattern `(?<a>.(?<b>.))(?<c>.)`, `a`, /// `b` and `c` correspond to capture group indices `1`, `2` and `3`, /// respectively. /// /// You should only use `captures` if you need access to the capture group /// matches. Otherwise, [`Regex::find`] is generally faster for discovering /// just the overall match. /// /// # Example /// /// Say you have some haystack with movie names and their release years, /// like "'Citizen Kane' (1941)". It'd be nice if we could search for /// substrings looking like that, while also extracting the movie name and /// its release year separately. The example below shows how to do that. /// /// ``` /// use regex_lite::Regex; /// /// let re = Regex::new(r"'([^']+)'\s+\((\d{4})\)").unwrap(); /// let hay = "Not my favorite movie: 'Citizen Kane' (1941)."; /// let caps = re.captures(hay).unwrap(); /// assert_eq!(caps.get(0).unwrap().as_str(), "'Citizen Kane' (1941)"); /// assert_eq!(caps.get(1).unwrap().as_str(), "Citizen Kane"); /// assert_eq!(caps.get(2).unwrap().as_str(), "1941"); /// // You can also access the groups by index using the Index notation. /// // Note that this will panic on an invalid index. In this case, these /// // accesses are always correct because the overall regex will only /// // match when these capture groups match. /// assert_eq!(&caps[0], "'Citizen Kane' (1941)"); /// assert_eq!(&caps[1], "Citizen Kane"); /// assert_eq!(&caps[2], "1941"); /// ``` /// /// Note that the full match is at capture group `0`. Each subsequent /// capture group is indexed by the order of its opening `(`. /// /// We can make this example a bit clearer by using *named* capture groups: /// /// ``` /// use regex_lite::Regex; /// /// let re = Regex::new(r"'(?<title>[^']+)'\s+\((?<year>\d{4})\)").unwrap(); /// let hay = "Not my favorite movie: 'Citizen Kane' (1941)."; /// let caps = re.captures(hay).unwrap(); /// assert_eq!(caps.get(0).unwrap().as_str(), "'Citizen Kane' (1941)"); /// assert_eq!(caps.name("title").unwrap().as_str(), "Citizen Kane"); /// assert_eq!(caps.name("year").unwrap().as_str(), "1941"); /// // You can also access the groups by name using the Index notation. /// // Note that this will panic on an invalid group name. In this case, /// // these accesses are always correct because the overall regex will /// // only match when these capture groups match. /// assert_eq!(&caps[0], "'Citizen Kane' (1941)"); /// assert_eq!(&caps["title"], "Citizen Kane"); /// assert_eq!(&caps["year"], "1941"); /// ``` /// /// Here we name the capture groups, which we can access with the `name` /// method or the `Index` notation with a `&str`. Note that the named /// capture groups are still accessible with `get` or the `Index` notation /// with a `usize`. /// /// The `0`th capture group is always unnamed, so it must always be /// accessed with `get(0)` or `[0]`. 
/// /// Finally, one other way to to get the matched substrings is with the /// [`Captures::extract`] API: /// /// ``` /// use regex_lite::Regex; /// /// let re = Regex::new(r"'([^']+)'\s+\((\d{4})\)").unwrap(); /// let hay = "Not my favorite movie: 'Citizen Kane' (1941)."; /// let (full, [title, year]) = re.captures(hay).unwrap().extract(); /// assert_eq!(full, "'Citizen Kane' (1941)"); /// assert_eq!(title, "Citizen Kane"); /// assert_eq!(year, "1941"); /// ``` #[inline] pub fn captures<'h>(&self, haystack: &'h str) -> Option<Captures<'h>> { self.captures_at(haystack, 0) } /// Returns an iterator that yields successive non-overlapping matches in /// the given haystack. The iterator yields values of type [`Captures`]. /// /// This is the same as [`Regex::find_iter`], but instead of only providing /// access to the overall match, each value yield includes access to the /// matches of all capture groups in the regex. Reporting this extra match /// data is potentially costly, so callers should only use `captures_iter` /// over `find_iter` when they actually need access to the capture group /// matches. /// /// # Time complexity /// /// Note that since `captures_iter` runs potentially many searches on the /// haystack and since each search has worst case `O(m * n)` time /// complexity, the overall worst case time complexity for iteration is /// `O(m * n^2)`. /// /// # Example /// /// We can use this to find all movie titles and their release years in /// some haystack, where the movie is formatted like "'Title' (xxxx)": /// /// ``` /// use regex_lite::Regex; /// /// let re = Regex::new(r"'([^']+)'\s+\(([0-9]{4})\)").unwrap(); /// let hay = "'Citizen Kane' (1941), 'The Wizard of Oz' (1939), 'M' (1931)."; /// let mut movies = vec![]; /// for (_, [title, year]) in re.captures_iter(hay).map(|c| c.extract()) { /// movies.push((title, year.parse::<i64>()?)); /// } /// assert_eq!(movies, vec![ /// ("Citizen Kane", 1941), /// ("The Wizard of Oz", 1939), /// ("M", 1931), /// ]); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// Or with named groups: /// /// ``` /// use regex_lite::Regex; /// /// let re = Regex::new(r"'(?<title>[^']+)'\s+\((?<year>[0-9]{4})\)").unwrap(); /// let hay = "'Citizen Kane' (1941), 'The Wizard of Oz' (1939), 'M' (1931)."; /// let mut it = re.captures_iter(hay); /// /// let caps = it.next().unwrap(); /// assert_eq!(&caps["title"], "Citizen Kane"); /// assert_eq!(&caps["year"], "1941"); /// /// let caps = it.next().unwrap(); /// assert_eq!(&caps["title"], "The Wizard of Oz"); /// assert_eq!(&caps["year"], "1939"); /// /// let caps = it.next().unwrap(); /// assert_eq!(&caps["title"], "M"); /// assert_eq!(&caps["year"], "1931"); /// ``` #[inline] pub fn captures_iter<'r, 'h>( &'r self, haystack: &'h str, ) -> CaptureMatches<'r, 'h> { CaptureMatches { haystack, re: self, it: self .pikevm .captures_iter(self.pool.get(), haystack.as_bytes()), } } /// Returns an iterator of substrings of the haystack given, delimited by a /// match of the regex. Namely, each element of the iterator corresponds to /// a part of the haystack that *isn't* matched by the regular expression. /// /// # Time complexity /// /// Since iterators over all matches requires running potentially many /// searches on the haystack, and since each search has worst case /// `O(m * n)` time complexity, the overall worst case time complexity for /// this routine is `O(m * n^2)`. 
/// /// # Example /// /// To split a string delimited by arbitrary amounts of spaces or tabs: /// /// ``` /// use regex_lite::Regex; /// /// let re = Regex::new(r"[ \t]+").unwrap(); /// let hay = "a b \t c\td e"; /// let fields: Vec<&str> = re.split(hay).collect(); /// assert_eq!(fields, vec!["a", "b", "c", "d", "e"]); /// ``` /// /// # Example: more cases /// /// Basic usage: /// /// ``` /// use regex_lite::Regex; /// /// let re = Regex::new(r" ").unwrap(); /// let hay = "Mary had a little lamb"; /// let got: Vec<&str> = re.split(hay).collect(); /// assert_eq!(got, vec!["Mary", "had", "a", "little", "lamb"]); /// /// let re = Regex::new(r"X").unwrap(); /// let hay = ""; /// let got: Vec<&str> = re.split(hay).collect(); /// assert_eq!(got, vec![""]); /// /// let re = Regex::new(r"X").unwrap(); /// let hay = "lionXXtigerXleopard"; /// let got: Vec<&str> = re.split(hay).collect(); /// assert_eq!(got, vec!["lion", "", "tiger", "leopard"]); /// /// let re = Regex::new(r"::").unwrap(); /// let hay = "lion::tiger::leopard"; /// let got: Vec<&str> = re.split(hay).collect(); /// assert_eq!(got, vec!["lion", "tiger", "leopard"]); /// ``` /// /// If a haystack contains multiple contiguous matches, you will end up /// with empty spans yielded by the iterator: /// /// ``` /// use regex_lite::Regex; /// /// let re = Regex::new(r"X").unwrap(); /// let hay = "XXXXaXXbXc"; /// let got: Vec<&str> = re.split(hay).collect(); /// assert_eq!(got, vec!["", "", "", "", "a", "", "b", "c"]); /// /// let re = Regex::new(r"/").unwrap(); /// let hay = "(///)"; /// let got: Vec<&str> = re.split(hay).collect(); /// assert_eq!(got, vec!["(", "", "", ")"]); /// ``` /// /// Separators at the start or end of a haystack are neighbored by empty /// substring. /// /// ``` /// use regex_lite::Regex; /// /// let re = Regex::new(r"0").unwrap(); /// let hay = "010"; /// let got: Vec<&str> = re.split(hay).collect(); /// assert_eq!(got, vec!["", "1", ""]); /// ``` /// /// When the empty string is used as a regex, it splits at every valid /// UTF-8 boundary by default (which includes the beginning and end of the /// haystack): /// /// ``` /// use regex_lite::Regex; /// /// let re = Regex::new(r"").unwrap(); /// let hay = "rust"; /// let got: Vec<&str> = re.split(hay).collect(); /// assert_eq!(got, vec!["", "r", "u", "s", "t", ""]); /// /// // Splitting by an empty string is UTF-8 aware by default! /// let re = Regex::new(r"").unwrap(); /// let hay = "☃"; /// let got: Vec<&str> = re.split(hay).collect(); /// assert_eq!(got, vec!["", "☃", ""]); /// ``` /// /// Contiguous separators (commonly shows up with whitespace), can lead to /// possibly surprising behavior. For example, this code is correct: /// /// ``` /// use regex_lite::Regex; /// /// let re = Regex::new(r" ").unwrap(); /// let hay = " a b c"; /// let got: Vec<&str> = re.split(hay).collect(); /// assert_eq!(got, vec!["", "", "", "", "a", "", "b", "c"]); /// ``` /// /// It does *not* give you `["a", "b", "c"]`. For that behavior, you'd want /// to match contiguous space characters: /// /// ``` /// use regex_lite::Regex; /// /// let re = Regex::new(r" +").unwrap(); /// let hay = " a b c"; /// let got: Vec<&str> = re.split(hay).collect(); /// // N.B. This does still include a leading empty span because ' +' /// // matches at the beginning of the haystack. 
/// assert_eq!(got, vec!["", "a", "b", "c"]); /// ``` #[inline] pub fn split<'r, 'h>(&'r self, haystack: &'h str) -> Split<'r, 'h> { Split { haystack, finder: self.find_iter(haystack), last: 0 } } /// Returns an iterator of at most `limit` substrings of the haystack /// given, delimited by a match of the regex. (A `limit` of `0` will return /// no substrings.) Namely, each element of the iterator corresponds to a /// part of the haystack that *isn't* matched by the regular expression. /// The remainder of the haystack that is not split will be the last /// element in the iterator. /// /// # Time complexity /// /// Since iterators over all matches requires running potentially many /// searches on the haystack, and since each search has worst case /// `O(m * n)` time complexity, the overall worst case time complexity for /// this routine is `O(m * n^2)`. /// /// Although note that the worst case time here has an upper bound given /// by the `limit` parameter. /// /// # Example /// /// Get the first two words in some haystack: /// /// ``` /// use regex_lite::Regex; /// /// let re = Regex::new(r"\W+").unwrap(); /// let hay = "Hey! How are you?"; /// let fields: Vec<&str> = re.splitn(hay, 3).collect(); /// assert_eq!(fields, vec!["Hey", "How", "are you?"]); /// ``` /// /// # Examples: more cases /// /// ``` /// use regex_lite::Regex; /// /// let re = Regex::new(r" ").unwrap(); /// let hay = "Mary had a little lamb"; /// let got: Vec<&str> = re.splitn(hay, 3).collect(); /// assert_eq!(got, vec!["Mary", "had", "a little lamb"]); /// /// let re = Regex::new(r"X").unwrap(); /// let hay = ""; /// let got: Vec<&str> = re.splitn(hay, 3).collect(); /// assert_eq!(got, vec![""]); /// /// let re = Regex::new(r"X").unwrap(); /// let hay = "lionXXtigerXleopard"; /// let got: Vec<&str> = re.splitn(hay, 3).collect(); /// assert_eq!(got, vec!["lion", "", "tigerXleopard"]); /// /// let re = Regex::new(r"::").unwrap(); /// let hay = "lion::tiger::leopard"; /// let got: Vec<&str> = re.splitn(hay, 2).collect(); /// assert_eq!(got, vec!["lion", "tiger::leopard"]); /// /// let re = Regex::new(r"X").unwrap(); /// let hay = "abcXdef"; /// let got: Vec<&str> = re.splitn(hay, 1).collect(); /// assert_eq!(got, vec!["abcXdef"]); /// /// let re = Regex::new(r"X").unwrap(); /// let hay = "abcdef"; /// let got: Vec<&str> = re.splitn(hay, 2).collect(); /// assert_eq!(got, vec!["abcdef"]); /// /// let re = Regex::new(r"X").unwrap(); /// let hay = "abcXdef"; /// let got: Vec<&str> = re.splitn(hay, 0).collect(); /// assert!(got.is_empty()); /// ``` #[inline] pub fn splitn<'r, 'h>( &'r self, haystack: &'h str, limit: usize, ) -> SplitN<'r, 'h> { SplitN { splits: self.split(haystack), limit } } /// Replaces the leftmost-first match in the given haystack with the /// replacement provided. The replacement can be a regular string (where /// `$N` and `$name` are expanded to match capture groups) or a function /// that takes a [`Captures`] and returns the replaced string. /// /// If no match is found, then the haystack is returned unchanged. In that /// case, this implementation will likely return a `Cow::Borrowed` value /// such that no allocation is performed. /// /// # Replacement string syntax /// /// All instances of `$ref` in the replacement string are replaced with /// the substring corresponding to the capture group identified by `ref`. 
/// /// `ref` may be an integer corresponding to the index of the capture group /// (counted by order of opening parenthesis where `0` is the entire match) /// or it can be a name (consisting of letters, digits or underscores) /// corresponding to a named capture group. /// /// If `ref` isn't a valid capture group (whether the name doesn't exist or /// isn't a valid index), then it is replaced with the empty string. /// /// The longest possible name is used. For example, `$1a` looks up the /// capture group named `1a` and not the capture group at index `1`. To /// exert more precise control over the name, use braces, e.g., `${1}a`. /// /// To write a literal `$` use `$$`. /// /// # Example /// /// Note that this function is polymorphic with respect to the replacement. /// In typical usage, this can just be a normal string: /// /// ``` /// use regex_lite::Regex; /// /// let re = Regex::new(r"[^01]+").unwrap(); /// assert_eq!(re.replace("1078910", ""), "1010"); /// ``` /// /// But anything satisfying the [`Replacer`] trait will work. For example, /// a closure of type `|&Captures| -> String` provides direct access to the /// captures corresponding to a match. This allows one to access capturing /// group matches easily: /// /// ``` /// use regex_lite::{Captures, Regex}; /// /// let re = Regex::new(r"([^,\s]+),\s+(\S+)").unwrap(); /// let result = re.replace("<NAME>", |caps: &Captures| { /// format!("{} {}", &caps[2], &caps[1]) /// }); /// assert_eq!(result, "<NAME>"); /// ``` /// /// But this is a bit cumbersome to use all the time. Instead, a simple /// syntax is supported (as described above) that expands `$name` into the /// corresponding capture group. Here's the last example, but using this /// expansion technique with named capture groups: /// /// ``` /// use regex_lite::Regex; /// /// let re = Regex::new(r"(?<last>[^,\s]+),\s+(?<first>\S+)").unwrap(); /// let result = re.replace("<NAME>", "$first $last"); /// assert_eq!(result, "<NAME>"); /// ``` /// /// Note that using `$2` instead of `$first` or `$1` instead of `$last` /// would produce the same result. To write a literal `$` use `$$`. /// /// Sometimes the replacement string requires use of curly braces to /// delineate a capture group replacement when it is adjacent to some other /// literal text. For example, if we wanted to join two words together with /// an underscore: /// /// ``` /// use regex_lite::Regex; /// /// let re = Regex::new(r"(?<first>\w+)\s+(?<second>\w+)").unwrap(); /// let result = re.replace("deep fried", "${first}_$second"); /// assert_eq!(result, "deep_fried"); /// ``` /// /// Without the curly braces, the capture group name `first_` would be /// used, and since it doesn't exist, it would be replaced with the empty /// string. /// /// Finally, sometimes you just want to replace a literal string with no /// regard for capturing group expansion. This can be done by wrapping a /// string with [`NoExpand`]: /// /// ``` /// use regex_lite::{NoExpand, Regex}; /// /// let re = Regex::new(r"(?<last>[^,\s]+),\s+(\S+)").unwrap(); /// let result = re.replace("<NAME>", NoExpand("$2 $last")); /// assert_eq!(result, "$2 $last"); /// ``` /// /// Using `NoExpand` may also be faster, since the replacement string won't /// need to be parsed for the `$` syntax. #[inline] pub fn replace<'h, R: Replacer>( &self, haystack: &'h str, rep: R, ) -> Cow<'h, str> { self.replacen(haystack, 1, rep) } /// Replaces all non-overlapping matches in the haystack with the /// replacement provided. 
This is the same as calling `replacen` with /// `limit` set to `0`. /// /// The documentation for [`Regex::replace`] goes into more detail about /// what kinds of replacement strings are supported. /// /// # Time complexity /// /// Since iterators over all matches requires running potentially many /// searches on the haystack, and since each search has worst case /// `O(m * n)` time complexity, the overall worst case time complexity for /// this routine is `O(m * n^2)`. /// /// # Fallibility /// /// If you need to write a replacement routine where any individual /// replacement might "fail," doing so with this API isn't really feasible /// because there's no way to stop the search process if a replacement /// fails. Instead, if you need this functionality, you should consider /// implementing your own replacement routine: /// /// ``` /// use regex_lite::{Captures, Regex}; /// /// fn replace_all<E>( /// re: &Regex, /// haystack: &str, /// replacement: impl Fn(&Captures) -> Result<String, E>, /// ) -> Result<String, E> { /// let mut new = String::with_capacity(haystack.len()); /// let mut last_match = 0; /// for caps in re.captures_iter(haystack) { /// let m = caps.get(0).unwrap(); /// new.push_str(&haystack[last_match..m.start()]); /// new.push_str(&replacement(&caps)?); /// last_match = m.end(); /// } /// new.push_str(&haystack[last_match..]); /// Ok(new) /// } /// /// // Let's replace each word with the number of bytes in that word. /// // But if we see a word that is "too long," we'll give up. /// let re = Regex::new(r"\w+").unwrap(); /// let replacement = |caps: &Captures| -> Result<String, &'static str> { /// if caps[0].len() >= 5 { /// return Err("word too long"); /// } /// Ok(caps[0].len().to_string()) /// }; /// assert_eq!( /// Ok("2 3 3 3?".to_string()), /// replace_all(&re, "hi how are you?", &replacement), /// ); /// assert!(replace_all(&re, "hi there", &replacement).is_err()); /// ``` /// /// # Example /// /// This example shows how to flip the order of whitespace delimited /// fields, and normalizes the whitespace that delimits the fields: /// /// ``` /// use regex_lite::Regex; /// /// let re = Regex::new(r"(?m)^(\S+)\s+(\S+)$").unwrap(); /// let hay = " /// Greetings 1973 /// Wild\t1973 /// BornToRun\t\t\t\t1975 /// Darkness 1978 /// TheRiver 1980 /// "; /// let new = re.replace_all(hay, "$2 $1"); /// assert_eq!(new, " /// 1973 Greetings /// 1973 Wild /// 1975 BornToRun /// 1978 Darkness /// 1980 TheRiver /// "); /// ``` #[inline] pub fn replace_all<'h, R: Replacer>( &self, haystack: &'h str, rep: R, ) -> Cow<'h, str> { self.replacen(haystack, 0, rep) } /// Replaces at most `limit` non-overlapping matches in the haystack with /// the replacement provided. If `limit` is `0`, then all non-overlapping /// matches are replaced. That is, `Regex::replace_all(hay, rep)` is /// equivalent to `Regex::replacen(hay, 0, rep)`. /// /// The documentation for [`Regex::replace`] goes into more detail about /// what kinds of replacement strings are supported. /// /// # Time complexity /// /// Since iterators over all matches requires running potentially many /// searches on the haystack, and since each search has worst case /// `O(m * n)` time complexity, the overall worst case time complexity for /// this routine is `O(m * n^2)`. /// /// Although note that the worst case time here has an upper bound given /// by the `limit` parameter. 
    ///
    /// # Fallibility
    ///
    /// See the corresponding section in the docs for [`Regex::replace_all`]
    /// for tips on how to deal with a replacement routine that can fail.
    ///
    /// # Example
    ///
    /// This example shows how to flip the order of whitespace delimited
    /// fields, and normalizes the whitespace that delimits the fields. But we
    /// only do it for the first two matches.
    ///
    /// ```
    /// use regex_lite::Regex;
    ///
    /// let re = Regex::new(r"(?m)^(\S+)\s+(\S+)$").unwrap();
    /// let hay = "
    /// Greetings 1973
    /// Wild\t1973
    /// BornToRun\t\t\t\t1975
    /// Darkness 1978
    /// TheRiver 1980
    /// ";
    /// let new = re.replacen(hay, 2, "$2 $1");
    /// assert_eq!(new, "
    /// 1973 Greetings
    /// 1973 Wild
    /// BornToRun\t\t\t\t1975
    /// Darkness 1978
    /// TheRiver 1980
    /// ");
    /// ```
    #[inline]
    pub fn replacen<'h, R: Replacer>(
        &self,
        haystack: &'h str,
        limit: usize,
        mut rep: R,
    ) -> Cow<'h, str> {
        // If we know that the replacement doesn't have any capture expansions,
        // then we can use the fast path. The fast path can make a tremendous
        // difference:
        //
        // 1) We use `find_iter` instead of `captures_iter`. Not asking for
        // captures generally makes the regex engines faster.
        // 2) We don't need to look up all of the capture groups and do
        // replacements inside the replacement string. We just push it
        // at each match and be done with it.
        if let Some(rep) = rep.no_expansion() {
            let mut it = self.find_iter(haystack).enumerate().peekable();
            if it.peek().is_none() {
                return Cow::Borrowed(haystack);
            }
            let mut new = String::with_capacity(haystack.len());
            let mut last_match = 0;
            for (i, m) in it {
                new.push_str(&haystack[last_match..m.start()]);
                new.push_str(&rep);
                last_match = m.end();
                if limit > 0 && i >= limit - 1 {
                    break;
                }
            }
            new.push_str(&haystack[last_match..]);
            return Cow::Owned(new);
        }
        // The slower path, which we use if the replacement needs access to
        // capture groups.
        let mut it = self.captures_iter(haystack).enumerate().peekable();
        if it.peek().is_none() {
            return Cow::Borrowed(haystack);
        }
        let mut new = String::with_capacity(haystack.len());
        let mut last_match = 0;
        for (i, cap) in it {
            // unwrap on 0 is OK because captures only reports matches
            let m = cap.get(0).unwrap();
            new.push_str(&haystack[last_match..m.start()]);
            rep.replace_append(&cap, &mut new);
            last_match = m.end();
            if limit > 0 && i >= limit - 1 {
                break;
            }
        }
        new.push_str(&haystack[last_match..]);
        Cow::Owned(new)
    }
}

/// A group of advanced or "lower level" search methods. Some methods permit
/// starting the search at a position greater than `0` in the haystack. Other
/// methods permit reusing allocations, for example, when extracting the
/// matches for capture groups.
impl Regex {
    /// Returns the end byte offset of the first match in the haystack given.
    ///
    /// This method may have the same performance characteristics as
    /// `is_match`. Behaviorally, it doesn't just report whether a match
    /// occurs, but also the end offset for a match. In particular, the offset
    /// returned *may be shorter* than the proper end of the leftmost-first
    /// match that you would find via [`Regex::find`].
    ///
    /// Note that it is not guaranteed that this routine finds the shortest or
    /// "earliest" possible match. Instead, the main idea of this API is that
    /// it returns the offset at the point at which the internal regex engine
    /// has determined that a match has occurred. This may vary depending on
    /// which internal regex engine is used, and thus, the offset itself may
    /// change based on internal heuristics.
/// /// # Example /// /// Typically, `a+` would match the entire first sequence of `a` in some /// haystack, but `shortest_match` *may* give up as soon as it sees the /// first `a`. /// /// ``` /// use regex_lite::Regex; /// /// let re = Regex::new(r"a+").unwrap(); /// let offset = re.shortest_match("aaaaa").unwrap(); /// assert_eq!(offset, 1); /// ``` #[inline] pub fn shortest_match(&self, haystack: &str) -> Option<usize> { self.shortest_match_at(haystack, 0) } /// Returns the same as [`Regex::shortest_match`], but starts the search at /// the given offset. /// /// The significance of the starting point is that it takes the surrounding /// context into consideration. For example, the `\A` anchor can only match /// when `start == 0`. /// /// If a match is found, the offset returned is relative to the beginning /// of the haystack, not the beginning of the search. /// /// # Panics /// /// This panics when `start >= haystack.len() + 1`. /// /// # Example /// /// This example shows the significance of `start` by demonstrating how it /// can be used to permit look-around assertions in a regex to take the /// surrounding context into account. /// /// ``` /// use regex_lite::Regex; /// /// let re = Regex::new(r"\bchew\b").unwrap(); /// let hay = "eschew"; /// // We get a match here, but it's probably not intended. /// assert_eq!(re.shortest_match(&hay[2..]), Some(4)); /// // No match because the assertions take the context into account. /// assert_eq!(re.shortest_match_at(hay, 2), None); /// ``` #[inline] pub fn shortest_match_at( &self, haystack: &str, start: usize, ) -> Option<usize> { let mut cache = self.pool.get(); let mut slots = [None, None]; let matched = self.pikevm.search( &mut cache, haystack.as_bytes(), start, haystack.len(), true, &mut slots, ); if !matched { return None; } Some(slots[1].unwrap().get()) } /// Returns the same as [`Regex::is_match`], but starts the search at the /// given offset. /// /// The significance of the starting point is that it takes the surrounding /// context into consideration. For example, the `\A` anchor can only /// match when `start == 0`. /// /// # Panics /// /// This panics when `start >= haystack.len() + 1`. /// /// # Example /// /// This example shows the significance of `start` by demonstrating how it /// can be used to permit look-around assertions in a regex to take the /// surrounding context into account. /// /// ``` /// use regex_lite::Regex; /// /// let re = Regex::new(r"\bchew\b").unwrap(); /// let hay = "eschew"; /// // We get a match here, but it's probably not intended. /// assert!(re.is_match(&hay[2..])); /// // No match because the assertions take the context into account. /// assert!(!re.is_match_at(hay, 2)); /// ``` #[inline] pub fn is_match_at(&self, haystack: &str, start: usize) -> bool { let mut cache = self.pool.get(); self.pikevm.search( &mut cache, haystack.as_bytes(), start, haystack.len(), true, &mut [], ) } /// Returns the same as [`Regex::find`], but starts the search at the given /// offset. /// /// The significance of the starting point is that it takes the surrounding /// context into consideration. For example, the `\A` anchor can only /// match when `start == 0`. /// /// # Panics /// /// This panics when `start >= haystack.len() + 1`. /// /// # Example /// /// This example shows the significance of `start` by demonstrating how it /// can be used to permit look-around assertions in a regex to take the /// surrounding context into account. 
/// /// ``` /// use regex_lite::Regex; /// /// let re = Regex::new(r"\bchew\b").unwrap(); /// let hay = "eschew"; /// // We get a match here, but it's probably not intended. /// assert_eq!(re.find(&hay[2..]).map(|m| m.range()), Some(0..4)); /// // No match because the assertions take the context into account. /// assert_eq!(re.find_at(hay, 2), None); /// ``` #[inline] pub fn find_at<'h>( &self, haystack: &'h str, start: usize, ) -> Option<Match<'h>> { let mut cache = self.pool.get(); let mut slots = [None, None]; let matched = self.pikevm.search( &mut cache, haystack.as_bytes(), start, haystack.len(), false, &mut slots, ); if !matched { return None; } let (start, end) = (slots[0].unwrap().get(), slots[1].unwrap().get()); Some(Match::new(haystack, start, end)) } /// Returns the same as [`Regex::captures`], but starts the search at the /// given offset. /// /// The significance of the starting point is that it takes the surrounding /// context into consideration. For example, the `\A` anchor can only /// match when `start == 0`. /// /// # Panics /// /// This panics when `start >= haystack.len() + 1`. /// /// # Example /// /// This example shows the significance of `start` by demonstrating how it /// can be used to permit look-around assertions in a regex to take the /// surrounding context into account. /// /// ``` /// use regex_lite::Regex; /// /// let re = Regex::new(r"\bchew\b").unwrap(); /// let hay = "eschew"; /// // We get a match here, but it's probably not intended. /// assert_eq!(&re.captures(&hay[2..]).unwrap()[0], "chew"); /// // No match because the assertions take the context into account. /// assert!(re.captures_at(hay, 2).is_none()); /// ``` #[inline] pub fn captures_at<'h>( &self, haystack: &'h str, start: usize, ) -> Option<Captures<'h>> { let mut caps = Captures { haystack, slots: self.capture_locations(), pikevm: Arc::clone(&self.pikevm), }; let mut cache = self.pool.get(); let matched = self.pikevm.search( &mut cache, haystack.as_bytes(), start, haystack.len(), false, &mut caps.slots.0, ); if !matched { return None; } Some(caps) } /// This is like [`Regex::captures`], but writes the byte offsets of each /// capture group match into the locations given. /// /// A [`CaptureLocations`] stores the same byte offsets as a [`Captures`], /// but does *not* store a reference to the haystack. This makes its API /// a bit lower level and less convenience. But in exchange, callers /// may allocate their own `CaptureLocations` and reuse it for multiple /// searches. This may be helpful if allocating a `Captures` shows up in a /// profile as too costly. /// /// To create a `CaptureLocations` value, use the /// [`Regex::capture_locations`] method. /// /// This also returns the overall match if one was found. When a match is /// found, its offsets are also always stored in `locs` at index `0`. /// /// # Panics /// /// This routine may panic if the given `CaptureLocations` was not created /// by this regex. 
/// /// # Example /// /// ``` /// use regex_lite::Regex; /// /// let re = Regex::new(r"^([a-z]+)=(\S*)$").unwrap(); /// let mut locs = re.capture_locations(); /// assert!(re.captures_read(&mut locs, "id=foo123").is_some()); /// assert_eq!(Some((0, 9)), locs.get(0)); /// assert_eq!(Some((0, 2)), locs.get(1)); /// assert_eq!(Some((3, 9)), locs.get(2)); /// ``` #[inline] pub fn captures_read<'h>( &self, locs: &mut CaptureLocations, haystack: &'h str, ) -> Option<Match<'h>> { self.captures_read_at(locs, haystack, 0) } /// Returns the same as [`Regex::captures_read`], but starts the search at /// the given offset. /// /// The significance of the starting point is that it takes the surrounding /// context into consideration. For example, the `\A` anchor can only /// match when `start == 0`. /// /// # Panics /// /// This panics when `start >= haystack.len() + 1`. /// /// This routine may also panic if the given `CaptureLocations` was not /// created by this regex. /// /// # Example /// /// This example shows the significance of `start` by demonstrating how it /// can be used to permit look-around assertions in a regex to take the /// surrounding context into account. /// /// ``` /// use regex_lite::Regex; /// /// let re = Regex::new(r"\bchew\b").unwrap(); /// let hay = "eschew"; /// let mut locs = re.capture_locations(); /// // We get a match here, but it's probably not intended. /// assert!(re.captures_read(&mut locs, &hay[2..]).is_some()); /// // No match because the assertions take the context into account. /// assert!(re.captures_read_at(&mut locs, hay, 2).is_none()); /// ``` #[inline] pub fn captures_read_at<'h>( &self, locs: &mut CaptureLocations, haystack: &'h str, start: usize, ) -> Option<Match<'h>> { let mut cache = self.pool.get(); let matched = self.pikevm.search( &mut cache, haystack.as_bytes(), start, haystack.len(), false, &mut locs.0, ); if !matched { return None; } let (start, end) = locs.get(0).unwrap(); Some(Match::new(haystack, start, end)) } } /// Auxiliary methods. impl Regex { /// Returns the original string of this regex. /// /// # Example /// /// ``` /// use regex_lite::Regex; /// /// let re = Regex::new(r"foo\w+bar").unwrap(); /// assert_eq!(re.as_str(), r"foo\w+bar"); /// ``` #[inline] pub fn as_str(&self) -> &str { &self.pikevm.nfa().pattern() } /// Returns an iterator over the capture names in this regex. /// /// The iterator returned yields elements of type `Option<&str>`. That is, /// the iterator yields values for all capture groups, even ones that are /// unnamed. The order of the groups corresponds to the order of the group's /// corresponding opening parenthesis. /// /// The first element of the iterator always yields the group corresponding /// to the overall match, and this group is always unnamed. Therefore, the /// iterator always yields at least one group. /// /// # Example /// /// This shows basic usage with a mix of named and unnamed capture groups: /// /// ``` /// use regex_lite::Regex; /// /// let re = Regex::new(r"(?<a>.(?<b>.))(.)(?:.)(?<c>.)").unwrap(); /// let mut names = re.capture_names(); /// assert_eq!(names.next(), Some(None)); /// assert_eq!(names.next(), Some(Some("a"))); /// assert_eq!(names.next(), Some(Some("b"))); /// assert_eq!(names.next(), Some(None)); /// // the '(?:.)' group is non-capturing and so doesn't appear here! 
/// assert_eq!(names.next(), Some(Some("c"))); /// assert_eq!(names.next(), None); /// ``` /// /// The iterator always yields at least one element, even for regexes with /// no capture groups and even for regexes that can never match: /// /// ``` /// use regex_lite::Regex; /// /// let re = Regex::new(r"").unwrap(); /// let mut names = re.capture_names(); /// assert_eq!(names.next(), Some(None)); /// assert_eq!(names.next(), None); /// /// let re = Regex::new(r"[^\s\S]").unwrap(); /// let mut names = re.capture_names(); /// assert_eq!(names.next(), Some(None)); /// assert_eq!(names.next(), None); /// ``` #[inline] pub fn capture_names(&self) -> CaptureNames<'_> { CaptureNames(self.pikevm.nfa().capture_names()) } /// Returns the number of captures groups in this regex. /// /// This includes all named and unnamed groups, including the implicit /// unnamed group that is always present and corresponds to the entire /// match. /// /// Since the implicit unnamed group is always included in this length, the /// length returned is guaranteed to be greater than zero. /// /// # Example /// /// ``` /// use regex_lite::Regex; /// /// let re = Regex::new(r"foo").unwrap(); /// assert_eq!(1, re.captures_len()); /// /// let re = Regex::new(r"(foo)").unwrap(); /// assert_eq!(2, re.captures_len()); /// /// let re = Regex::new(r"(?<a>.(?<b>.))(.)(?:.)(?<c>.)").unwrap(); /// assert_eq!(5, re.captures_len()); /// /// let re = Regex::new(r"[^\s\S]").unwrap(); /// assert_eq!(1, re.captures_len()); /// ``` #[inline] pub fn captures_len(&self) -> usize { self.pikevm.nfa().group_len() } /// Returns the total number of capturing groups that appear in every /// possible match. /// /// If the number of capture groups can vary depending on the match, then /// this returns `None`. That is, a value is only returned when the number /// of matching groups is invariant or "static." /// /// Note that like [`Regex::captures_len`], this **does** include the /// implicit capturing group corresponding to the entire match. Therefore, /// when a non-None value is returned, it is guaranteed to be at least `1`. /// Stated differently, a return value of `Some(0)` is impossible. /// /// # Example /// /// This shows a few cases where a static number of capture groups is /// available and a few cases where it is not. /// /// ``` /// use regex_lite::Regex; /// /// let len = |pattern| { /// Regex::new(pattern).map(|re| re.static_captures_len()) /// }; /// /// assert_eq!(Some(1), len("a")?); /// assert_eq!(Some(2), len("(a)")?); /// assert_eq!(Some(2), len("(a)|(b)")?); /// assert_eq!(Some(3), len("(a)(b)|(c)(d)")?); /// assert_eq!(None, len("(a)|b")?); /// assert_eq!(None, len("a|(b)")?); /// assert_eq!(None, len("(b)*")?); /// assert_eq!(Some(2), len("(b)+")?); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn static_captures_len(&self) -> Option<usize> { self.pikevm .nfa() .static_explicit_captures_len() .map(|len| len.saturating_add(1)) } /// Returns a fresh allocated set of capture locations that can /// be reused in multiple calls to [`Regex::captures_read`] or /// [`Regex::captures_read_at`]. /// /// The returned locations can be used for any subsequent search for this /// particular regex. There is no guarantee that it is correct to use for /// other regexes, even if they have the same number of capture groups. 
/// /// # Example /// /// ``` /// use regex_lite::Regex; /// /// let re = Regex::new(r"(.)(.)(\w+)").unwrap(); /// let mut locs = re.capture_locations(); /// assert!(re.captures_read(&mut locs, "Padron").is_some()); /// assert_eq!(locs.get(0), Some((0, 6))); /// assert_eq!(locs.get(1), Some((0, 1))); /// assert_eq!(locs.get(2), Some((1, 2))); /// assert_eq!(locs.get(3), Some((2, 6))); /// ``` #[inline] pub fn capture_locations(&self) -> CaptureLocations { // OK because NFA construction would have failed if this overflowed. let len = self.pikevm.nfa().group_len().checked_mul(2).unwrap(); CaptureLocations(vec![None; len]) } } /// Represents a single match of a regex in a haystack. /// /// A `Match` contains both the start and end byte offsets of the match and the /// actual substring corresponding to the range of those byte offsets. It is /// guaranteed that `start <= end`. When `start == end`, the match is empty. /// /// Since this `Match` can only be produced by the top-level `Regex` APIs /// that only support searching UTF-8 encoded strings, the byte offsets for a /// `Match` are guaranteed to fall on valid UTF-8 codepoint boundaries. That /// is, slicing a `&str` with [`Match::range`] is guaranteed to never panic. /// /// Values with this type are created by [`Regex::find`] or /// [`Regex::find_iter`]. Other APIs can create `Match` values too. For /// example, [`Captures::get`]. /// /// The lifetime parameter `'h` refers to the lifetime of the matched of the /// haystack that this match was produced from. /// /// # Numbering /// /// The byte offsets in a `Match` form a half-open interval. That is, the /// start of the range is inclusive and the end of the range is exclusive. /// For example, given a haystack `abcFOOxyz` and a match of `FOO`, its byte /// offset range starts at `3` and ends at `6`. `3` corresponds to `F` and /// `6` corresponds to `x`, which is one past the end of the match. This /// corresponds to the same kind of slicing that Rust uses. /// /// For more on why this was chosen over other schemes (aside from being /// consistent with how Rust the language works), see [this discussion] and /// [Dijkstra's note on a related topic][note]. /// /// [this discussion]: https://github.com/rust-lang/regex/discussions/866 /// [note]: https://www.cs.utexas.edu/users/EWD/transcriptions/EWD08xx/EWD831.html /// /// # Example /// /// This example shows the value of each of the methods on `Match` for a /// particular search. /// /// ``` /// use regex_lite::Regex; /// /// let re = Regex::new(r"\d+").unwrap(); /// let hay = "numbers: 1234"; /// let m = re.find(hay).unwrap(); /// assert_eq!(9, m.start()); /// assert_eq!(13, m.end()); /// assert!(!m.is_empty()); /// assert_eq!(4, m.len()); /// assert_eq!(9..13, m.range()); /// assert_eq!("1234", m.as_str()); /// ``` #[derive(Copy, Clone, Eq, PartialEq)] pub struct Match<'h> { haystack: &'h str, start: usize, end: usize, } impl<'h> Match<'h> { /// Creates a new match from the given haystack and byte offsets. #[inline] fn new(haystack: &'h str, start: usize, end: usize) -> Match<'h> { Match { haystack, start, end } } /// Returns the byte offset of the start of the match in the haystack. The /// start of the match corresponds to the position where the match begins /// and includes the first byte in the match. /// /// It is guaranteed that `Match::start() <= Match::end()`. /// /// This is guaranteed to fall on a valid UTF-8 codepoint boundary. 
That /// is, it will never be an offset that appears between the UTF-8 code /// units of a UTF-8 encoded Unicode scalar value. Consequently, it is /// always safe to slice the corresponding haystack using this offset. #[inline] pub fn start(&self) -> usize { self.start } /// Returns the byte offset of the end of the match in the haystack. The /// end of the match corresponds to the byte immediately following the last /// byte in the match. This means that `&slice[start..end]` works as one /// would expect. /// /// It is guaranteed that `Match::start() <= Match::end()`. /// /// This is guaranteed to fall on a valid UTF-8 codepoint boundary. That /// is, it will never be an offset that appears between the UTF-8 code /// units of a UTF-8 encoded Unicode scalar value. Consequently, it is /// always safe to slice the corresponding haystack using this offset. #[inline] pub fn end(&self) -> usize { self.end } /// Returns true if and only if this match has a length of zero. /// /// Note that an empty match can only occur when the regex itself can /// match the empty string. Here are some examples of regexes that can /// all match the empty string: `^`, `^$`, `\b`, `a?`, `a*`, `a{0}`, /// `(foo|\d+|quux)?`. #[inline] pub fn is_empty(&self) -> bool { self.start == self.end } /// Returns the length, in bytes, of this match. #[inline] pub fn len(&self) -> usize { self.end - self.start } /// Returns the range over the starting and ending byte offsets of the /// match in the haystack. /// /// It is always correct to slice the original haystack searched with this /// range. That is, because the offsets are guaranteed to fall on valid /// UTF-8 boundaries, the range returned is always valid. #[inline] pub fn range(&self) -> core::ops::Range<usize> { self.start..self.end } /// Returns the substring of the haystack that matched. #[inline] pub fn as_str(&self) -> &'h str { &self.haystack[self.range()] } } impl<'h> core::fmt::Debug for Match<'h> { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { f.debug_struct("Match") .field("start", &self.start) .field("end", &self.end) .field("string", &self.as_str()) .finish() } } impl<'h> From<Match<'h>> for &'h str { fn from(m: Match<'h>) -> &'h str { m.as_str() } } impl<'h> From<Match<'h>> for core::ops::Range<usize> { fn from(m: Match<'h>) -> core::ops::Range<usize> { m.range() } } /// Represents the capture groups for a single match. /// /// Capture groups refer to parts of a regex enclosed in parentheses. They can /// be optionally named. The purpose of capture groups is to be able to /// reference different parts of a match based on the original pattern. For /// example, say you want to match the individual letters in a 5-letter word: /// /// ```text /// (?<first>\w)(\w)(?:\w)\w(?<last>\w) /// ``` /// /// This regex has 4 capture groups: /// /// * The group at index `0` corresponds to the overall match. It is always /// present in every match and never has a name. /// * The group at index `1` with name `first` corresponding to the first /// letter. /// * The group at index `2` with no name corresponding to the second letter. /// * The group at index `3` with name `last` corresponding to the fifth and /// last letter. /// /// Notice that `(?:\w)` was not listed above as a capture group despite it /// being enclosed in parentheses. That's because `(?:pattern)` is a special /// syntax that permits grouping but *without* capturing. 
The reason for not /// treating it as a capture is that tracking and reporting capture groups /// requires additional state that may lead to slower searches. So using as few /// capture groups as possible can help performance. (Although the difference /// in performance of a couple of capture groups is likely immaterial.) /// /// Values with this type are created by [`Regex::captures`] or /// [`Regex::captures_iter`]. /// /// `'h` is the lifetime of the haystack that these captures were matched from. /// /// # Example /// /// ``` /// use regex_lite::Regex; /// /// let re = Regex::new(r"(?<first>\w)(\w)(?:\w)\w(?<last>\w)").unwrap(); /// let caps = re.captures("toady").unwrap(); /// assert_eq!("toady", &caps[0]); /// assert_eq!("t", &caps["first"]); /// assert_eq!("o", &caps[2]); /// assert_eq!("y", &caps["last"]); /// ``` pub struct Captures<'h> { haystack: &'h str, slots: CaptureLocations, // It's a little weird to put the PikeVM in our Captures, but it's the // simplest thing to do and is cheap. The PikeVM gives us access to the // NFA and the NFA gives us access to the capture name<->index mapping. pikevm: Arc<PikeVM>, } impl<'h> Captures<'h> { /// Returns the `Match` associated with the capture group at index `i`. If /// `i` does not correspond to a capture group, or if the capture group did /// not participate in the match, then `None` is returned. /// /// When `i == 0`, this is guaranteed to return a non-`None` value. /// /// # Examples /// /// Get the substring that matched with a default of an empty string if the /// group didn't participate in the match: /// /// ``` /// use regex_lite::Regex; /// /// let re = Regex::new(r"[a-z]+(?:([0-9]+)|([A-Z]+))").unwrap(); /// let caps = re.captures("abc123").unwrap(); /// /// let substr1 = caps.get(1).map_or("", |m| m.as_str()); /// let substr2 = caps.get(2).map_or("", |m| m.as_str()); /// assert_eq!(substr1, "123"); /// assert_eq!(substr2, ""); /// ``` #[inline] pub fn get(&self, i: usize) -> Option<Match<'h>> { self.slots.get(i).map(|(s, e)| Match::new(self.haystack, s, e)) } /// Returns the `Match` associated with the capture group named `name`. If /// `name` isn't a valid capture group or it refers to a group that didn't /// match, then `None` is returned. /// /// Note that unlike `caps["name"]`, this returns a `Match` whose lifetime /// matches the lifetime of the haystack in this `Captures` value. /// Conversely, the substring returned by `caps["name"]` has a lifetime /// of the `Captures` value, which is likely shorter than the lifetime of /// the haystack. In some cases, it may be necessary to use this method to /// access the matching substring instead of the `caps["name"]` notation. /// /// # Examples /// /// Get the substring that matched with a default of an empty string if the /// group didn't participate in the match: /// /// ``` /// use regex_lite::Regex; /// /// let re = Regex::new( /// r"[a-z]+(?:(?<numbers>[0-9]+)|(?<letters>[A-Z]+))", /// ).unwrap(); /// let caps = re.captures("abc123").unwrap(); /// /// let numbers = caps.name("numbers").map_or("", |m| m.as_str()); /// let letters = caps.name("letters").map_or("", |m| m.as_str()); /// assert_eq!(numbers, "123"); /// assert_eq!(letters, ""); /// ``` #[inline] pub fn name(&self, name: &str) -> Option<Match<'h>> { let i = self.pikevm.nfa().to_index(name)?; self.get(i) } /// This is a convenience routine for extracting the substrings /// corresponding to matching capture groups. 
/// /// This returns a tuple where the first element corresponds to the full /// substring of the haystack that matched the regex. The second element is /// an array of substrings, with each corresponding to the to the substring /// that matched for a particular capture group. /// /// # Panics /// /// This panics if the number of possible matching groups in this /// `Captures` value is not fixed to `N` in all circumstances. /// More precisely, this routine only works when `N` is equivalent to /// [`Regex::static_captures_len`]. /// /// Stated more plainly, if the number of matching capture groups in a /// regex can vary from match to match, then this function always panics. /// /// For example, `(a)(b)|(c)` could produce two matching capture groups /// or one matching capture group for any given match. Therefore, one /// cannot use `extract` with such a pattern. /// /// But a pattern like `(a)(b)|(c)(d)` can be used with `extract` because /// the number of capture groups in every match is always equivalent, /// even if the capture _indices_ in each match are not. /// /// # Example /// /// ``` /// use regex_lite::Regex; /// /// let re = Regex::new(r"([0-9]{4})-([0-9]{2})-([0-9]{2})").unwrap(); /// let hay = "On 2010-03-14, I became a Tenneessee lamb."; /// let Some((full, [year, month, day])) = /// re.captures(hay).map(|caps| caps.extract()) else { return }; /// assert_eq!("2010-03-14", full); /// assert_eq!("2010", year); /// assert_eq!("03", month); /// assert_eq!("14", day); /// ``` /// /// # Example: iteration /// /// This example shows how to use this method when iterating over all /// `Captures` matches in a haystack. /// /// ``` /// use regex_lite::Regex; /// /// let re = Regex::new(r"([0-9]{4})-([0-9]{2})-([0-9]{2})").unwrap(); /// let hay = "1973-01-05, 1975-08-25 and 1980-10-18"; /// /// let mut dates: Vec<(&str, &str, &str)> = vec![]; /// for (_, [y, m, d]) in re.captures_iter(hay).map(|c| c.extract()) { /// dates.push((y, m, d)); /// } /// assert_eq!(dates, vec![ /// ("1973", "01", "05"), /// ("1975", "08", "25"), /// ("1980", "10", "18"), /// ]); /// ``` /// /// # Example: parsing different formats /// /// This API is particularly useful when you need to extract a particular /// value that might occur in a different format. Consider, for example, /// an identifier that might be in double quotes or single quotes: /// /// ``` /// use regex_lite::Regex; /// /// let re = Regex::new(r#"id:(?:"([^"]+)"|'([^']+)')"#).unwrap(); /// let hay = r#"The first is id:"foo" and the second is id:'bar'."#; /// let mut ids = vec![]; /// for (_, [id]) in re.captures_iter(hay).map(|c| c.extract()) { /// ids.push(id); /// } /// assert_eq!(ids, vec!["foo", "bar"]); /// ``` pub fn extract<const N: usize>(&self) -> (&'h str, [&'h str; N]) { let len = self .pikevm .nfa() .static_explicit_captures_len() .expect("number of capture groups can vary in a match"); assert_eq!(N, len, "asked for {} groups, but must ask for {}", N, len); let mut matched = self.iter().flatten(); let whole_match = matched.next().expect("a match").as_str(); let group_matches = [0; N].map(|_| { matched.next().expect("too few matching groups").as_str() }); (whole_match, group_matches) } /// Expands all instances of `$ref` in `replacement` to the corresponding /// capture group, and writes them to the `dst` buffer given. A `ref` can /// be a capture group index or a name. If `ref` doesn't refer to a capture /// group that participated in the match, then it is replaced with the /// empty string. 
/// /// # Format /// /// The format of the replacement string supports two different kinds of /// capture references: unbraced and braced. /// /// For the unbraced format, the format supported is `$ref` where `name` /// can be any character in the class `[0-9A-Za-z_]`. `ref` is always /// the longest possible parse. So for example, `$1a` corresponds to the /// capture group named `1a` and not the capture group at index `1`. If /// `ref` matches `^[0-9]+$`, then it is treated as a capture group index /// itself and not a name. /// /// For the braced format, the format supported is `${ref}` where `ref` can /// be any sequence of bytes except for `}`. If no closing brace occurs, /// then it is not considered a capture reference. As with the unbraced /// format, if `ref` matches `^[0-9]+$`, then it is treated as a capture /// group index and not a name. /// /// The braced format is useful for exerting precise control over the name /// of the capture reference. For example, `${1}a` corresponds to the /// capture group reference `1` followed by the letter `a`, where as `$1a` /// (as mentioned above) corresponds to the capture group reference `1a`. /// The braced format is also useful for expressing capture group names /// that use characters not supported by the unbraced format. For example, /// `${foo[bar].baz}` refers to the capture group named `foo[bar].baz`. /// /// If a capture group reference is found and it does not refer to a valid /// capture group, then it will be replaced with the empty string. /// /// To write a literal `$`, use `$$`. /// /// # Example /// /// ``` /// use regex_lite::Regex; /// /// let re = Regex::new( /// r"(?<day>[0-9]{2})-(?<month>[0-9]{2})-(?<year>[0-9]{4})", /// ).unwrap(); /// let hay = "On 14-03-2010, I became a Tenneessee lamb."; /// let caps = re.captures(hay).unwrap(); /// /// let mut dst = String::new(); /// caps.expand("year=$year, month=$month, day=$day", &mut dst); /// assert_eq!(dst, "year=2010, month=03, day=14"); /// ``` #[inline] pub fn expand(&self, replacement: &str, dst: &mut String) { interpolate::string( replacement, |index, dst| { let m = match self.get(index) { None => return, Some(m) => m, }; dst.push_str(&self.haystack[m.range()]); }, |name| self.pikevm.nfa().to_index(name), dst, ); } /// Returns an iterator over all capture groups. This includes both /// matching and non-matching groups. /// /// The iterator always yields at least one matching group: the first group /// (at index `0`) with no name. Subsequent groups are returned in the order /// of their opening parenthesis in the regex. /// /// The elements yielded have type `Option<Match<'h>>`, where a non-`None` /// value is present if the capture group matches. /// /// # Example /// /// ``` /// use regex_lite::Regex; /// /// let re = Regex::new(r"(\w)(\d)?(\w)").unwrap(); /// let caps = re.captures("AZ").unwrap(); /// /// let mut it = caps.iter(); /// assert_eq!(it.next().unwrap().map(|m| m.as_str()), Some("AZ")); /// assert_eq!(it.next().unwrap().map(|m| m.as_str()), Some("A")); /// assert_eq!(it.next().unwrap().map(|m| m.as_str()), None); /// assert_eq!(it.next().unwrap().map(|m| m.as_str()), Some("Z")); /// assert_eq!(it.next(), None); /// ``` #[inline] pub fn iter<'c>(&'c self) -> SubCaptureMatches<'c, 'h> { SubCaptureMatches { caps: self, it: self.pikevm.nfa().capture_names().enumerate(), } } /// Returns the total number of capture groups. This includes both /// matching and non-matching groups. 
/// /// The length returned is always equivalent to the number of elements /// yielded by [`Captures::iter`]. Consequently, the length is always /// greater than zero since every `Captures` value always includes the /// match for the entire regex. /// /// # Example /// /// ``` /// use regex_lite::Regex; /// /// let re = Regex::new(r"(\w)(\d)?(\w)").unwrap(); /// let caps = re.captures("AZ").unwrap(); /// assert_eq!(caps.len(), 4); /// ``` #[inline] pub fn len(&self) -> usize { self.pikevm.nfa().group_len() } } impl<'h> core::fmt::Debug for Captures<'h> { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { /// A little helper type to provide a nice map-like debug /// representation for our capturing group spans. /// /// regex-automata has something similar, but it includes the pattern /// ID in its debug output, which is confusing. It also doesn't include /// that strings that match because a regex-automata `Captures` doesn't /// borrow the haystack. struct CapturesDebugMap<'a> { caps: &'a Captures<'a>, } impl<'a> core::fmt::Debug for CapturesDebugMap<'a> { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { let mut map = f.debug_map(); let names = self.caps.pikevm.nfa().capture_names(); for (group_index, maybe_name) in names.enumerate() { let key = Key(group_index, maybe_name); match self.caps.get(group_index) { None => map.entry(&key, &None::<()>), Some(mat) => map.entry(&key, &Value(mat)), }; } map.finish() } } struct Key<'a>(usize, Option<&'a str>); impl<'a> core::fmt::Debug for Key<'a> { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { write!(f, "{}", self.0)?; if let Some(name) = self.1 { write!(f, "/{:?}", name)?; } Ok(()) } } struct Value<'a>(Match<'a>); impl<'a> core::fmt::Debug for Value<'a> { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { write!( f, "{}..{}/{:?}", self.0.start(), self.0.end(), self.0.as_str() ) } } f.debug_tuple("Captures") .field(&CapturesDebugMap { caps: self }) .finish() } } /// Get a matching capture group's haystack substring by index. /// /// The haystack substring returned can't outlive the `Captures` object if this /// method is used, because of how `Index` is defined (normally `a[i]` is part /// of `a` and can't outlive it). To work around this limitation, do that, use /// [`Captures::get`] instead. /// /// `'h` is the lifetime of the matched haystack, but the lifetime of the /// `&str` returned by this implementation is the lifetime of the `Captures` /// value itself. /// /// # Panics /// /// If there is no matching group at the given index. impl<'h> core::ops::Index<usize> for Captures<'h> { type Output = str; // The lifetime is written out to make it clear that the &str returned // does NOT have a lifetime equivalent to 'h. fn index(&self, i: usize) -> &str { self.get(i) .map(|m| m.as_str()) .unwrap_or_else(|| panic!("no group at index '{}'", i)) } } /// Get a matching capture group's haystack substring by name. /// /// The haystack substring returned can't outlive the `Captures` object if this /// method is used, because of how `Index` is defined (normally `a[i]` is part /// of `a` and can't outlive it). To work around this limitation, do that, use /// [`Captures::get`] instead. /// /// `'h` is the lifetime of the matched haystack, but the lifetime of the /// `&str` returned by this implementation is the lifetime of the `Captures` /// value itself. /// /// `'n` is the lifetime of the group name used to index the `Captures` value. 
///
/// # Panics
///
/// If there is no matching group with the given name.
impl<'h, 'n> core::ops::Index<&'n str> for Captures<'h> {
    type Output = str;

    fn index<'a>(&'a self, name: &'n str) -> &'a str {
        self.name(name)
            .map(|m| m.as_str())
            .unwrap_or_else(|| panic!("no group named '{}'", name))
    }
}

/// A low level representation of the byte offsets of each capture group.
///
/// You can think of this as a lower level [`Captures`], where this type does
/// not support named capturing groups directly and it does not borrow the
/// haystack that these offsets were matched on.
///
/// Primarily, this type is useful when using the lower level `Regex` APIs such
/// as [`Regex::captures_read`], which permits amortizing the allocation in
/// which capture match offsets are stored.
///
/// In order to build a value of this type, you'll need to call the
/// [`Regex::capture_locations`] method. The value returned can then be reused
/// in subsequent searches for that regex. Using it for other regexes may
/// result in a panic or otherwise incorrect results.
///
/// # Example
///
/// This example shows how to create and use `CaptureLocations` in a search.
///
/// ```
/// use regex_lite::Regex;
///
/// let re = Regex::new(r"(?<first>\w+)\s+(?<last>\w+)").unwrap();
/// let mut locs = re.capture_locations();
/// let m = re.captures_read(&mut locs, "Bruce Springsteen").unwrap();
/// assert_eq!(0..17, m.range());
/// assert_eq!(Some((0, 17)), locs.get(0));
/// assert_eq!(Some((0, 5)), locs.get(1));
/// assert_eq!(Some((6, 17)), locs.get(2));
///
/// // Asking for an invalid capture group always returns None.
/// assert_eq!(None, locs.get(3));
/// assert_eq!(None, locs.get(34973498648));
/// assert_eq!(None, locs.get(9944060567225171988));
/// ```
#[derive(Clone, Debug)]
pub struct CaptureLocations(Vec<Option<NonMaxUsize>>);

impl CaptureLocations {
    /// Returns the start and end byte offsets of the capture group at index
    /// `i`. This returns `None` if `i` is not a valid capture group or if the
    /// capture group did not match.
    ///
    /// # Example
    ///
    /// ```
    /// use regex_lite::Regex;
    ///
    /// let re = Regex::new(r"(?<first>\w+)\s+(?<last>\w+)").unwrap();
    /// let mut locs = re.capture_locations();
    /// re.captures_read(&mut locs, "Bruce Springsteen").unwrap();
    /// assert_eq!(Some((0, 17)), locs.get(0));
    /// assert_eq!(Some((0, 5)), locs.get(1));
    /// assert_eq!(Some((6, 17)), locs.get(2));
    /// ```
    #[inline]
    pub fn get(&self, i: usize) -> Option<(usize, usize)> {
        let slot = i.checked_mul(2)?;
        let start = self.0.get(slot).copied()??.get();
        let slot = slot.checked_add(1)?;
        let end = self.0.get(slot).copied()??.get();
        Some((start, end))
    }

    /// Returns the total number of capture groups (even if they didn't match).
    /// That is, the length returned is unaffected by the result of a search.
    ///
    /// This is always at least `1` since every regex has at least `1`
    /// capturing group that corresponds to the entire match.
    ///
    /// # Example
    ///
    /// ```
    /// use regex_lite::Regex;
    ///
    /// let re = Regex::new(r"(?<first>\w+)\s+(?<last>\w+)").unwrap();
    /// let mut locs = re.capture_locations();
    /// assert_eq!(3, locs.len());
    /// re.captures_read(&mut locs, "Bruce Springsteen").unwrap();
    /// assert_eq!(3, locs.len());
    /// ```
    ///
    /// Notice that the length is always at least `1`, regardless of the regex:
    ///
    /// ```
    /// use regex_lite::Regex;
    ///
    /// let re = Regex::new(r"").unwrap();
    /// let locs = re.capture_locations();
    /// assert_eq!(1, locs.len());
    ///
    /// // [^\s\S] is a regex that never matches anything.
/// let re = Regex::new(r"[^\s\S]").unwrap(); /// let locs = re.capture_locations(); /// assert_eq!(1, locs.len()); /// ``` #[inline] pub fn len(&self) -> usize { // We always have twice as many slots as groups. self.0.len().checked_shr(1).unwrap() } } /// An iterator over all non-overlapping matches in a haystack. /// /// This iterator yields [`Match`] values. The iterator stops when no more /// matches can be found. /// /// `'r` is the lifetime of the compiled regular expression and `'h` is the /// lifetime of the haystack. /// /// This iterator is created by [`Regex::find_iter`]. /// /// # Time complexity /// /// Note that since an iterator runs potentially many searches on the haystack /// and since each search has worst case `O(m * n)` time complexity, the /// overall worst case time complexity for iteration is `O(m * n^2)`. #[derive(Debug)] pub struct Matches<'r, 'h> { haystack: &'h str, it: pikevm::FindMatches<'r, 'h>, } impl<'r, 'h> Iterator for Matches<'r, 'h> { type Item = Match<'h>; #[inline] fn next(&mut self) -> Option<Match<'h>> { self.it.next().map(|(s, e)| Match::new(self.haystack, s, e)) } #[inline] fn count(self) -> usize { self.it.count() } } impl<'r, 'h> core::iter::FusedIterator for Matches<'r, 'h> {} /// An iterator over all non-overlapping capture matches in a haystack. /// /// This iterator yields [`Captures`] values. The iterator stops when no more /// matches can be found. /// /// `'r` is the lifetime of the compiled regular expression and `'h` is the /// lifetime of the matched string. /// /// This iterator is created by [`Regex::captures_iter`]. /// /// # Time complexity /// /// Note that since an iterator runs potentially many searches on the haystack /// and since each search has worst case `O(m * n)` time complexity, the /// overall worst case time complexity for iteration is `O(m * n^2)`. #[derive(Debug)] pub struct CaptureMatches<'r, 'h> { haystack: &'h str, re: &'r Regex, it: pikevm::CapturesMatches<'r, 'h>, } impl<'r, 'h> Iterator for CaptureMatches<'r, 'h> { type Item = Captures<'h>; #[inline] fn next(&mut self) -> Option<Captures<'h>> { self.it.next().map(|slots| Captures { haystack: self.haystack, slots: CaptureLocations(slots), pikevm: Arc::clone(&self.re.pikevm), }) } #[inline] fn count(self) -> usize { self.it.count() } } impl<'r, 'h> core::iter::FusedIterator for CaptureMatches<'r, 'h> {} /// An iterator over all substrings delimited by a regex match. /// /// `'r` is the lifetime of the compiled regular expression and `'h` is the /// lifetime of the byte string being split. /// /// This iterator is created by [`Regex::split`]. /// /// # Time complexity /// /// Note that since an iterator runs potentially many searches on the haystack /// and since each search has worst case `O(m * n)` time complexity, the /// overall worst case time complexity for iteration is `O(m * n^2)`. #[derive(Debug)] pub struct Split<'r, 'h> { haystack: &'h str, finder: Matches<'r, 'h>, last: usize, } impl<'r, 'h> Iterator for Split<'r, 'h> { type Item = &'h str; #[inline] fn next(&mut self) -> Option<&'h str> { match self.finder.next() { None => { let len = self.haystack.len(); if self.last > len { None } else { let range = self.last..len; self.last = len + 1; // Next call will return None Some(&self.haystack[range]) } } Some(m) => { let range = self.last..m.start(); self.last = m.end(); Some(&self.haystack[range]) } } } } impl<'r, 't> core::iter::FusedIterator for Split<'r, 't> {} /// An iterator over at most `N` substrings delimited by a regex match. 
/// /// The last substring yielded by this iterator will be whatever remains after /// `N-1` splits. /// /// `'r` is the lifetime of the compiled regular expression and `'h` is the /// lifetime of the byte string being split. /// /// This iterator is created by [`Regex::splitn`]. /// /// # Time complexity /// /// Note that since an iterator runs potentially many searches on the haystack /// and since each search has worst case `O(m * n)` time complexity, the /// overall worst case time complexity for iteration is `O(m * n^2)`. /// /// Although note that the worst case time here has an upper bound given /// by the `limit` parameter to [`Regex::splitn`]. #[derive(Debug)] pub struct SplitN<'r, 'h> { splits: Split<'r, 'h>, limit: usize, } impl<'r, 'h> Iterator for SplitN<'r, 'h> { type Item = &'h str; #[inline] fn next(&mut self) -> Option<&'h str> { if self.limit == 0 { return None; } self.limit -= 1; if self.limit > 0 { return self.splits.next(); } let len = self.splits.haystack.len(); if self.splits.last > len { // We've already returned all substrings. None } else { // self.n == 0, so future calls will return None immediately Some(&self.splits.haystack[self.splits.last..len]) } } #[inline] fn size_hint(&self) -> (usize, Option<usize>) { self.splits.size_hint() } } impl<'r, 't> core::iter::FusedIterator for SplitN<'r, 't> {} /// An iterator over the names of all capture groups in a regex. /// /// This iterator yields values of type `Option<&str>` in order of the opening /// capture group parenthesis in the regex pattern. `None` is yielded for /// groups with no name. The first element always corresponds to the implicit /// and unnamed group for the overall match. /// /// `'r` is the lifetime of the compiled regular expression. /// /// This iterator is created by [`Regex::capture_names`]. #[derive(Clone, Debug)] pub struct CaptureNames<'r>(nfa::CaptureNames<'r>); impl<'r> Iterator for CaptureNames<'r> { type Item = Option<&'r str>; #[inline] fn next(&mut self) -> Option<Option<&'r str>> { self.0.next() } #[inline] fn size_hint(&self) -> (usize, Option<usize>) { self.0.size_hint() } #[inline] fn count(self) -> usize { self.0.count() } } impl<'r> ExactSizeIterator for CaptureNames<'r> {} impl<'r> core::iter::FusedIterator for CaptureNames<'r> {} /// An iterator over all group matches in a [`Captures`] value. /// /// This iterator yields values of type `Option<Match<'h>>`, where `'h` is the /// lifetime of the haystack that the matches are for. The order of elements /// yielded corresponds to the order of the opening parenthesis for the group /// in the regex pattern. `None` is yielded for groups that did not participate /// in the match. /// /// The first element always corresponds to the implicit group for the overall /// match. Since this iterator is created by a [`Captures`] value, and a /// `Captures` value is only created when a match occurs, it follows that the /// first element yielded by this iterator is guaranteed to be non-`None`. /// /// The lifetime `'c` corresponds to the lifetime of the `Captures` value that /// created this iterator, and the lifetime `'h` corresponds to the originally /// matched haystack. 
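///
/// # Example
///
/// One possible way to consume this iterator is to collect every group into a
/// `Vec`, using an empty string for groups that did not participate in the
/// match:
///
/// ```
/// use regex_lite::Regex;
///
/// let re = Regex::new(r"(\w)(\d)?(\w)").unwrap();
/// let caps = re.captures("AZ").unwrap();
/// // `Captures::iter` returns a `SubCaptureMatches`.
/// let parts: Vec<&str> =
///     caps.iter().map(|m| m.map_or("", |m| m.as_str())).collect();
/// assert_eq!(parts, vec!["AZ", "A", "", "Z"]);
/// ```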
#[derive(Clone, Debug)]
pub struct SubCaptureMatches<'c, 'h> {
    caps: &'c Captures<'h>,
    it: core::iter::Enumerate<nfa::CaptureNames<'c>>,
}

impl<'c, 'h> Iterator for SubCaptureMatches<'c, 'h> {
    type Item = Option<Match<'h>>;

    #[inline]
    fn next(&mut self) -> Option<Option<Match<'h>>> {
        let (group_index, _) = self.it.next()?;
        Some(self.caps.get(group_index))
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.it.size_hint()
    }

    #[inline]
    fn count(self) -> usize {
        self.it.count()
    }
}

impl<'c, 'h> ExactSizeIterator for SubCaptureMatches<'c, 'h> {}

impl<'c, 'h> core::iter::FusedIterator for SubCaptureMatches<'c, 'h> {}

/// A trait for types that can be used to replace matches in a haystack.
///
/// In general, users of this crate shouldn't need to implement this trait,
/// since implementations are already provided for `&str` along with other
/// variants of string types, as well as `FnMut(&Captures) -> String` (or any
/// `FnMut(&Captures) -> T` where `T: AsRef<str>`). Those cover most use cases,
/// but callers can implement this trait directly if necessary.
///
/// # Example
///
/// This example shows a basic implementation of the `Replacer` trait. This
/// can be done much more simply using the replacement string interpolation
/// support (e.g., `$first $last`), but this approach avoids needing to parse
/// the replacement string at all.
///
/// ```
/// use regex_lite::{Captures, Regex, Replacer};
///
/// struct NameSwapper;
///
/// impl Replacer for NameSwapper {
///     fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut String) {
///         dst.push_str(&caps["first"]);
///         dst.push_str(" ");
///         dst.push_str(&caps["last"]);
///     }
/// }
///
/// let re = Regex::new(r"(?<last>[^,\s]+),\s+(?<first>\S+)").unwrap();
/// let result = re.replace("Springsteen, Bruce", NameSwapper);
/// assert_eq!(result, "Bruce Springsteen");
/// ```
pub trait Replacer {
    /// Appends possibly empty data to `dst` to replace the current match.
    ///
    /// The current match is represented by `caps`, which is guaranteed to
    /// have a match at capture group `0`.
    ///
    /// For example, a no-op replacement would be `dst.push_str(&caps[0])`.
    fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut String);

    /// Return a fixed unchanging replacement string.
    ///
    /// When doing replacements, if access to [`Captures`] is not needed (e.g.,
    /// the replacement string does not need `$` expansion), then it can be
    /// beneficial to avoid finding sub-captures.
    ///
    /// In general, this is called once for every call to a replacement routine
    /// such as [`Regex::replace_all`].
    fn no_expansion<'r>(&'r mut self) -> Option<Cow<'r, str>> {
        None
    }

    /// Returns a type that implements `Replacer`, but that borrows and wraps
    /// this `Replacer`.
    ///
    /// This is useful when you want to take a generic `Replacer` (which might
    /// not be cloneable) and use it without consuming it, so it can be used
    /// more than once.
/// /// # Example /// /// ``` /// use regex_lite::{Regex, Replacer}; /// /// fn replace_all_twice<R: Replacer>( /// re: Regex, /// src: &str, /// mut rep: R, /// ) -> String { /// let dst = re.replace_all(src, rep.by_ref()); /// let dst = re.replace_all(&dst, rep.by_ref()); /// dst.into_owned() /// } /// ``` fn by_ref<'r>(&'r mut self) -> ReplacerRef<'r, Self> { ReplacerRef(self) } } impl<'a> Replacer for &'a str { fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut String) { caps.expand(*self, dst); } fn no_expansion(&mut self) -> Option<Cow<'_, str>> { no_expansion(self) } } impl<'a> Replacer for &'a String { fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut String) { self.as_str().replace_append(caps, dst) } fn no_expansion(&mut self) -> Option<Cow<'_, str>> { no_expansion(self) } } impl Replacer for String { fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut String) { self.as_str().replace_append(caps, dst) } fn no_expansion(&mut self) -> Option<Cow<'_, str>> { no_expansion(self) } } impl<'a> Replacer for Cow<'a, str> { fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut String) { self.as_ref().replace_append(caps, dst) } fn no_expansion(&mut self) -> Option<Cow<'_, str>> { no_expansion(self) } } impl<'a> Replacer for &'a Cow<'a, str> { fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut String) { self.as_ref().replace_append(caps, dst) } fn no_expansion(&mut self) -> Option<Cow<'_, str>> { no_expansion(self) } } impl<F, T> Replacer for F where F: FnMut(&Captures<'_>) -> T, T: AsRef<str>, { fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut String) { dst.push_str((*self)(caps).as_ref()); } } /// A by-reference adaptor for a [`Replacer`]. /// /// This permits reusing the same `Replacer` value in multiple calls to a /// replacement routine like [`Regex::replace_all`]. /// /// This type is created by [`Replacer::by_ref`]. #[derive(Debug)] pub struct ReplacerRef<'a, R: ?Sized>(&'a mut R); impl<'a, R: Replacer + ?Sized + 'a> Replacer for ReplacerRef<'a, R> { fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut String) { self.0.replace_append(caps, dst) } fn no_expansion(&mut self) -> Option<Cow<'_, str>> { self.0.no_expansion() } } /// A helper type for forcing literal string replacement. /// /// It can be used with routines like [`Regex::replace`] and /// [`Regex::replace_all`] to do a literal string replacement without expanding /// `$name` to their corresponding capture groups. This can be both convenient /// (to avoid escaping `$`, for example) and faster (since capture groups /// don't need to be found). /// /// `'s` is the lifetime of the literal string to use. /// /// # Example /// /// ``` /// use regex_lite::{NoExpand, Regex}; /// /// let re = Regex::new(r"(?<last>[^,\s]+),\s+(\S+)").unwrap(); /// let result = re.replace("Springsteen, Bruce", NoExpand("$2 $last")); /// assert_eq!(result, "$2 $last"); /// ``` #[derive(Clone, Debug)] pub struct NoExpand<'t>(pub &'t str); impl<'t> Replacer for NoExpand<'t> { fn replace_append(&mut self, _: &Captures<'_>, dst: &mut String) { dst.push_str(self.0); } fn no_expansion(&mut self) -> Option<Cow<'_, str>> { Some(Cow::Borrowed(self.0)) } } /// Quickly checks the given replacement string for whether interpolation /// should be done on it. It returns `None` if a `$` was found anywhere in the /// given string, which suggests interpolation needs to be done. But if there's /// no `$` anywhere, then interpolation definitely does not need to be done. 
/// In that case, the given string is returned as a borrowed `Cow`.
///
/// This is meant to be used to implement the `Replacer::no_expansion` method
/// in its various trait impls.
fn no_expansion<T: AsRef<str>>(t: &T) -> Option<Cow<'_, str>> {
    let s = t.as_ref();
    match s.find('$') {
        Some(_) => None,
        None => Some(Cow::Borrowed(s)),
    }
}

/// A configurable builder for a [`Regex`].
///
/// This builder can be used to programmatically set flags such as `i` (case
/// insensitive) and `x` (for verbose mode). This builder can also be used to
/// configure things like a size limit on the compiled regular expression.
#[derive(Debug)]
pub struct RegexBuilder {
    pattern: String,
    hir_config: hir::Config,
    nfa_config: nfa::Config,
}

impl RegexBuilder {
    /// Create a new builder with a default configuration for the given
    /// pattern.
    ///
    /// If the pattern is invalid or exceeds the configured size limits, then
    /// an error will be returned when [`RegexBuilder::build`] is called.
    pub fn new(pattern: &str) -> RegexBuilder {
        RegexBuilder {
            pattern: pattern.to_string(),
            hir_config: hir::Config::default(),
            nfa_config: nfa::Config::default(),
        }
    }

    /// Compiles the pattern given to `RegexBuilder::new` with the
    /// configuration set on this builder.
    ///
    /// If the pattern isn't a valid regex or if a configured size limit was
    /// exceeded, then an error is returned.
    pub fn build(&self) -> Result<Regex, Error> {
        let hir = Hir::parse(self.hir_config, &self.pattern)?;
        let nfa = NFA::new(self.nfa_config, self.pattern.clone(), &hir)?;
        let pikevm = Arc::new(PikeVM::new(nfa));
        let pool = {
            let pikevm = Arc::clone(&pikevm);
            let create = Box::new(move || Cache::new(&pikevm));
            CachePool::new(create)
        };
        Ok(Regex { pikevm, pool })
    }

    /// This configures whether to enable ASCII case insensitive matching for
    /// the entire pattern.
    ///
    /// This setting can also be configured using the inline flag `i`
    /// in the pattern. For example, `(?i:foo)` matches `foo` case
    /// insensitively while `(?-i:foo)` matches `foo` case sensitively.
    ///
    /// The default for this is `false`.
    ///
    /// # Example
    ///
    /// ```
    /// use regex_lite::RegexBuilder;
    ///
    /// let re = RegexBuilder::new(r"foo(?-i:bar)quux")
    ///     .case_insensitive(true)
    ///     .build()
    ///     .unwrap();
    /// assert!(re.is_match("FoObarQuUx"));
    /// // Even though case insensitive matching is enabled in the builder,
    /// // it can be locally disabled within the pattern. In this case,
    /// // `bar` is matched case sensitively.
    /// assert!(!re.is_match("fooBARquux"));
    /// ```
    pub fn case_insensitive(&mut self, yes: bool) -> &mut RegexBuilder {
        self.hir_config.flags.case_insensitive = yes;
        self
    }

    /// This configures multi-line mode for the entire pattern.
    ///
    /// Enabling multi-line mode changes the behavior of the `^` and `$` anchor
    /// assertions. Instead of only matching at the beginning and end of a
    /// haystack, respectively, multi-line mode causes them to match at the
    /// beginning and end of a line *in addition* to the beginning and end of
    /// a haystack. More precisely, `^` will match at the position immediately
    /// following a `\n` and `$` will match at the position immediately
    /// preceding a `\n`.
    ///
    /// The behavior of this option is impacted by the [`RegexBuilder::crlf`]
    /// setting. Namely, CRLF mode causes both `\r` and `\n` to be treated as
    /// line terminators for these anchors, but neither `^` nor `$` will ever
    /// match at the position between a `\r` and a `\n`.
    ///
    /// This setting can also be configured using the inline flag `m` in the
    /// pattern.
    ///
    /// The default for this is `false`.
/// /// # Example /// /// ``` /// use regex_lite::RegexBuilder; /// /// let re = RegexBuilder::new(r"^foo$") /// .multi_line(true) /// .build() /// .unwrap(); /// assert_eq!(Some(1..4), re.find("\nfoo\n").map(|m| m.range())); /// ``` pub fn multi_line(&mut self, yes: bool) -> &mut RegexBuilder { self.hir_config.flags.multi_line = yes; self } /// This configures dot-matches-new-line mode for the entire pattern. /// /// Perhaps surprisingly, the default behavior for `.` is not to match /// any character, but rather, to match any character except for the line /// terminator (which is `\n` by default). When this mode is enabled, the /// behavior changes such that `.` truly matches any character. /// /// This setting can also be configured using the inline flag `s` in the /// pattern. /// /// The default for this is `false`. /// /// # Example /// /// ``` /// use regex_lite::RegexBuilder; /// /// let re = RegexBuilder::new(r"foo.bar") /// .dot_matches_new_line(true) /// .build() /// .unwrap(); /// let hay = "foo\nbar"; /// assert_eq!(Some("foo\nbar"), re.find(hay).map(|m| m.as_str())); /// ``` pub fn dot_matches_new_line(&mut self, yes: bool) -> &mut RegexBuilder { self.hir_config.flags.dot_matches_new_line = yes; self } /// This configures CRLF mode for the entire pattern. /// /// When CRLF mode is enabled, both `\r` ("carriage return" or CR for /// short) and `\n` ("line feed" or LF for short) are treated as line /// terminators. This results in the following: /// /// * Unless dot-matches-new-line mode is enabled, `.` will now match any /// character except for `\n` and `\r`. /// * When multi-line mode is enabled, `^` will match immediately /// following a `\n` or a `\r`. Similarly, `$` will match immediately /// preceding a `\n` or a `\r`. Neither `^` nor `$` will ever match between /// `\r` and `\n`. /// /// This setting can also be configured using the inline flag `R` in /// the pattern. /// /// The default for this is `false`. /// /// # Example /// /// ``` /// use regex_lite::RegexBuilder; /// /// let re = RegexBuilder::new(r"^foo$") /// .multi_line(true) /// .crlf(true) /// .build() /// .unwrap(); /// let hay = "\r\nfoo\r\n"; /// // If CRLF mode weren't enabled here, then '$' wouldn't match /// // immediately after 'foo', and thus no match would be found. /// assert_eq!(Some("foo"), re.find(hay).map(|m| m.as_str())); /// ``` /// /// This example demonstrates that `^` will never match at a position /// between `\r` and `\n`. (`$` will similarly not match between a `\r` /// and a `\n`.) /// /// ``` /// use regex_lite::RegexBuilder; /// /// let re = RegexBuilder::new(r"^") /// .multi_line(true) /// .crlf(true) /// .build() /// .unwrap(); /// let hay = "\r\n\r\n"; /// let ranges: Vec<_> = re.find_iter(hay).map(|m| m.range()).collect(); /// assert_eq!(ranges, vec![0..0, 2..2, 4..4]); /// ``` pub fn crlf(&mut self, yes: bool) -> &mut RegexBuilder { self.hir_config.flags.crlf = yes; self } /// This configures swap-greed mode for the entire pattern. /// /// When swap-greed mode is enabled, patterns like `a+` will become /// non-greedy and patterns like `a+?` will become greedy. In other words, /// the meanings of `a+` and `a+?` are switched. /// /// This setting can also be configured using the inline flag `U` in the /// pattern. /// /// The default for this is `false`. 
    ///
    /// # Example
    ///
    /// ```
    /// use regex_lite::RegexBuilder;
    ///
    /// let re = RegexBuilder::new(r"a+")
    ///     .swap_greed(true)
    ///     .build()
    ///     .unwrap();
    /// assert_eq!(Some("a"), re.find("aaa").map(|m| m.as_str()));
    /// ```
    pub fn swap_greed(&mut self, yes: bool) -> &mut RegexBuilder {
        self.hir_config.flags.swap_greed = yes;
        self
    }

    /// This configures verbose mode for the entire pattern.
    ///
    /// When enabled, whitespace will be treated as insignificant in the
    /// pattern and `#` can be used to start a comment until the next new line.
    ///
    /// Normally, in most places in a pattern, whitespace is treated literally.
    /// For example ` +` will match one or more ASCII whitespace characters.
    ///
    /// When verbose mode is enabled, `\#` can be used to match a literal `#`
    /// and `\ ` can be used to match a literal ASCII whitespace character.
    ///
    /// Verbose mode is useful for permitting regexes to be formatted and
    /// broken up more nicely. This may make them more easily readable.
    ///
    /// This setting can also be configured using the inline flag `x` in the
    /// pattern.
    ///
    /// The default for this is `false`.
    ///
    /// # Example
    ///
    /// ```
    /// use regex_lite::RegexBuilder;
    ///
    /// let pat = r"
    ///     \b
    ///     (?<first>[A-Z]\w*)  # always start with uppercase letter
    ///     \s+                 # whitespace should separate names
    ///     (?:                 # middle name can be an initial!
    ///         (?:(?<initial>[A-Z])\.|(?<middle>[A-Z]\w*))
    ///         \s+
    ///     )?
    ///     (?<last>[A-Z]\w*)
    ///     \b
    /// ";
    /// let re = RegexBuilder::new(pat)
    ///     .ignore_whitespace(true)
    ///     .build()
    ///     .unwrap();
    ///
    /// let caps = re.captures("Harry Potter").unwrap();
    /// assert_eq!("Harry", &caps["first"]);
    /// assert_eq!("Potter", &caps["last"]);
    ///
    /// let caps = re.captures("Harry J. Potter").unwrap();
    /// assert_eq!("Harry", &caps["first"]);
    /// // Since a middle name/initial isn't required for an overall match,
    /// // we can't assume that 'initial' or 'middle' will be populated!
    /// assert_eq!(Some("J"), caps.name("initial").map(|m| m.as_str()));
    /// assert_eq!(None, caps.name("middle").map(|m| m.as_str()));
    /// assert_eq!("Potter", &caps["last"]);
    ///
    /// let caps = re.captures("Harry James Potter").unwrap();
    /// assert_eq!("Harry", &caps["first"]);
    /// // Since a middle name/initial isn't required for an overall match,
    /// // we can't assume that 'initial' or 'middle' will be populated!
    /// assert_eq!(None, caps.name("initial").map(|m| m.as_str()));
    /// assert_eq!(Some("James"), caps.name("middle").map(|m| m.as_str()));
    /// assert_eq!("Potter", &caps["last"]);
    /// ```
    pub fn ignore_whitespace(&mut self, yes: bool) -> &mut RegexBuilder {
        self.hir_config.flags.ignore_whitespace = yes;
        self
    }

    /// Sets the approximate size limit, in bytes, of the compiled regex.
    ///
    /// This roughly corresponds to the amount of heap memory, in bytes,
    /// occupied by a single regex. If the regex would otherwise approximately
    /// exceed this limit, then compiling that regex will fail.
    ///
    /// The main utility of a method like this is to avoid compiling regexes
    /// that use an unexpected amount of resources, such as time and memory.
    /// Even if the memory usage of a large regex is acceptable, its search
    /// time may not be. Namely, worst case time complexity for search is `O(m
    /// * n)`, where `m ~ len(pattern)` and `n ~ len(haystack)`. That is,
    /// search time depends, in part, on the size of the compiled regex. This
    /// means that putting a limit on the size of the regex limits how much a
    /// regex can impact search time.
/// /// The default for this is some reasonable number that permits most /// patterns to compile successfully. /// /// # Example /// /// ``` /// use regex_lite::RegexBuilder; /// /// assert!(RegexBuilder::new(r"\w").size_limit(100).build().is_err()); /// ``` pub fn size_limit(&mut self, limit: usize) -> &mut RegexBuilder { self.nfa_config.size_limit = Some(limit); self } /// Set the nesting limit for this parser. /// /// The nesting limit controls how deep the abstract syntax tree is allowed /// to be. If the AST exceeds the given limit (e.g., with too many nested /// groups), then an error is returned by the parser. /// /// The purpose of this limit is to act as a heuristic to prevent stack /// overflow for consumers that do structural induction on an AST using /// explicit recursion. While this crate never does this (instead using /// constant stack space and moving the call stack to the heap), other /// crates may. /// /// This limit is not checked until the entire AST is parsed. Therefore, if /// callers want to put a limit on the amount of heap space used, then they /// should impose a limit on the length, in bytes, of the concrete pattern /// string. In particular, this is viable since this parser implementation /// will limit itself to heap space proportional to the length of the /// pattern string. See also the [untrusted inputs](crate#untrusted-input) /// section in the top-level crate documentation for more information about /// this. /// /// Note that a nest limit of `0` will return a nest limit error for most /// patterns but not all. For example, a nest limit of `0` permits `a` but /// not `ab`, since `ab` requires an explicit concatenation, which results /// in a nest depth of `1`. In general, a nest limit is not something that /// manifests in an obvious way in the concrete syntax, therefore, it /// should not be used in a granular way. /// /// # Example /// /// ``` /// use regex_lite::RegexBuilder; /// /// assert!(RegexBuilder::new(r"").nest_limit(0).build().is_ok()); /// assert!(RegexBuilder::new(r"a").nest_limit(0).build().is_ok()); /// assert!(RegexBuilder::new(r"(a)").nest_limit(0).build().is_err()); /// ``` pub fn nest_limit(&mut self, limit: u32) -> &mut RegexBuilder { self.hir_config.nest_limit = limit; self } } <file_sep>/regex-automata/src/dfa/determinize.rs use alloc::{collections::BTreeMap, vec::Vec}; use crate::{ dfa::{ dense::{self, BuildError}, DEAD, }, nfa::thompson, util::{ self, alphabet::{self, ByteSet}, determinize::{State, StateBuilderEmpty, StateBuilderNFA}, primitives::{PatternID, StateID}, search::{Anchored, MatchKind}, sparse_set::SparseSets, start::Start, }, }; /// A builder for configuring and running a DFA determinizer. #[derive(Clone, Debug)] pub(crate) struct Config { match_kind: MatchKind, quit: ByteSet, dfa_size_limit: Option<usize>, determinize_size_limit: Option<usize>, } impl Config { /// Create a new default config for a determinizer. The determinizer may be /// configured before calling `run`. pub fn new() -> Config { Config { match_kind: MatchKind::LeftmostFirst, quit: ByteSet::empty(), dfa_size_limit: None, determinize_size_limit: None, } } /// Run determinization on the given NFA and write the resulting DFA into /// the one given. The DFA given should be initialized but otherwise empty. /// "Initialized" means that it is setup to handle the NFA's byte classes, /// number of patterns and whether to build start states for each pattern. 
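    ///
    /// A rough sketch of how the dense DFA builder is expected to drive this
    /// routine (the helper for producing an initialized, empty DFA is
    /// illustrative here; the exact call sites live in the `dense` module):
    ///
    /// ```text
    /// let nfa = thompson::NFA::new(pattern)?;
    /// let mut dfa = /* dense::OwnedDFA initialized for `nfa` */;
    /// determinize::Config::new()
    ///     .match_kind(MatchKind::LeftmostFirst)
    ///     .run(&nfa, &mut dfa)?;
    /// ```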
    pub fn run(
        &self,
        nfa: &thompson::NFA,
        dfa: &mut dense::OwnedDFA,
    ) -> Result<(), BuildError> {
        let dead = State::dead();
        let quit = State::dead();
        let mut cache = StateMap::default();
        // We only insert the dead state here since its representation is
        // identical to the quit state. And we never want anything pointing
        // to the quit state other than specific transitions derived from the
        // determinizer's configured "quit" bytes.
        //
        // We do put the quit state into 'builder_states' below. This ensures
        // that a proper DFA state ID is allocated for it, and that no other
        // DFA state uses the "location after the DEAD state." That is, it
        // is assumed that the quit state is always the state immediately
        // following the DEAD state.
        cache.insert(dead.clone(), DEAD);
        let runner = Runner {
            config: self.clone(),
            nfa,
            dfa,
            builder_states: alloc::vec![dead, quit],
            cache,
            memory_usage_state: 0,
            sparses: SparseSets::new(nfa.states().len()),
            stack: alloc::vec![],
            scratch_state_builder: StateBuilderEmpty::new(),
        };
        runner.run()
    }

    /// The match semantics to use for determinization.
    ///
    /// MatchKind::All corresponds to the standard textbook construction.
    /// All possible match states are represented in the DFA.
    /// MatchKind::LeftmostFirst permits greediness and otherwise tries to
    /// simulate the match semantics of backtracking regex engines. Namely,
    /// only a subset of match states are built, and dead states are used to
    /// stop searches with an unanchored prefix.
    ///
    /// The default is MatchKind::LeftmostFirst.
    pub fn match_kind(&mut self, kind: MatchKind) -> &mut Config {
        self.match_kind = kind;
        self
    }

    /// The set of bytes to use that will cause the DFA to enter a quit state,
    /// stop searching and return an error. By default, this is empty.
    pub fn quit(&mut self, set: ByteSet) -> &mut Config {
        self.quit = set;
        self
    }

    /// The limit, in bytes of the heap, that the DFA is permitted to use.
    /// This does not include the auxiliary heap storage used by
    /// determinization.
    pub fn dfa_size_limit(&mut self, bytes: Option<usize>) -> &mut Config {
        self.dfa_size_limit = bytes;
        self
    }

    /// The limit, in bytes of the heap, that determinization itself is
    /// allowed to use. This does not include the size of the DFA being built.
    pub fn determinize_size_limit(
        &mut self,
        bytes: Option<usize>,
    ) -> &mut Config {
        self.determinize_size_limit = bytes;
        self
    }
}

/// The actual implementation of determinization that converts an NFA to a DFA
/// through powerset construction.
///
/// This determinizer roughly follows the typical powerset construction, where
/// each DFA state is comprised of one or more NFA states. In the worst case,
/// there is one DFA state for every possible combination of NFA states. In
/// practice, this only happens in certain conditions, typically when there
/// are bounded repetitions.
///
/// The main differences between this implementation and typical
/// determinization are that this implementation delays matches by one state
/// and hackily makes look-around work. Comments below attempt to explain
/// this.
///
/// The lifetime variable `'a` refers to the lifetime of the NFA or DFA,
/// whichever is shorter.
#[derive(Debug)]
struct Runner<'a> {
    /// The configuration used to initialize determinization.
    config: Config,
    /// The NFA we're converting into a DFA.
    nfa: &'a thompson::NFA,
    /// The DFA we're building.
    dfa: &'a mut dense::OwnedDFA,
    /// Each DFA state being built is defined as an *ordered* set of NFA
    /// states, along with some meta facts about the ordered set of NFA
    /// states.
    ///
    /// This is never empty. The first state is always a dummy state such
    /// that a state id == 0 corresponds to a dead state. The second state is
    /// always the quit state.
    ///
    /// Why do we have states in both a `Vec` and in a cache map below?
    /// Well, they serve two different roles based on access patterns.
    /// `builder_states` is the canonical home of each state, and provides
    /// constant random access by a DFA state's ID. The cache map below, on
    /// the other hand, provides a quick way of searching for identical DFA
    /// states by using the DFA state as a key in the map. Of course, we use
    /// reference counting to avoid actually duplicating the state's data
    /// itself. (Although this has never been benchmarked.) Note that the
    /// cache map does not give us full minimization; it just lets us avoid
    /// some very obvious redundant states.
    ///
    /// Note that the index into this Vec isn't quite the DFA's state ID.
    /// Rather, it's just an index. To get the state ID, you have to multiply
    /// it by the DFA's stride. That's done by self.dfa.from_index. And the
    /// inverse is self.dfa.to_index.
    ///
    /// Moreover, DFA states don't usually retain the IDs assigned to them
    /// by their position in this Vec. After determinization completes,
    /// states are shuffled around to support other optimizations. See the
    /// sibling 'special' module for more details on that. (The reason for
    /// mentioning this is that if you print out the DFA for debugging during
    /// determinization, and then print out the final DFA after it is fully
    /// built, then the state IDs likely won't match up.)
    builder_states: Vec<State>,
    /// A cache of DFA states that already exist and can be easily looked up
    /// via ordered sets of NFA states.
    ///
    /// See `builder_states` docs for why we store states in two different
    /// ways.
    cache: StateMap,
    /// The memory usage, in bytes, used by builder_states and cache. We track
    /// this as new states are added since states use a variable amount of
    /// heap. Tracking this as we add states makes it possible to compute the
    /// total amount of memory used by the determinizer in constant time.
    memory_usage_state: usize,
    /// A pair of sparse sets for tracking ordered sets of NFA state IDs.
    /// These are reused throughout determinization. A bounded sparse set
    /// gives us constant time insertion, membership testing and clearing.
    sparses: SparseSets,
    /// Scratch space for a stack of NFA states to visit, for depth first
    /// visiting without recursion.
    stack: Vec<StateID>,
    /// Scratch space for storing an ordered sequence of NFA states, for
    /// amortizing allocation. This is principally useful for when we avoid
    /// adding a new DFA state since it already exists. In order to detect
    /// this case though, we still need an ordered set of NFA state IDs. So we
    /// use this space to stage that ordered set before we know whether we
    /// need to create a new DFA state or not.
    scratch_state_builder: StateBuilderEmpty,
}

/// A map from states to state identifiers. When using std, we use a standard
/// hashmap, since it's a bit faster for this use case. (Other maps, like
/// ones based on FNV, have not yet been benchmarked.)
///
/// The main purpose of this map is to reuse states where possible. This won't
/// fully minimize the DFA, but it works well in a lot of cases.
#[cfg(feature = "std")]
type StateMap = std::collections::HashMap<State, StateID>;
#[cfg(not(feature = "std"))]
type StateMap = BTreeMap<State, StateID>;

impl<'a> Runner<'a> {
    /// Build the DFA.
If there was a problem constructing the DFA (e.g., if /// the chosen state identifier representation is too small), then an error /// is returned. fn run(mut self) -> Result<(), BuildError> { if self.nfa.look_set_any().contains_word_unicode() && !self.config.quit.contains_range(0x80, 0xFF) { return Err(BuildError::unsupported_dfa_word_boundary_unicode()); } // A sequence of "representative" bytes drawn from each equivalence // class. These representative bytes are fed to the NFA to compute // state transitions. This allows us to avoid re-computing state // transitions for bytes that are guaranteed to produce identical // results. Since computing the representatives needs to do a little // work, we do it once here because we'll be iterating over them a lot. let representatives: Vec<alphabet::Unit> = self.dfa.byte_classes().representatives(..).collect(); // The set of all DFA state IDs that still need to have their // transitions set. We start by seeding this with all starting states. let mut uncompiled = alloc::vec![]; self.add_all_starts(&mut uncompiled)?; while let Some(dfa_id) = uncompiled.pop() { for &unit in &representatives { if unit.as_u8().map_or(false, |b| self.config.quit.contains(b)) { continue; } // In many cases, the state we transition to has already been // computed. 'cached_state' will do the minimal amount of work // to check this, and if it exists, immediately return an // already existing state ID. let (next_dfa_id, is_new) = self.cached_state(dfa_id, unit)?; self.dfa.set_transition(dfa_id, unit, next_dfa_id); // If the state ID we got back is newly created, then we need // to compile it, so add it to our uncompiled frontier. if is_new { uncompiled.push(next_dfa_id); } } } debug!( "determinization complete, memory usage: {}, \ dense DFA size: {}, \ is reverse? {}", self.memory_usage(), self.dfa.memory_usage(), self.nfa.is_reverse(), ); // A map from DFA state ID to one or more NFA match IDs. Each NFA match // ID corresponds to a distinct regex pattern that matches in the state // corresponding to the key. let mut matches: BTreeMap<StateID, Vec<PatternID>> = BTreeMap::new(); self.cache.clear(); #[cfg(feature = "logging")] let mut total_pat_len = 0; for (i, state) in self.builder_states.into_iter().enumerate() { if let Some(pat_ids) = state.match_pattern_ids() { let id = self.dfa.to_state_id(i); log! { total_pat_len += pat_ids.len(); } matches.insert(id, pat_ids); } } log! { use core::mem::size_of; let per_elem = size_of::<StateID>() + size_of::<Vec<PatternID>>(); let pats = total_pat_len * size_of::<PatternID>(); let mem = (matches.len() * per_elem) + pats; log::debug!("matches map built, memory usage: {}", mem); } // At this point, we shuffle the "special" states in the final DFA. // This permits a DFA's match loop to detect a match condition (among // other things) by merely inspecting the current state's identifier, // and avoids the need for any additional auxiliary storage. self.dfa.shuffle(matches)?; Ok(()) } /// Return the identifier for the next DFA state given an existing DFA /// state and an input byte. If the next DFA state already exists, then /// return its identifier from the cache. Otherwise, build the state, cache /// it and return its identifier. /// /// This routine returns a boolean indicating whether a new state was /// built. If a new state is built, then the caller needs to add it to its /// frontier of uncompiled DFA states to compute transitions for. 
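    ///
    /// For reference, the expected calling pattern mirrors the
    /// transition-filling loop in `run` above:
    ///
    /// ```text
    /// let (next_dfa_id, is_new) = self.cached_state(dfa_id, unit)?;
    /// self.dfa.set_transition(dfa_id, unit, next_dfa_id);
    /// if is_new {
    ///     uncompiled.push(next_dfa_id);
    /// }
    /// ```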
    fn cached_state(
        &mut self,
        dfa_id: StateID,
        unit: alphabet::Unit,
    ) -> Result<(StateID, bool), BuildError> {
        // Compute the set of all reachable NFA states, including epsilons.
        let empty_builder = self.get_state_builder();
        let builder = util::determinize::next(
            self.nfa,
            self.config.match_kind,
            &mut self.sparses,
            &mut self.stack,
            &self.builder_states[self.dfa.to_index(dfa_id)],
            unit,
            empty_builder,
        );
        self.maybe_add_state(builder)
    }

    /// Compute the set of DFA start states and add their identifiers in
    /// 'dfa_state_ids' (no duplicates are added).
    fn add_all_starts(
        &mut self,
        dfa_state_ids: &mut Vec<StateID>,
    ) -> Result<(), BuildError> {
        // These should be the first states added.
        assert!(dfa_state_ids.is_empty());
        // We only want to add (un)anchored starting states that are
        // consistent with our DFA's configuration. Unconditionally adding
        // both (although it is the default) can make DFAs quite a bit bigger.
        if self.dfa.start_kind().has_unanchored() {
            self.add_start_group(Anchored::No, dfa_state_ids)?;
        }
        if self.dfa.start_kind().has_anchored() {
            self.add_start_group(Anchored::Yes, dfa_state_ids)?;
        }
        // I previously had an 'assert' here checking that either
        // 'dfa_state_ids' was non-empty, or the NFA had zero patterns. But it
        // turns out this isn't always true. For example, the NFA might have
        // one or more patterns but where all such patterns are just 'fail'
        // states. These will ultimately just compile down to DFA dead states,
        // and since the dead state was added earlier, no new DFA states are
        // added. And thus, it is valid and okay for 'dfa_state_ids' to be
        // empty even if there are a non-zero number of patterns in the NFA.

        // We only need to compute anchored start states for each pattern if
        // it was requested to do so.
        if self.dfa.starts_for_each_pattern() {
            for pid in self.nfa.patterns() {
                self.add_start_group(Anchored::Pattern(pid), dfa_state_ids)?;
            }
        }
        Ok(())
    }

    /// Add a group of start states for the given match pattern ID. Any new
    /// DFA states added are pushed on to 'dfa_state_ids'. (No duplicates are
    /// pushed.)
    ///
    /// When pattern_id is None, then this will compile a group of unanchored
    /// start states (if the DFA is unanchored). When the pattern_id is
    /// present, then this will compile a group of anchored start states that
    /// only match the given pattern.
    ///
    /// This panics if `anchored` corresponds to an invalid pattern ID.
    fn add_start_group(
        &mut self,
        anchored: Anchored,
        dfa_state_ids: &mut Vec<StateID>,
    ) -> Result<(), BuildError> {
        let nfa_start = match anchored {
            Anchored::No => self.nfa.start_unanchored(),
            Anchored::Yes => self.nfa.start_anchored(),
            Anchored::Pattern(pid) => {
                self.nfa.start_pattern(pid).expect("valid pattern ID")
            }
        };

        // When compiling start states, we're careful not to build additional
        // states that aren't necessary. For example, if the NFA has no word
        // boundary assertion, then there's no reason to have distinct start
        // states for 'NonWordByte' and 'WordByte' starting configurations.
        // Instead, the 'WordByte' starting configuration can just point
        // directly to the start state for the 'NonWordByte' config.
        //
        // Note though that we only need to care about assertions in the
        // prefix of an NFA since this only concerns the starting states.
        // (Actually, the most precise thing we could do is look at the prefix
        // assertions of each pattern when 'anchored == Anchored::Pattern',
        // and then only compile extra states if the prefix is non-empty.) But
        // we settle for simplicity here instead of absolute minimalism. It is
        // somewhat rare, after all, for multiple patterns in the same regex
        // to have different prefix look-arounds.

        let (id, is_new) =
            self.add_one_start(nfa_start, Start::NonWordByte)?;
        self.dfa.set_start_state(anchored, Start::NonWordByte, id);
        if is_new {
            dfa_state_ids.push(id);
        }

        if !self.nfa.look_set_prefix_any().contains_word() {
            self.dfa.set_start_state(anchored, Start::WordByte, id);
        } else {
            let (id, is_new) =
                self.add_one_start(nfa_start, Start::WordByte)?;
            self.dfa.set_start_state(anchored, Start::WordByte, id);
            if is_new {
                dfa_state_ids.push(id);
            }
        }
        if !self.nfa.look_set_prefix_any().contains_anchor() {
            self.dfa.set_start_state(anchored, Start::Text, id);
            self.dfa.set_start_state(anchored, Start::LineLF, id);
            self.dfa.set_start_state(anchored, Start::LineCR, id);
            self.dfa.set_start_state(
                anchored,
                Start::CustomLineTerminator,
                id,
            );
        } else {
            let (id, is_new) = self.add_one_start(nfa_start, Start::Text)?;
            self.dfa.set_start_state(anchored, Start::Text, id);
            if is_new {
                dfa_state_ids.push(id);
            }

            let (id, is_new) = self.add_one_start(nfa_start, Start::LineLF)?;
            self.dfa.set_start_state(anchored, Start::LineLF, id);
            if is_new {
                dfa_state_ids.push(id);
            }

            let (id, is_new) = self.add_one_start(nfa_start, Start::LineCR)?;
            self.dfa.set_start_state(anchored, Start::LineCR, id);
            if is_new {
                dfa_state_ids.push(id);
            }

            let (id, is_new) =
                self.add_one_start(nfa_start, Start::CustomLineTerminator)?;
            self.dfa.set_start_state(
                anchored,
                Start::CustomLineTerminator,
                id,
            );
            if is_new {
                dfa_state_ids.push(id);
            }
        }
        Ok(())
    }

    /// Add a new DFA start state corresponding to the given starting NFA
    /// state, and the starting search configuration. (The starting search
    /// configuration essentially tells us which look-behind assertions are
    /// true for this particular state.)
    ///
    /// The boolean returned indicates whether the state ID returned is a
    /// newly created state, or a previously cached state.
    fn add_one_start(
        &mut self,
        nfa_start: StateID,
        start: Start,
    ) -> Result<(StateID, bool), BuildError> {
        // Compute the look-behind assertions that are true in this starting
        // configuration, and then determine the epsilon closure. While
        // computing the epsilon closure, we only follow conditional epsilon
        // transitions that satisfy the look-behind assertions in 'look_have'.
        let mut builder_matches = self.get_state_builder().into_matches();
        util::determinize::set_lookbehind_from_start(
            self.nfa,
            &start,
            &mut builder_matches,
        );
        self.sparses.set1.clear();
        util::determinize::epsilon_closure(
            self.nfa,
            nfa_start,
            builder_matches.look_have(),
            &mut self.stack,
            &mut self.sparses.set1,
        );
        let mut builder = builder_matches.into_nfa();
        util::determinize::add_nfa_states(
            &self.nfa,
            &self.sparses.set1,
            &mut builder,
        );
        self.maybe_add_state(builder)
    }

    /// Adds the given state to the DFA being built depending on whether it
    /// already exists in this determinizer's cache.
    ///
    /// If it does exist, then the memory used by 'state' is put back into the
    /// determinizer and the previously created state's ID is returned. (Along
    /// with 'false', indicating that no new state was added.)
    ///
    /// If it does not exist, then the state is added to the DFA being built
    /// and a fresh ID is allocated (if ID allocation fails, then an error is
    /// returned) and returned. (Along with 'true', indicating that a new
    /// state was added.)
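    ///
    /// Both `cached_state` and `add_one_start` above finish by delegating
    /// here, roughly as in this sketch:
    ///
    /// ```text
    /// let (sid, is_new) = self.maybe_add_state(builder)?;
    /// if is_new {
    ///     // The caller is responsible for compiling the new state's
    ///     // transitions, e.g. by pushing it onto its uncompiled frontier.
    /// }
    /// ```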
fn maybe_add_state( &mut self, builder: StateBuilderNFA, ) -> Result<(StateID, bool), BuildError> { if let Some(&cached_id) = self.cache.get(builder.as_bytes()) { // Since we have a cached state, put the constructed state's // memory back into our scratch space, so that it can be reused. self.put_state_builder(builder); return Ok((cached_id, false)); } self.add_state(builder).map(|sid| (sid, true)) } /// Add the given state to the DFA and make it available in the cache. /// /// The state initially has no transitions. That is, it transitions to the /// dead state for all possible inputs, and transitions to the quit state /// for all quit bytes. /// /// If adding the state would exceed the maximum value for StateID, then an /// error is returned. fn add_state( &mut self, builder: StateBuilderNFA, ) -> Result<StateID, BuildError> { let id = self.dfa.add_empty_state()?; if !self.config.quit.is_empty() { for b in self.config.quit.iter() { self.dfa.set_transition( id, alphabet::Unit::u8(b), self.dfa.quit_id(), ); } } let state = builder.to_state(); // States use reference counting internally, so we only need to count // their memory usage once. self.memory_usage_state += state.memory_usage(); self.builder_states.push(state.clone()); self.cache.insert(state, id); self.put_state_builder(builder); if let Some(limit) = self.config.dfa_size_limit { if self.dfa.memory_usage() > limit { return Err(BuildError::dfa_exceeded_size_limit(limit)); } } if let Some(limit) = self.config.determinize_size_limit { if self.memory_usage() > limit { return Err(BuildError::determinize_exceeded_size_limit( limit, )); } } Ok(id) } /// Returns a state builder from this determinizer that might have existing /// capacity. This helps avoid allocs in cases where a state is built that /// turns out to already be cached. /// /// Callers must put the state builder back with 'put_state_builder', /// otherwise the allocation reuse won't work. fn get_state_builder(&mut self) -> StateBuilderEmpty { core::mem::replace( &mut self.scratch_state_builder, StateBuilderEmpty::new(), ) } /// Puts the given state builder back into this determinizer for reuse. /// /// Note that building a 'State' from a builder always creates a new /// alloc, so callers should always put the builder back. fn put_state_builder(&mut self, builder: StateBuilderNFA) { let _ = core::mem::replace( &mut self.scratch_state_builder, builder.clear(), ); } /// Return the memory usage, in bytes, of this determinizer at the current /// point in time. This does not include memory used by the NFA or the /// dense DFA itself. fn memory_usage(&self) -> usize { use core::mem::size_of; self.builder_states.len() * size_of::<State>() // Maps likely use more memory than this, but it's probably close. 
            + self.cache.len() * (size_of::<State>() + size_of::<StateID>())
            + self.memory_usage_state
            + self.stack.capacity() * size_of::<StateID>()
            + self.scratch_state_builder.capacity()
    }
}
<file_sep>/regex-cli/cmd/compile_test.rs
use std::{
    io::Write,
    path::{Path, PathBuf},
    process::Command,
    time::{Duration, Instant},
};

use {
    anyhow::Context,
    lexopt::{Arg, Parser},
};

use crate::args::{self, Usage};

const REGEX_COMBOS: &[&[&str]] = &[
    &["std", "perf", "unicode"],
    &["std", "perf", "unicode", "perf-dfa-full"],
    &["std"],
    &["std", "perf"],
    &["std", "unicode"],
    &["std", "unicode-case", "unicode-perl"],
];

const REGEX_LITE_COMBOS: &[&[&str]] = &[&["std", "string"]];

const REGEX_AUTOMATA_COMBOS: &[&[&str]] = &[
    &["std", "syntax", "perf", "unicode", "meta", "nfa", "dfa", "hybrid"],
    // Try out some barebones combinations of individual regex engines.
    &["std", "syntax", "nfa-pikevm"],
    &["std", "syntax", "nfa-backtrack"],
    &["std", "syntax", "hybrid"],
    &["std", "syntax", "dfa-onepass"],
    // Now try out some realistic plausible configurations that combine
    // lots (but maybe not all) regex engines.
    //
    // First is dropping 'perf' from the default.
    &["std", "syntax", "unicode", "meta", "nfa", "dfa", "hybrid"],
    // Second is dropping 'dfa', which maybe doesn't carry its weight. We are
    // careful to re-enable the one-pass DFA though.
    &[
        "std",
        "syntax",
        "perf",
        "unicode",
        "meta",
        "nfa",
        "hybrid",
        "dfa-onepass",
    ],
    // This is dropping 'unicode', which comes with a whole bunch of tables.
    &["std", "syntax", "perf", "meta", "nfa", "dfa", "hybrid"],
    // Drop 'unicode' and also 'dfa'. But again, we keep the one-pass DFA
    // around.
    &["std", "syntax", "perf", "meta", "nfa", "hybrid", "dfa-onepass"],
    // "I want the meta regex API and don't care about perf, but I want full
    // functionality."
    &["std", "unicode", "meta"],
    // "I just want the meta regex API, but just enough to make it work"
    &["std", "meta"],
];

pub fn run(p: &mut Parser) -> anyhow::Result<()> {
    const USAGE: &'static str = r#"
Runs compilation time and binary size tests on a variety of configurations.

The output of this test includes compilation time, total binary size and
relative binary size. Relative binary size is computed by subtracting the
binary size of a simple "Hello, world!" program from the total size of the
program compiled with calls to the regex (or regex-automata) crate. The
purpose of relative size is to try to capture a metric that tracks the
overhead of the regex crate specifically, and not just the total binary size
(which might fluctuate with changes to the compiler).

The configurations are a combination of several things.

1) Various interesting feature combinations from regex (and regex-automata
when it's enabled).
2) Debug and release mode Cargo profiles.

Other things, such as lto, are not considered. Instead, we use defaults for
options like lto.

The arguments given to this command are as follows:

The first is a directory containing a checkout of the regex crate repo. The
second is a directory where a bunch of different Cargo projects will be
written.

USAGE:
    regex-cli compile-test <regex-crate-dir> <out-dir>

TIP: use -h for short docs and --help for long docs

OPTIONS:
%options%
"#;

    let mut config = Config::default();
    args::configure(p, USAGE, &mut [&mut config])?;

    let outdir = config.outdir()?;
    let revision = git_revision_hash(&config.regexdir()?)?;
    let baseline_size_dev = baseline_size(&outdir, Profile::Dev)?;
    let baseline_size_rel = baseline_size(&outdir, Profile::Release)?;

    // We don't bother bringing in a CSV writer just for this.
While we don't // check it, none of our field values should contain quotes or delimiters. // This is still somewhat shady, but not nearly as bad as trying to roll // our own CSV parser. let mut wtr = std::io::stdout(); writeln!(wtr, "name,crate,revision,profile,duration,size,relative-size")?; for test in config.tests()? { let tdir = TestDir::new(&outdir, test)?; tdir.write_cargo_toml()?; tdir.write_main_rs()?; tdir.cargo_clean()?; tdir.cargo_fetch()?; let m = tdir.cargo_build()?; let relative_size = m.size.saturating_sub(match tdir.test.profile { Profile::Dev => baseline_size_dev, Profile::Release => baseline_size_rel, }); write!(wtr, "{},", tdir.test.name())?; if tdir.test.regex_automata { write!(wtr, "regex-automata,")?; } else { write!(wtr, "regex,")?; } write!(wtr, "{},", revision)?; write!(wtr, "{},", tdir.test.profile.as_str())?; write!(wtr, "{:?},", m.duration)?; write!(wtr, "{:?},", m.size)?; write!(wtr, "{:?}", relative_size)?; write!(wtr, "\n")?; } Ok(()) } #[derive(Debug, Default)] struct Config { regexdir: Option<PathBuf>, outdir: Option<PathBuf>, regex_lite: bool, regex_automata: bool, } impl args::Configurable for Config { fn configure( &mut self, _: &mut Parser, arg: &mut Arg, ) -> anyhow::Result<bool> { match *arg { Arg::Long("regex-lite") => { self.regex_lite = true; } Arg::Long("regex-automata") => { self.regex_automata = true; } Arg::Value(ref mut v) => { if self.regexdir.is_none() { let dir = PathBuf::from(std::mem::take(v)); self.regexdir = Some( std::fs::canonicalize(&dir).with_context(|| { format!("could not canonicalize {}", dir.display()) })?, ); } else if self.outdir.is_none() { self.outdir = Some(PathBuf::from(std::mem::take(v))); } else { anyhow::bail!("more than 2 arguments were given"); } } _ => return Ok(false), } Ok(true) } fn usage(&self) -> &[Usage] { const USAGES: &'static [Usage] = &[ Usage::new( "--regex-automata", "Run tests for regex-automata too.", r#" When enabled, this will run compilation time and binary size tests for regex-automata too. regex-automata has several interesting configurations, so this may add quite a bit of time to the tests. But this is useful because it might provide a way to get better compilation times and binary sizes than is possible with the 'regex' crate proper. Thus, it is useful to track. "#, ), Usage::new( "--regex-lite", "Run tests for regex-lite too.", r#" When enabled, this will run compilation time and binary size tests for regex-lite too. 
"#, ), ]; USAGES } } impl Config { fn tests(&self) -> anyhow::Result<Vec<Test>> { let mut tests = vec![]; for profile in [Profile::Dev, Profile::Release] { for features in REGEX_COMBOS.iter() { let features = features.iter().map(|f| f.to_string()).collect(); tests.push(Test { regex_dir: self.regexdir()?.to_path_buf(), profile, features, regex_lite: false, regex_automata: false, }); } if self.regex_lite { for features in REGEX_LITE_COMBOS.iter() { let features = features.iter().map(|f| f.to_string()).collect(); tests.push(Test { regex_dir: self.regexdir()?.to_path_buf(), profile, features, regex_lite: true, regex_automata: false, }); } } if self.regex_automata { for features in REGEX_AUTOMATA_COMBOS.iter() { let features = features.iter().map(|f| f.to_string()).collect(); tests.push(Test { regex_dir: self.regexdir()?.to_path_buf(), profile, features, regex_lite: false, regex_automata: true, }); } } } Ok(tests) } fn regexdir(&self) -> anyhow::Result<&Path> { self.regexdir .as_deref() .ok_or_else(|| anyhow::anyhow!("missing <regex-crate-dir>")) } fn outdir(&self) -> anyhow::Result<&Path> { self.outdir .as_deref() .ok_or_else(|| anyhow::anyhow!("missing <out-dir>")) } } #[derive(Debug)] struct TestDir { dir: PathBuf, test: Test, } impl TestDir { /// Creates a new test directory for the given test. The directory for the /// test is created as a child of `parent_dir`. fn new(parent_dir: &Path, test: Test) -> anyhow::Result<TestDir> { let dir = parent_dir.join(test.name()); std::fs::create_dir_all(&dir) .with_context(|| dir.display().to_string())?; Ok(TestDir { dir, test }) } /// Write the Cargo.toml for this test fn write_cargo_toml(&self) -> anyhow::Result<()> { let path = self.dir.join("Cargo.toml"); std::fs::write(&path, self.test.cargo_toml()) .with_context(|| path.display().to_string())?; Ok(()) } /// Write the main.rs for this test. fn write_main_rs(&self) -> anyhow::Result<()> { let path = self.dir.join("main.rs"); std::fs::write(&path, self.test.main_rs()) .with_context(|| path.display().to_string())?; Ok(()) } /// Clean the Cargo project in this directory, to ensure we start fresh. fn cargo_clean(&self) -> anyhow::Result<()> { let status = Command::new("cargo") .arg("clean") .arg("--manifest-path") .arg(self.dir.join("Cargo.toml")) .status() .with_context(|| { format!("'cargo clean' failed for test '{}'", self.test.name()) })?; anyhow::ensure!( status.success(), "'cargo clean' got an error exit code of {:?} for test '{}'", status, self.test.name(), ); Ok(()) } /// Fetch all dependencies for the Cargo project in this directory. fn cargo_fetch(&self) -> anyhow::Result<()> { let status = Command::new("cargo") .arg("fetch") .arg("--manifest-path") .arg(self.dir.join("Cargo.toml")) .status() .with_context(|| { format!("'cargo fetch' failed for test '{}'", self.test.name()) })?; anyhow::ensure!( status.success(), "'cargo fetch' got an error exit code of {:?} for test '{}'", status, self.test.name(), ); Ok(()) } /// Build the test and return the time it took and the size of the binary. /// /// Callers should run 'cargo_fetch()' before this, as this passes the /// '--offline' flag. This ensures that our timing measurement only /// includes build times. 
    fn cargo_build(&self) -> anyhow::Result<Measurement> {
        let start = Instant::now();
        let status = Command::new("cargo")
            .arg("build")
            .arg("--manifest-path")
            .arg(self.dir.join("Cargo.toml"))
            .arg("--offline")
            .arg("--profile")
            .arg(self.test.profile.as_str())
            .status()
            .with_context(|| {
                format!("'cargo build' failed for test '{}'", self.test.name())
            })?;
        anyhow::ensure!(
            status.success(),
            "'cargo build' got an error exit code of {:?} for test '{}'",
            status,
            self.test.name(),
        );
        let duration = start.elapsed();
        let bin = self
            .dir
            .join("target")
            .join(self.test.profile.as_str_for_target_path())
            .join(format!("main{}", std::env::consts::EXE_SUFFIX));
        let size = std::fs::metadata(&bin)
            .with_context(|| bin.display().to_string())?
            .len();
        Ok(Measurement { duration, size })
    }
}

#[derive(Clone, Debug)]
struct Measurement {
    duration: Duration,
    size: u64,
}

#[derive(Debug)]
struct Test {
    /// The path to the directory containing the regex crate.
    regex_dir: PathBuf,
    /// The Cargo profile to use.
    profile: Profile,
    /// The list of crate features to enable.
    features: Vec<String>,
    /// Whether we're testing the regex-lite crate or not.
    regex_lite: bool,
    /// Whether we're testing the regex-automata crate or not.
    regex_automata: bool,
}

impl Test {
    /// Returns the name for this test.
    ///
    /// The name is meant to be a unique identifier for this test based on its
    /// configuration. At the time of writing, the configuration space is
    /// still somewhat small enough that this is reasonable. But if it blows
    /// up in the future, we might need to reconsider the approach here... But
    /// that would be sad.
    fn name(&self) -> String {
        // Bah, we should probably use an enum for this but I got lazy.
        assert!(!(self.regex_lite && self.regex_automata));
        let krate = if self.regex_lite {
            "regex-lite"
        } else if self.regex_automata {
            "regex-automata"
        } else {
            "regex"
        };
        let profile = self.profile.as_str();
        let features = self.features.join("_");
        format!("{krate}__{profile}__{features}")
    }

    /// Return a string corresponding to the `Cargo.toml` for this test.
    fn cargo_toml(&self) -> String {
        if self.regex_lite {
            self.cargo_toml_regex_lite()
        } else if self.regex_automata {
            self.cargo_toml_regex_automata()
        } else {
            self.cargo_toml_regex()
        }
    }

    /// Return a string corresponding to the `main.rs` for this test.
    fn main_rs(&self) -> String {
        if self.regex_lite {
            self.main_rs_regex_lite()
        } else if self.regex_automata {
            self.main_rs_regex_automata()
        } else {
            self.main_rs_regex()
        }
    }

    fn cargo_toml_regex(&self) -> String {
        let name = self.name();
        let path = self.regex_dir.display();
        let features = self
            .features
            .iter()
            .map(|f| format!(r#""{}""#, f))
            .collect::<Vec<String>>()
            .join(", ");
        format!(
            r#"
[package]
name = "{name}"
version = "0.0.0"
edition = "2021"
publish = false

# This detaches this directory from any workspace
# in a parent directory.
[workspace]

[[bin]]
name = "main"
path = "main.rs"

[dependencies.regex]
path = "{path}"
version = "*"
default-features = false
features = [{features}]

[profile.dev]
strip = "symbols"

[profile.release]
strip = "symbols"
"#
        )
    }

    fn cargo_toml_regex_lite(&self) -> String {
        let name = self.name();
        let path = self.regex_dir.join("regex-lite");
        let path = path.display();
        let features = self
            .features
            .iter()
            .map(|f| format!(r#""{}""#, f))
            .collect::<Vec<String>>()
            .join(", ");
        format!(
            r#"
[package]
name = "{name}"
version = "0.0.0"
edition = "2021"
publish = false

# This detaches this directory from any workspace
# in a parent directory.
[workspace] [[bin]] name = "main" path = "main.rs" [dependencies.regex-lite] path = "{path}" version = "*" default-features = false features = [{features}] [profile.dev] strip = "symbols" [profile.release] strip = "symbols" "# ) } fn cargo_toml_regex_automata(&self) -> String { let name = self.name(); let path = self.regex_dir.join("regex-automata"); let path = path.display(); let features = self .features .iter() .map(|f| format!(r#""{}""#, f)) .collect::<Vec<String>>() .join(", "); format!( r#" [package] name = "{name}" version = "0.0.0" edition = "2021" publish = false # This detaches this directory from any workspace # in a parent directory. [workspace] [[bin]] name = "main" path = "main.rs" [dependencies.regex-automata] path = "{path}" default-features = false features = [{features}] [profile.dev] strip = "symbols" [profile.release] strip = "symbols" "# ) } fn main_rs_regex(&self) -> String { format!( r#" use regex::{{bytes, Regex, RegexSet}}; fn main() {{ let re = Regex::new("a").unwrap(); assert!(re.is_match("a")); assert_eq!("a", re.find("a").unwrap().as_str()); assert_eq!("a", &re.captures("a").unwrap()[0]); assert_eq!(2, re.find_iter("aa").count()); assert_eq!(2, re.captures_iter("aa").count()); let re = bytes::Regex::new("a").unwrap(); assert!(re.is_match(b"a")); assert_eq!(b"a", re.find(b"a").unwrap().as_bytes()); assert_eq!(b"a", &re.captures(b"a").unwrap()[0]); assert_eq!(2, re.find_iter(b"aa").count()); assert_eq!(2, re.captures_iter(b"aa").count()); let re = RegexSet::new(&["a", "b"]).unwrap(); assert!(re.is_match("a")); assert_eq!(2, re.matches("acdb").iter().count()); let re = bytes::RegexSet::new(&["a", "b"]).unwrap(); assert!(re.is_match(b"a")); assert_eq!(2, re.matches(b"acdb").iter().count()); }} "# ) } fn main_rs_regex_lite(&self) -> String { format!( r#" use regex_lite::{{Regex}}; fn main() {{ let re = Regex::new("a").unwrap(); assert!(re.is_match("a")); assert_eq!("a", re.find("a").unwrap().as_str()); assert_eq!("a", &re.captures("a").unwrap()[0]); assert_eq!(2, re.find_iter("aa").count()); assert_eq!(2, re.captures_iter("aa").count()); }} "# ) } fn main_rs_regex_automata(&self) -> String { use std::fmt::Write; let mut bufuse = String::new(); let mut bufmain = String::new(); if self.contains("nfa") || self.contains("nfa-pikevm") { writeln!( bufuse, r#" use regex_automata::nfa::thompson::pikevm::PikeVM; "# ) .unwrap(); writeln!( bufmain, r#" let re = PikeVM::new("a").unwrap(); let mut cache = re.create_cache(); assert!(re.is_match(&mut cache, "a")); assert_eq!(0..1, re.find(&mut cache, "a").unwrap().range()); assert_eq!(2, re.find_iter(&mut cache, "aa").count()); assert_eq!(2, re.captures_iter(&mut cache, "aa").count()); "# ) .unwrap(); } if self.contains("nfa") || self.contains("nfa-backtrack") { writeln!( bufuse, r#" use regex_automata::nfa::thompson::backtrack::BoundedBacktracker; "# ) .unwrap(); writeln!( bufmain, r#" let re = BoundedBacktracker::new("a").unwrap(); let mut cache = re.create_cache(); assert!(re.try_is_match(&mut cache, "a").unwrap()); assert_eq!(0..1, re.try_find(&mut cache, "a").unwrap().unwrap().range()); assert_eq!(2, re.try_find_iter(&mut cache, "aa").count()); assert_eq!(2, re.try_captures_iter(&mut cache, "aa").count()); "# ) .unwrap(); } if self.contains("hybrid") { writeln!( bufuse, r#" use regex_automata::hybrid; "# ) .unwrap(); writeln!( bufmain, r#" let re = hybrid::dfa::DFA::new("a").unwrap(); let mut cache = re.create_cache(); let input = regex_automata::Input::new("a"); assert_eq!(1, re.try_search_fwd(&mut cache, 
&input).unwrap().unwrap().offset()); "# ).unwrap(); } if self.contains("dfa") || (self.contains("dfa-build") && self.contains("dfa-search")) { writeln!( bufuse, r#" use regex_automata::dfa::{{Automaton, dense, sparse}}; "# ) .unwrap(); writeln!( bufmain, r#" let re = dense::DFA::new("a").unwrap(); let input = regex_automata::Input::new("a"); assert_eq!(1, re.try_search_fwd(&input).unwrap().unwrap().offset()); let re = sparse::DFA::new("a").unwrap(); let input = regex_automata::Input::new("a"); assert_eq!(1, re.try_search_fwd(&input).unwrap().unwrap().offset()); "# ) .unwrap(); } if self.contains("dfa") || self.contains("dfa-onepass") { writeln!( bufuse, r#" use regex_automata::dfa::onepass; "# ) .unwrap(); writeln!( bufmain, r#" let re = onepass::DFA::new("a").unwrap(); let mut cache = re.create_cache(); assert!(re.is_match(&mut cache, "a")); let input = regex_automata::Input::new("a"); let mut caps = re.create_captures(); assert!(re.try_search(&mut cache, &input, &mut caps).is_ok()); assert_eq!(0..1, caps.get_match().unwrap().range()); "# ) .unwrap(); } if self.contains("meta") { writeln!( bufuse, r#" use regex_automata::meta; "# ) .unwrap(); writeln!( bufmain, r#" let re = meta::Regex::new("a").unwrap(); assert!(re.is_match("a")); assert_eq!(0..1, re.find("a").unwrap().range()); let mut caps = re.create_captures(); re.captures("a", &mut caps); assert_eq!(0..1, caps.get_match().unwrap().range()); assert_eq!(2, re.find_iter("aa").count()); assert_eq!(2, re.captures_iter("aa").count()); "# ) .unwrap(); } format!( r#" {bufuse} fn main() {{ {bufmain} }} "# ) } fn contains(&self, feature_name: &str) -> bool { self.features.iter().find(|name| feature_name == &**name).is_some() } } /// The Cargo profile to use. #[derive(Clone, Copy, Debug)] enum Profile { Dev, Release, } impl Profile { fn as_str(&self) -> &'static str { match *self { Profile::Dev => "dev", Profile::Release => "release", } } fn as_str_for_target_path(&self) -> &'static str { match *self { Profile::Dev => "debug", Profile::Release => "release", } } } /// Compiles a basic "Hello, world!" program with the given profile and returns /// the size of the resulting binary. fn baseline_size(parent_dir: &Path, profile: Profile) -> anyhow::Result<u64> { let dir = parent_dir.join(profile.as_str()); let cargo_toml_path = dir.join("Cargo.toml"); let main_rs_path = dir.join("main.rs"); std::fs::create_dir_all(&dir) .with_context(|| dir.display().to_string())?; std::fs::write(&cargo_toml_path, baseline_cargo_toml()) .with_context(|| dir.display().to_string())?; std::fs::write( &main_rs_path, r#"fn main() {{ println!("Hello, world!"); }}"#, ) .with_context(|| dir.display().to_string())?; let status = Command::new("cargo") .arg("clean") .arg("--manifest-path") .arg(&cargo_toml_path) .status() .with_context(|| format!("'cargo clean' failed for baseline"))?; anyhow::ensure!( status.success(), "'cargo clean' got an error exit code of {:?} for baseline", status, ); let status = Command::new("cargo") .arg("build") .arg("--manifest-path") .arg(dir.join("Cargo.toml")) .arg("--offline") .arg("--profile") .arg(profile.as_str()) .status() .with_context(|| format!("'cargo build' failed for baseline"))?; anyhow::ensure!( status.success(), "'cargo build' got an error exit code of {:?} for baseline", status, ); let bin = dir .join("target") .join(profile.as_str_for_target_path()) .join(format!("main{}", std::env::consts::EXE_SUFFIX)); let size = std::fs::metadata(&bin) .with_context(|| bin.display().to_string())? 
.len(); Ok(size) } fn baseline_cargo_toml() -> String { format!( r#" [package] name = "baseline" version = "0.0.0" edition = "2021" publish = false # This detaches this directory from any workspace # in a parent directory. [workspace] [[bin]] name = "main" path = "main.rs" [profile.dev] strip = "symbols" [profile.release] strip = "symbols" "# ) } fn git_revision_hash(regex_dir: &Path) -> anyhow::Result<String> { let output = std::process::Command::new("git") .current_dir(regex_dir) .args(&["rev-parse", "--short=10", "HEAD"]) .output() .context("failed to run 'git rev-parse'")?; let v = String::from_utf8_lossy(&output.stdout).trim().to_string(); anyhow::ensure!(!v.is_empty(), "got empty output from 'git rev-parse'",); Ok(v) } <file_sep>/regex-automata/src/meta/mod.rs /*! Provides a regex matcher that composes several other regex matchers automatically. This module is home to a meta [`Regex`], which provides a convenient high level API for executing regular expressions in linear time. # Comparison with the `regex` crate A meta `Regex` is the implementation used directly by the `regex` crate. Indeed, the `regex` crate API is essentially just a light wrapper over a meta `Regex`. This means that if you need the full flexibility offered by this API, then you should be able to switch to using this API directly without any changes in match semantics or syntax. However, there are some API level differences: * The `regex` crate API returns match objects that include references to the haystack itself, which in turn makes it easy to access the matching strings without having to slice the haystack yourself. In contrast, a meta `Regex` returns match objects that only have offsets in them. * At time of writing, a meta `Regex` doesn't have some of the convenience routines that the `regex` crate has, such as replacements. Note though that [`Captures::interpolate_string`](crate::util::captures::Captures::interpolate_string) will handle the replacement string interpolation for you. * A meta `Regex` supports the [`Input`](crate::Input) abstraction, which provides a way to configure a search in more ways than is supported by the `regex` crate. For example, [`Input::anchored`](crate::Input::anchored) can be used to run an anchored search, regardless of whether the pattern is itself anchored with a `^`. * A meta `Regex` supports multi-pattern searching everywhere. Indeed, every [`Match`](crate::Match) returned by the search APIs include a [`PatternID`](crate::PatternID) indicating which pattern matched. In the single pattern case, all matches correspond to [`PatternID::ZERO`](crate::PatternID::ZERO). In contrast, the `regex` crate has distinct `Regex` and a `RegexSet` APIs. The former only supports a single pattern, while the latter supports multiple patterns but cannot report the offsets of a match. * A meta `Regex` provides the explicit capability of bypassing its internal memory pool for automatically acquiring mutable scratch space required by its internal regex engines. Namely, a [`Cache`] can be explicitly provided to lower level routines such as [`Regex::search_with`]. */ pub use self::{ error::BuildError, regex::{ Builder, Cache, CapturesMatches, Config, FindMatches, Regex, Split, SplitN, }, }; mod error; #[cfg(any(feature = "dfa-build", feature = "hybrid"))] mod limited; mod literal; mod regex; mod reverse_inner; #[cfg(any(feature = "dfa-build", feature = "hybrid"))] mod stopat; mod strategy; mod wrappers; <file_sep>/regex-automata/src/util/iter.rs /*! 
Generic helpers for iteration of matches from a regex engine in a haystack.

The principal type in this module is a [`Searcher`]. A `Searcher` provides
its own lower level iterator-like API in addition to methods for constructing
types that implement `Iterator`. The documentation for `Searcher` explains a
bit more about why these different APIs exist.

Currently, this module supports iteration over any regex engine that works
with the [`HalfMatch`], [`Match`] or [`Captures`] types.
*/

#[cfg(feature = "alloc")]
use crate::util::captures::Captures;
use crate::util::search::{HalfMatch, Input, Match, MatchError};

/// A searcher for creating iterators and performing lower level iteration.
///
/// This searcher encapsulates the logic required for finding all successive
/// non-overlapping matches in a haystack. In theory, iteration would look
/// something like this:
///
/// 1. Setting the start position to `0`.
/// 2. Execute a regex search. If no match, end iteration.
/// 3. Report the match and set the start position to the end of the match.
/// 4. Go back to (2).
///
/// And if this were indeed the case, it's likely that `Searcher` wouldn't
/// exist. Unfortunately, because a regex may match the empty string, the
/// above logic won't work for all possible regexes. Namely, if an empty match
/// is found, then step (3) would set the start position of the search to the
/// position it was at. Thus, iteration would never end.
///
/// Instead, a `Searcher` knows how to detect these cases and forcefully
/// advance iteration in the case of an empty match that overlaps with a
/// previous match.
///
/// If you know that your regex cannot match any empty string, then the simple
/// algorithm described above will work correctly.
///
/// When possible, prefer the iterators defined on the regex engine you're
/// using. This tries to abstract over the regex engine and is thus a bit more
/// unwieldy to use.
///
/// In particular, a `Searcher` is not itself an iterator. Instead, it
/// provides `advance` routines that permit moving the search along
/// explicitly. It also provides various routines, like
/// [`Searcher::into_matches_iter`], that accept a closure (representing how
/// a regex engine executes a search) and return a conventional iterator.
///
/// The lifetime parameters come from the [`Input`] type passed to
/// [`Searcher::new`]:
///
/// * `'h` is the lifetime of the underlying haystack.
///
/// # Searcher vs Iterator
///
/// Why does a search type with "advance" APIs exist at all when we also have
/// iterators? Unfortunately, the reasoning behind this split is a complex
/// combination of the following things:
///
/// 1. While many of the regex engines expose their own iterators, it is also
/// nice to expose this lower level iteration helper because it permits
/// callers to provide their own `Input` configuration. Moreover, a `Searcher`
/// can work with _any_ regex engine instead of only the ones defined in this
/// crate. This way, everyone benefits from a shared iteration implementation.
/// 2. There are many different regex engines that, while they have the same
/// match semantics, have slightly different APIs. Iteration is just complex
/// enough to want to share code, and so we need a way of abstracting over
/// those different regex engines. While we could define a new trait that
/// describes any regex engine search API, it would wind up looking very close
/// to a closure. While there may still be reasons for the more generic trait
/// to exist, for now and for the purposes of iteration, we use a closure.
/// Closures also provide a lot of easy flexibility at the call site, in that
/// they permit the caller to borrow any kind of state they want for use
/// during each search call.
/// 3. As a result of using closures, and because closures are anonymous types
/// that cannot be named, it is difficult to encapsulate them without both
/// costs to speed and added complexity to the public API. For example, in
/// defining an iterator type like
/// [`dfa::regex::FindMatches`](crate::dfa::regex::FindMatches),
/// if we use a closure internally, it's not possible to name this type in the
/// return type of the iterator constructor. Thus, the only way around it is
/// to erase the type by boxing it and turning it into a `Box<dyn FnMut ...>`.
/// This boxed closure is unlikely to be inlined _and_ it infects the public
/// API in subtle ways. Namely, unless you declare the closure as implementing
/// `Send` and `Sync`, then the resulting iterator type won't implement it
/// either. But there are practical issues with requiring the closure to
/// implement `Send` and `Sync` that result in other API complexities that
/// are beyond the scope of this already long exposition.
/// 4. Some regex engines expose more complex match information than just
/// "which pattern matched" and "at what offsets." For example, the PikeVM
/// exposes match spans for each capturing group that participated in the
/// match. In such cases, it can be quite beneficial to reuse the capturing
/// group allocation on subsequent searches. A proper iterator doesn't permit
/// this API due to its interface, so it's useful to have something a bit
/// lower level that permits callers to amortize allocations while also
/// reusing a shared implementation of iteration. (See the documentation for
/// [`Searcher::advance`] for an example of using the "advance" API with the
/// PikeVM.)
///
/// What this boils down to is that there are "advance" APIs which require
/// handing a closure to them for every call, and there are also APIs to
/// create iterators from a closure. The former are useful for _implementing_
/// iterators or when you need more flexibility, while the latter are useful
/// for conveniently writing custom iterators on-the-fly.
///
/// # Example: iterating with captures
///
/// Several regex engines in this crate offer convenient iterator APIs over
/// [`Captures`] values. To do so, this requires allocating a new `Captures`
/// value for each iteration step. This can perhaps be more costly than you
/// might want. Instead of implementing your own iterator to avoid that
/// cost (which can be a little subtle if you want to handle empty matches
/// correctly), you can use this `Searcher` to do it for you:
///
/// ```
/// use regex_automata::{
///     nfa::thompson::pikevm::PikeVM,
///     util::iter::Searcher,
///     Input, Span,
/// };
///
/// let re = PikeVM::new("foo(?P<numbers>[0-9]+)")?;
/// let haystack = "foo1 foo12 foo123";
///
/// let mut caps = re.create_captures();
/// let mut cache = re.create_cache();
/// let mut matches = vec![];
/// let mut searcher = Searcher::new(Input::new(haystack));
/// while let Some(_) = searcher.advance(|input| {
///     re.search(&mut cache, input, &mut caps);
///     Ok(caps.get_match())
/// }) {
///     // The unwrap is OK since 'numbers' matches if the pattern matches.
/// matches.push(caps.get_group_by_name("numbers").unwrap()); /// } /// assert_eq!(matches, vec![ /// Span::from(3..4), /// Span::from(8..10), /// Span::from(14..17), /// ]); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[derive(Clone, Debug)] pub struct Searcher<'h> { /// The input parameters to give to each regex engine call. /// /// The start position of the search is mutated during iteration. input: Input<'h>, /// Records the end offset of the most recent match. This is necessary to /// handle a corner case for preventing empty matches from overlapping with /// the ending bounds of a prior match. last_match_end: Option<usize>, } impl<'h> Searcher<'h> { /// Create a new fallible non-overlapping matches iterator. /// /// The given `input` provides the parameters (including the haystack), /// while the `finder` represents a closure that calls the underlying regex /// engine. The closure may borrow any additional state that is needed, /// such as a prefilter scanner. pub fn new(input: Input<'h>) -> Searcher<'h> { Searcher { input, last_match_end: None } } /// Returns the current `Input` used by this searcher. /// /// The `Input` returned is generally equivalent to the one given to /// [`Searcher::new`], but its start position may be different to reflect /// the start of the next search to be executed. pub fn input<'s>(&'s self) -> &'s Input<'h> { &self.input } /// Return the next half match for an infallible search if one exists, and /// advance to the next position. /// /// This is like `try_advance_half`, except errors are converted into /// panics. /// /// # Panics /// /// If the given closure returns an error, then this panics. This is useful /// when you know your underlying regex engine has been configured to not /// return an error. /// /// # Example /// /// This example shows how to use a `Searcher` to iterate over all matches /// when using a DFA, which only provides "half" matches. 
/// /// ``` /// use regex_automata::{ /// hybrid::dfa::DFA, /// util::iter::Searcher, /// HalfMatch, Input, /// }; /// /// let re = DFA::new(r"[0-9]{4}-[0-9]{2}-[0-9]{2}")?; /// let mut cache = re.create_cache(); /// /// let input = Input::new("2010-03-14 2016-10-08 2020-10-22"); /// let mut it = Searcher::new(input); /// /// let expected = Some(HalfMatch::must(0, 10)); /// let got = it.advance_half(|input| re.try_search_fwd(&mut cache, input)); /// assert_eq!(expected, got); /// /// let expected = Some(HalfMatch::must(0, 21)); /// let got = it.advance_half(|input| re.try_search_fwd(&mut cache, input)); /// assert_eq!(expected, got); /// /// let expected = Some(HalfMatch::must(0, 32)); /// let got = it.advance_half(|input| re.try_search_fwd(&mut cache, input)); /// assert_eq!(expected, got); /// /// let expected = None; /// let got = it.advance_half(|input| re.try_search_fwd(&mut cache, input)); /// assert_eq!(expected, got); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// This correctly moves iteration forward even when an empty match occurs: /// /// ``` /// use regex_automata::{ /// hybrid::dfa::DFA, /// util::iter::Searcher, /// HalfMatch, Input, /// }; /// /// let re = DFA::new(r"a|")?; /// let mut cache = re.create_cache(); /// /// let input = Input::new("abba"); /// let mut it = Searcher::new(input); /// /// let expected = Some(HalfMatch::must(0, 1)); /// let got = it.advance_half(|input| re.try_search_fwd(&mut cache, input)); /// assert_eq!(expected, got); /// /// let expected = Some(HalfMatch::must(0, 2)); /// let got = it.advance_half(|input| re.try_search_fwd(&mut cache, input)); /// assert_eq!(expected, got); /// /// let expected = Some(HalfMatch::must(0, 4)); /// let got = it.advance_half(|input| re.try_search_fwd(&mut cache, input)); /// assert_eq!(expected, got); /// /// let expected = None; /// let got = it.advance_half(|input| re.try_search_fwd(&mut cache, input)); /// assert_eq!(expected, got); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn advance_half<F>(&mut self, finder: F) -> Option<HalfMatch> where F: FnMut(&Input<'_>) -> Result<Option<HalfMatch>, MatchError>, { match self.try_advance_half(finder) { Ok(m) => m, Err(err) => panic!( "unexpected regex half find error: {}\n\ to handle find errors, use 'try' or 'search' methods", err, ), } } /// Return the next match for an infallible search if one exists, and /// advance to the next position. /// /// The search is advanced even in the presence of empty matches by /// forbidding empty matches from overlapping with any other match. /// /// This is like `try_advance`, except errors are converted into panics. /// /// # Panics /// /// If the given closure returns an error, then this panics. This is useful /// when you know your underlying regex engine has been configured to not /// return an error. 
/// /// # Example /// /// This example shows how to use a `Searcher` to iterate over all matches /// when using a regex based on lazy DFAs: /// /// ``` /// use regex_automata::{ /// hybrid::regex::Regex, /// util::iter::Searcher, /// Match, Input, /// }; /// /// let re = Regex::new(r"[0-9]{4}-[0-9]{2}-[0-9]{2}")?; /// let mut cache = re.create_cache(); /// /// let input = Input::new("2010-03-14 2016-10-08 2020-10-22"); /// let mut it = Searcher::new(input); /// /// let expected = Some(Match::must(0, 0..10)); /// let got = it.advance(|input| re.try_search(&mut cache, input)); /// assert_eq!(expected, got); /// /// let expected = Some(Match::must(0, 11..21)); /// let got = it.advance(|input| re.try_search(&mut cache, input)); /// assert_eq!(expected, got); /// /// let expected = Some(Match::must(0, 22..32)); /// let got = it.advance(|input| re.try_search(&mut cache, input)); /// assert_eq!(expected, got); /// /// let expected = None; /// let got = it.advance(|input| re.try_search(&mut cache, input)); /// assert_eq!(expected, got); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// This example shows the same as above, but with the PikeVM. This example /// is useful because it shows how to use this API even when the regex /// engine doesn't directly return a `Match`. /// /// ``` /// use regex_automata::{ /// nfa::thompson::pikevm::PikeVM, /// util::iter::Searcher, /// Match, Input, /// }; /// /// let re = PikeVM::new(r"[0-9]{4}-[0-9]{2}-[0-9]{2}")?; /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); /// /// let input = Input::new("2010-03-14 2016-10-08 2020-10-22"); /// let mut it = Searcher::new(input); /// /// let expected = Some(Match::must(0, 0..10)); /// let got = it.advance(|input| { /// re.search(&mut cache, input, &mut caps); /// Ok(caps.get_match()) /// }); /// // Note that if we wanted to extract capturing group spans, we could /// // do that here with 'caps'. /// assert_eq!(expected, got); /// /// let expected = Some(Match::must(0, 11..21)); /// let got = it.advance(|input| { /// re.search(&mut cache, input, &mut caps); /// Ok(caps.get_match()) /// }); /// assert_eq!(expected, got); /// /// let expected = Some(Match::must(0, 22..32)); /// let got = it.advance(|input| { /// re.search(&mut cache, input, &mut caps); /// Ok(caps.get_match()) /// }); /// assert_eq!(expected, got); /// /// let expected = None; /// let got = it.advance(|input| { /// re.search(&mut cache, input, &mut caps); /// Ok(caps.get_match()) /// }); /// assert_eq!(expected, got); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn advance<F>(&mut self, finder: F) -> Option<Match> where F: FnMut(&Input<'_>) -> Result<Option<Match>, MatchError>, { match self.try_advance(finder) { Ok(m) => m, Err(err) => panic!( "unexpected regex find error: {}\n\ to handle find errors, use 'try' or 'search' methods", err, ), } } /// Return the next half match for a fallible search if one exists, and /// advance to the next position. /// /// This is like `advance_half`, except it permits callers to handle errors /// during iteration. #[inline] pub fn try_advance_half<F>( &mut self, mut finder: F, ) -> Result<Option<HalfMatch>, MatchError> where F: FnMut(&Input<'_>) -> Result<Option<HalfMatch>, MatchError>, { let mut m = match finder(&self.input)? { None => return Ok(None), Some(m) => m, }; if Some(m.offset()) == self.last_match_end { m = match self.handle_overlapping_empty_half_match(m, finder)? 
{ None => return Ok(None), Some(m) => m, }; } self.input.set_start(m.offset()); self.last_match_end = Some(m.offset()); Ok(Some(m)) } /// Return the next match for a fallible search if one exists, and advance /// to the next position. /// /// This is like `advance`, except it permits callers to handle errors /// during iteration. #[inline] pub fn try_advance<F>( &mut self, mut finder: F, ) -> Result<Option<Match>, MatchError> where F: FnMut(&Input<'_>) -> Result<Option<Match>, MatchError>, { let mut m = match finder(&self.input)? { None => return Ok(None), Some(m) => m, }; if m.is_empty() && Some(m.end()) == self.last_match_end { m = match self.handle_overlapping_empty_match(m, finder)? { None => return Ok(None), Some(m) => m, }; } self.input.set_start(m.end()); self.last_match_end = Some(m.end()); Ok(Some(m)) } /// Given a closure that executes a single search, return an iterator over /// all successive non-overlapping half matches. /// /// The iterator returned yields result values. If the underlying regex /// engine is configured to never return an error, consider calling /// [`TryHalfMatchesIter::infallible`] to convert errors into panics. /// /// # Example /// /// This example shows how to use a `Searcher` to create a proper /// iterator over half matches. /// /// ``` /// use regex_automata::{ /// hybrid::dfa::DFA, /// util::iter::Searcher, /// HalfMatch, Input, /// }; /// /// let re = DFA::new(r"[0-9]{4}-[0-9]{2}-[0-9]{2}")?; /// let mut cache = re.create_cache(); /// /// let input = Input::new("2010-03-14 2016-10-08 2020-10-22"); /// let mut it = Searcher::new(input).into_half_matches_iter(|input| { /// re.try_search_fwd(&mut cache, input) /// }); /// /// let expected = Some(Ok(HalfMatch::must(0, 10))); /// assert_eq!(expected, it.next()); /// /// let expected = Some(Ok(HalfMatch::must(0, 21))); /// assert_eq!(expected, it.next()); /// /// let expected = Some(Ok(HalfMatch::must(0, 32))); /// assert_eq!(expected, it.next()); /// /// let expected = None; /// assert_eq!(expected, it.next()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn into_half_matches_iter<F>( self, finder: F, ) -> TryHalfMatchesIter<'h, F> where F: FnMut(&Input<'_>) -> Result<Option<HalfMatch>, MatchError>, { TryHalfMatchesIter { it: self, finder } } /// Given a closure that executes a single search, return an iterator over /// all successive non-overlapping matches. /// /// The iterator returned yields result values. If the underlying regex /// engine is configured to never return an error, consider calling /// [`TryMatchesIter::infallible`] to convert errors into panics. /// /// # Example /// /// This example shows how to use a `Searcher` to create a proper /// iterator over matches. 
/// /// ``` /// use regex_automata::{ /// hybrid::regex::Regex, /// util::iter::Searcher, /// Match, Input, /// }; /// /// let re = Regex::new(r"[0-9]{4}-[0-9]{2}-[0-9]{2}")?; /// let mut cache = re.create_cache(); /// /// let input = Input::new("2010-03-14 2016-10-08 2020-10-22"); /// let mut it = Searcher::new(input).into_matches_iter(|input| { /// re.try_search(&mut cache, input) /// }); /// /// let expected = Some(Ok(Match::must(0, 0..10))); /// assert_eq!(expected, it.next()); /// /// let expected = Some(Ok(Match::must(0, 11..21))); /// assert_eq!(expected, it.next()); /// /// let expected = Some(Ok(Match::must(0, 22..32))); /// assert_eq!(expected, it.next()); /// /// let expected = None; /// assert_eq!(expected, it.next()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn into_matches_iter<F>(self, finder: F) -> TryMatchesIter<'h, F> where F: FnMut(&Input<'_>) -> Result<Option<Match>, MatchError>, { TryMatchesIter { it: self, finder } } /// Given a closure that executes a single search, return an iterator over /// all successive non-overlapping `Captures` values. /// /// The iterator returned yields result values. If the underlying regex /// engine is configured to never return an error, consider calling /// [`TryCapturesIter::infallible`] to convert errors into panics. /// /// Unlike the other iterator constructors, this accepts an initial /// `Captures` value. This `Captures` value is reused for each search, and /// the iterator implementation clones it before returning it. The caller /// must provide this value because the iterator is purposely ignorant /// of the underlying regex engine and thus doesn't know how to create /// one itself. More to the point, a `Captures` value itself has a few /// different constructors, which change which kind of information is /// available to query in exchange for search performance. /// /// # Example /// /// This example shows how to use a `Searcher` to create a proper iterator /// over `Captures` values, which provides access to all capturing group /// spans for each match. /// /// ``` /// use regex_automata::{ /// nfa::thompson::pikevm::PikeVM, /// util::iter::Searcher, /// Input, /// }; /// /// let re = PikeVM::new( /// r"(?P<y>[0-9]{4})-(?P<m>[0-9]{2})-(?P<d>[0-9]{2})", /// )?; /// let (mut cache, caps) = (re.create_cache(), re.create_captures()); /// /// let haystack = "2010-03-14 2016-10-08 2020-10-22"; /// let input = Input::new(haystack); /// let mut it = Searcher::new(input) /// .into_captures_iter(caps, |input, caps| { /// re.search(&mut cache, input, caps); /// Ok(()) /// }); /// /// let got = it.next().expect("first date")?; /// let year = got.get_group_by_name("y").expect("must match"); /// assert_eq!("2010", &haystack[year]); /// /// let got = it.next().expect("second date")?; /// let month = got.get_group_by_name("m").expect("must match"); /// assert_eq!("10", &haystack[month]); /// /// let got = it.next().expect("third date")?; /// let day = got.get_group_by_name("d").expect("must match"); /// assert_eq!("22", &haystack[day]); /// /// assert!(it.next().is_none()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[cfg(feature = "alloc")] #[inline] pub fn into_captures_iter<F>( self, caps: Captures, finder: F, ) -> TryCapturesIter<'h, F> where F: FnMut(&Input<'_>, &mut Captures) -> Result<(), MatchError>, { TryCapturesIter { it: self, caps, finder } } /// Handles the special case of a match that begins where the previous /// match ended. 
Without this special handling, it'd be possible to get /// stuck where an empty match never results in forward progress. This /// also makes it more consistent with how presiding general purpose regex /// engines work. #[cold] #[inline(never)] fn handle_overlapping_empty_half_match<F>( &mut self, _: HalfMatch, mut finder: F, ) -> Result<Option<HalfMatch>, MatchError> where F: FnMut(&Input<'_>) -> Result<Option<HalfMatch>, MatchError>, { // Since we are only here when 'm.offset()' matches the offset of the // last match, it follows that this must have been an empty match. // Since we both need to make progress *and* prevent overlapping // matches, we discard this match and advance the search by 1. // // Note that this may start a search in the middle of a codepoint. The // regex engines themselves are expected to deal with that and not // report any matches within a codepoint if they are configured in // UTF-8 mode. self.input.set_start(self.input.start().checked_add(1).unwrap()); finder(&self.input) } /// Handles the special case of an empty match by ensuring that 1) the /// iterator always advances and 2) empty matches never overlap with other /// matches. /// /// (1) is necessary because we principally make progress by setting the /// starting location of the next search to the ending location of the last /// match. But if a match is empty, then this results in a search that does /// not advance and thus does not terminate. /// /// (2) is not strictly necessary, but makes intuitive sense and matches /// the presiding behavior of most general purpose regex engines. The /// "intuitive sense" here is that we want to report NON-overlapping /// matches. So for example, given the regex 'a|(?:)' against the haystack /// 'a', without the special handling, you'd get the matches [0, 1) and [1, /// 1), where the latter overlaps with the end bounds of the former. /// /// Note that we mark this cold and forcefully prevent inlining because /// handling empty matches like this is extremely rare and does require /// quite a bit of code, comparatively. Keeping this code out of the main /// iterator function keeps it smaller and more amenable to inlining /// itself. #[cold] #[inline(never)] fn handle_overlapping_empty_match<F>( &mut self, m: Match, mut finder: F, ) -> Result<Option<Match>, MatchError> where F: FnMut(&Input<'_>) -> Result<Option<Match>, MatchError>, { assert!(m.is_empty()); self.input.set_start(self.input.start().checked_add(1).unwrap()); finder(&self.input) } } /// An iterator over all non-overlapping half matches for a fallible search. /// /// The iterator yields a `Result<HalfMatch, MatchError>` value until no more /// matches could be found. /// /// The type parameters are as follows: /// /// * `F` represents the type of a closure that executes the search. /// /// The lifetime parameters come from the [`Input`] type: /// /// * `'h` is the lifetime of the underlying haystack. /// /// When possible, prefer the iterators defined on the regex engine you're /// using. This tries to abstract over the regex engine and is thus a bit more /// unwieldy to use. /// /// This iterator is created by [`Searcher::into_half_matches_iter`]. pub struct TryHalfMatchesIter<'h, F> { it: Searcher<'h>, finder: F, } impl<'h, F> TryHalfMatchesIter<'h, F> { /// Return an infallible version of this iterator. /// /// Any item yielded that corresponds to an error results in a panic. This /// is useful if your underlying regex engine is configured in a way that /// it is guaranteed to never return an error. 
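    ///
    /// # Example
    ///
    /// A minimal sketch of the infallible iterator, mirroring the example
    /// for [`Searcher::into_half_matches_iter`] and assuming the lazy DFA
    /// cannot fail on this particular haystack:
    ///
    /// ```
    /// use regex_automata::{
    ///     hybrid::dfa::DFA,
    ///     util::iter::Searcher,
    ///     HalfMatch, Input,
    /// };
    ///
    /// let re = DFA::new(r"[0-9]{4}-[0-9]{2}-[0-9]{2}")?;
    /// let mut cache = re.create_cache();
    ///
    /// let input = Input::new("2010-03-14 2016-10-08 2020-10-22");
    /// let mut it = Searcher::new(input)
    ///     .into_half_matches_iter(|input| re.try_search_fwd(&mut cache, input))
    ///     .infallible();
    ///
    /// assert_eq!(Some(HalfMatch::must(0, 10)), it.next());
    /// assert_eq!(Some(HalfMatch::must(0, 21)), it.next());
    /// assert_eq!(Some(HalfMatch::must(0, 32)), it.next());
    /// assert_eq!(None, it.next());
    ///
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```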
pub fn infallible(self) -> HalfMatchesIter<'h, F> { HalfMatchesIter(self) } /// Returns the current `Input` used by this iterator. /// /// The `Input` returned is generally equivalent to the one used to /// construct this iterator, but its start position may be different to /// reflect the start of the next search to be executed. pub fn input<'i>(&'i self) -> &'i Input<'h> { self.it.input() } } impl<'h, F> Iterator for TryHalfMatchesIter<'h, F> where F: FnMut(&Input<'_>) -> Result<Option<HalfMatch>, MatchError>, { type Item = Result<HalfMatch, MatchError>; #[inline] fn next(&mut self) -> Option<Result<HalfMatch, MatchError>> { self.it.try_advance_half(&mut self.finder).transpose() } } impl<'h, F> core::fmt::Debug for TryHalfMatchesIter<'h, F> { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_struct("TryHalfMatchesIter") .field("it", &self.it) .field("finder", &"<closure>") .finish() } } /// An iterator over all non-overlapping half matches for an infallible search. /// /// The iterator yields a [`HalfMatch`] value until no more matches could be /// found. /// /// The type parameters are as follows: /// /// * `F` represents the type of a closure that executes the search. /// /// The lifetime parameters come from the [`Input`] type: /// /// * `'h` is the lifetime of the underlying haystack. /// /// When possible, prefer the iterators defined on the regex engine you're /// using. This tries to abstract over the regex engine and is thus a bit more /// unwieldy to use. /// /// This iterator is created by [`Searcher::into_half_matches_iter`] and /// then calling [`TryHalfMatchesIter::infallible`]. #[derive(Debug)] pub struct HalfMatchesIter<'h, F>(TryHalfMatchesIter<'h, F>); impl<'h, F> HalfMatchesIter<'h, F> { /// Returns the current `Input` used by this iterator. /// /// The `Input` returned is generally equivalent to the one used to /// construct this iterator, but its start position may be different to /// reflect the start of the next search to be executed. pub fn input<'i>(&'i self) -> &'i Input<'h> { self.0.it.input() } } impl<'h, F> Iterator for HalfMatchesIter<'h, F> where F: FnMut(&Input<'_>) -> Result<Option<HalfMatch>, MatchError>, { type Item = HalfMatch; #[inline] fn next(&mut self) -> Option<HalfMatch> { match self.0.next()? { Ok(m) => Some(m), Err(err) => panic!( "unexpected regex half find error: {}\n\ to handle find errors, use 'try' or 'search' methods", err, ), } } } /// An iterator over all non-overlapping matches for a fallible search. /// /// The iterator yields a `Result<Match, MatchError>` value until no more /// matches could be found. /// /// The type parameters are as follows: /// /// * `F` represents the type of a closure that executes the search. /// /// The lifetime parameters come from the [`Input`] type: /// /// * `'h` is the lifetime of the underlying haystack. /// /// When possible, prefer the iterators defined on the regex engine you're /// using. This tries to abstract over the regex engine and is thus a bit more /// unwieldy to use. /// /// This iterator is created by [`Searcher::into_matches_iter`]. pub struct TryMatchesIter<'h, F> { it: Searcher<'h>, finder: F, } impl<'h, F> TryMatchesIter<'h, F> { /// Return an infallible version of this iterator. /// /// Any item yielded that corresponds to an error results in a panic. This /// is useful if your underlying regex engine is configured in a way that /// it is guaranteed to never return an error. 
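    ///
    /// # Example
    ///
    /// A minimal sketch of the infallible iterator, mirroring the example
    /// for [`Searcher::into_matches_iter`] and assuming the lazy DFA regex
    /// cannot fail on this particular haystack:
    ///
    /// ```
    /// use regex_automata::{
    ///     hybrid::regex::Regex,
    ///     util::iter::Searcher,
    ///     Match, Input,
    /// };
    ///
    /// let re = Regex::new(r"[0-9]{4}-[0-9]{2}-[0-9]{2}")?;
    /// let mut cache = re.create_cache();
    ///
    /// let input = Input::new("2010-03-14 2016-10-08 2020-10-22");
    /// let mut it = Searcher::new(input)
    ///     .into_matches_iter(|input| re.try_search(&mut cache, input))
    ///     .infallible();
    ///
    /// assert_eq!(Some(Match::must(0, 0..10)), it.next());
    /// assert_eq!(Some(Match::must(0, 11..21)), it.next());
    /// assert_eq!(Some(Match::must(0, 22..32)), it.next());
    /// assert_eq!(None, it.next());
    ///
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```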
pub fn infallible(self) -> MatchesIter<'h, F> { MatchesIter(self) } /// Returns the current `Input` used by this iterator. /// /// The `Input` returned is generally equivalent to the one used to /// construct this iterator, but its start position may be different to /// reflect the start of the next search to be executed. pub fn input<'i>(&'i self) -> &'i Input<'h> { self.it.input() } } impl<'h, F> Iterator for TryMatchesIter<'h, F> where F: FnMut(&Input<'_>) -> Result<Option<Match>, MatchError>, { type Item = Result<Match, MatchError>; #[inline] fn next(&mut self) -> Option<Result<Match, MatchError>> { self.it.try_advance(&mut self.finder).transpose() } } impl<'h, F> core::fmt::Debug for TryMatchesIter<'h, F> { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_struct("TryMatchesIter") .field("it", &self.it) .field("finder", &"<closure>") .finish() } } /// An iterator over all non-overlapping matches for an infallible search. /// /// The iterator yields a [`Match`] value until no more matches could be found. /// /// The type parameters are as follows: /// /// * `F` represents the type of a closure that executes the search. /// /// The lifetime parameters come from the [`Input`] type: /// /// * `'h` is the lifetime of the underlying haystack. /// /// When possible, prefer the iterators defined on the regex engine you're /// using. This tries to abstract over the regex engine and is thus a bit more /// unwieldy to use. /// /// This iterator is created by [`Searcher::into_matches_iter`] and /// then calling [`TryMatchesIter::infallible`]. #[derive(Debug)] pub struct MatchesIter<'h, F>(TryMatchesIter<'h, F>); impl<'h, F> MatchesIter<'h, F> { /// Returns the current `Input` used by this iterator. /// /// The `Input` returned is generally equivalent to the one used to /// construct this iterator, but its start position may be different to /// reflect the start of the next search to be executed. pub fn input<'i>(&'i self) -> &'i Input<'h> { self.0.it.input() } } impl<'h, F> Iterator for MatchesIter<'h, F> where F: FnMut(&Input<'_>) -> Result<Option<Match>, MatchError>, { type Item = Match; #[inline] fn next(&mut self) -> Option<Match> { match self.0.next()? { Ok(m) => Some(m), Err(err) => panic!( "unexpected regex find error: {}\n\ to handle find errors, use 'try' or 'search' methods", err, ), } } } /// An iterator over all non-overlapping captures for a fallible search. /// /// The iterator yields a `Result<Captures, MatchError>` value until no more /// matches could be found. /// /// The type parameters are as follows: /// /// * `F` represents the type of a closure that executes the search. /// /// The lifetime parameters come from the [`Input`] type: /// /// * `'h` is the lifetime of the underlying haystack. /// /// When possible, prefer the iterators defined on the regex engine you're /// using. This tries to abstract over the regex engine and is thus a bit more /// unwieldy to use. /// /// This iterator is created by [`Searcher::into_captures_iter`]. #[cfg(feature = "alloc")] pub struct TryCapturesIter<'h, F> { it: Searcher<'h>, caps: Captures, finder: F, } #[cfg(feature = "alloc")] impl<'h, F> TryCapturesIter<'h, F> { /// Return an infallible version of this iterator. /// /// Any item yielded that corresponds to an error results in a panic. This /// is useful if your underlying regex engine is configured in a way that /// it is guaranteed to never return an error. 
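    ///
    /// # Example
    ///
    /// A minimal sketch of the infallible iterator, mirroring the example
    /// for [`Searcher::into_captures_iter`] and assuming the PikeVM (whose
    /// search cannot fail) as the underlying engine:
    ///
    /// ```
    /// use regex_automata::{
    ///     nfa::thompson::pikevm::PikeVM,
    ///     util::iter::Searcher,
    ///     Input,
    /// };
    ///
    /// let re = PikeVM::new(
    ///     r"(?P<y>[0-9]{4})-(?P<m>[0-9]{2})-(?P<d>[0-9]{2})",
    /// )?;
    /// let (mut cache, caps) = (re.create_cache(), re.create_captures());
    ///
    /// let haystack = "2010-03-14 2016-10-08 2020-10-22";
    /// let input = Input::new(haystack);
    /// let mut it = Searcher::new(input)
    ///     .into_captures_iter(caps, |input, caps| {
    ///         re.search(&mut cache, input, caps);
    ///         Ok(())
    ///     })
    ///     .infallible();
    ///
    /// let got = it.next().expect("first date");
    /// let year = got.get_group_by_name("y").expect("must match");
    /// assert_eq!("2010", &haystack[year]);
    ///
    /// let got = it.next().expect("second date");
    /// let month = got.get_group_by_name("m").expect("must match");
    /// assert_eq!("10", &haystack[month]);
    ///
    /// let got = it.next().expect("third date");
    /// let day = got.get_group_by_name("d").expect("must match");
    /// assert_eq!("22", &haystack[day]);
    ///
    /// assert!(it.next().is_none());
    ///
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```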
pub fn infallible(self) -> CapturesIter<'h, F> { CapturesIter(self) } } #[cfg(feature = "alloc")] impl<'h, F> Iterator for TryCapturesIter<'h, F> where F: FnMut(&Input<'_>, &mut Captures) -> Result<(), MatchError>, { type Item = Result<Captures, MatchError>; #[inline] fn next(&mut self) -> Option<Result<Captures, MatchError>> { let TryCapturesIter { ref mut it, ref mut caps, ref mut finder } = *self; let result = it .try_advance(|input| { (finder)(input, caps)?; Ok(caps.get_match()) }) .transpose()?; match result { Ok(_) => Some(Ok(caps.clone())), Err(err) => Some(Err(err)), } } } #[cfg(feature = "alloc")] impl<'h, F> core::fmt::Debug for TryCapturesIter<'h, F> { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_struct("TryCapturesIter") .field("it", &self.it) .field("caps", &self.caps) .field("finder", &"<closure>") .finish() } } /// An iterator over all non-overlapping captures for an infallible search. /// /// The iterator yields a [`Captures`] value until no more matches could be /// found. /// /// The type parameters are as follows: /// /// * `F` represents the type of a closure that executes the search. /// /// The lifetime parameters come from the [`Input`] type: /// /// * `'h` is the lifetime of the underlying haystack. /// /// When possible, prefer the iterators defined on the regex engine you're /// using. This tries to abstract over the regex engine and is thus a bit more /// unwieldy to use. /// /// This iterator is created by [`Searcher::into_captures_iter`] and then /// calling [`TryCapturesIter::infallible`]. #[cfg(feature = "alloc")] #[derive(Debug)] pub struct CapturesIter<'h, F>(TryCapturesIter<'h, F>); #[cfg(feature = "alloc")] impl<'h, F> Iterator for CapturesIter<'h, F> where F: FnMut(&Input<'_>, &mut Captures) -> Result<(), MatchError>, { type Item = Captures; #[inline] fn next(&mut self) -> Option<Captures> { match self.0.next()? { Ok(m) => Some(m), Err(err) => panic!( "unexpected regex captures error: {}\n\ to handle find errors, use 'try' or 'search' methods", err, ), } } } <file_sep>/regex-cli/cmd/find/which/mod.rs use std::io::{stdout, Write}; use { anyhow::Context, lexopt::Parser, regex_automata::{Input, MatchError, PatternID, PatternSet}, }; use crate::{ args, util::{self, Table}, }; mod dfa; mod nfa; pub fn run(p: &mut Parser) -> anyhow::Result<()> { const USAGE: &'static str = "\ Executes a 'which' search. This type of search reports *only* which patterns match a haystack. It doesn't report positions or even how many times each pattern matches. (Therefore, the -c/--count flag doesn't work with this command.) It is generally expected to use '--match-kind all' with this command, as the intent is to report all overlapping matches. Note that the search will usually scan the entire haystack. It can sometimes short circuit if all patterns are anchored or if the search knows no more patterns will match. This type of search is somewhat of a legacy feature because of how the top-level RegexSet API works in the 'regex' crate. Its API is pretty limited and it is difficult to extend to the more flexible meta regex API in regex-automata. The 'backtrack' engine isn't supported here because it doesn't have a 'which' search routine. In theory it could, but it would likely be slow and no better than just running each regex over the haystack one at a time. The 'onepass' engine also does not support this API. (At least, not currently.) USAGE: regex-cli find which <engine> ENGINES: dense Search with the dense DFA regex engine. 
    hybrid     Search with the lazy DFA regex engine.
    meta       Search with the meta regex engine.
    pikevm     Search with the PikeVM regex engine.
    regexset   Search with the top-level API regex engine.
    sparse     Search with the sparse DFA regex engine.
";
    let cmd = args::next_as_command(USAGE, p)?;
    match &*cmd {
        "dense" => dfa::run_dense(p),
        "hybrid" => dfa::run_hybrid(p),
        "meta" => run_meta(p),
        "pikevm" => nfa::run_pikevm(p),
        "regexset" => run_regexset(p),
        "sparse" => dfa::run_sparse(p),
        unk => anyhow::bail!("unrecognized command '{}'", unk),
    }
}

fn run_regexset(p: &mut lexopt::Parser) -> anyhow::Result<()> {
    const USAGE: &'static str = "\
Executes a 'which' search using the top-level API regex engine.

USAGE:
    regex-cli find which regexset [-p <pattern> ...] <haystack-path>
    regex-cli find which regexset [-p <pattern> ...] -y <haystack>

TIP: use -h for short docs and --help for long docs

OPTIONS:
%options%
";
    let mut common = args::common::Config::default();
    let mut patterns = args::patterns::Config::only_flags();
    let mut haystack = args::haystack::Config::default();
    let mut syntax = args::syntax::Config::default();
    let mut api = args::api::Config::default();
    let mut find = super::Config::default();
    args::configure(
        p,
        USAGE,
        &mut [
            &mut common,
            &mut patterns,
            &mut haystack,
            &mut syntax,
            &mut api,
            &mut find,
        ],
    )?;
    anyhow::ensure!(
        !find.count,
        "'which' command does not support reporting counts",
    );

    let pats = patterns.get()?;
    let syn = syntax.syntax()?;
    let mut table = Table::empty();
    let (re, time) = util::timeitr(|| api.from_patterns_set(&syn, &pats))?;
    table.add("build regex time", time);

    // The top-level API doesn't support regex-automata's more granular Input
    // abstraction.
    let input = args::input::Config::default();
    let search = |input: &Input<'_>, patset: &mut PatternSet| {
        let matches = re.matches(input.haystack());
        for pid in matches.iter() {
            let pid = PatternID::new(pid).unwrap();
            patset.try_insert(pid).unwrap();
        }
        Ok(())
    };
    run_search(
        &mut table,
        &common,
        &find,
        &input,
        &haystack,
        re.len(),
        search,
    )?;
    Ok(())
}

fn run_meta(p: &mut lexopt::Parser) -> anyhow::Result<()> {
    const USAGE: &'static str = "\
Executes a 'which' search using the meta regex engine.

USAGE:
    regex-cli find which meta [-p <pattern> ...] <haystack-path>
    regex-cli find which meta [-p <pattern> ...]
-y <haystack> TIP: use -h for short docs and --help for long docs OPTIONS: %options% "; let mut common = args::common::Config::default(); let mut input = args::input::Config::default(); let mut patterns = args::patterns::Config::only_flags(); let mut haystack = args::haystack::Config::default(); let mut syntax = args::syntax::Config::default(); let mut meta = args::meta::Config::default(); let mut find = super::Config::default(); args::configure( p, USAGE, &mut [ &mut common, &mut input, &mut patterns, &mut haystack, &mut syntax, &mut meta, &mut find, ], )?; anyhow::ensure!( !find.count, "'which' command does not support reporting counts", ); let pats = patterns.get()?; let mut table = Table::empty(); let re = if meta.build_from_patterns() { let (re, time) = util::timeitr(|| meta.from_patterns(&syntax, &pats))?; table.add("build meta time", time); re } else { let (asts, time) = util::timeitr(|| syntax.asts(&pats))?; table.add("parse time", time); let (hirs, time) = util::timeitr(|| syntax.hirs(&pats, &asts))?; table.add("translate time", time); let (re, time) = util::timeitr(|| meta.from_hirs(&hirs))?; table.add("build meta time", time); re }; let search = |input: &Input<'_>, patset: &mut PatternSet| { Ok(re.which_overlapping_matches(input, patset)) }; run_search( &mut table, &common, &find, &input, &haystack, re.pattern_len(), search, )?; Ok(()) } /// Like `run_counts`, but prints the actual matches instead. fn run_search( table: &mut Table, common: &args::common::Config, find: &super::Config, input: &args::input::Config, haystack: &args::haystack::Config, pattern_len: usize, mut search: impl FnMut(&Input<'_>, &mut PatternSet) -> Result<(), MatchError>, ) -> anyhow::Result<()> { let mut out = stdout(); input.with(haystack, |input| { let (patset, time) = util::timeitr(|| { let mut patset = PatternSet::new(pattern_len); for _ in 0..find.repeat() { search(&input, &mut patset)?; } Ok::<_, anyhow::Error>(patset) })?; table.add("search time", time); table.add("patterns that matched", patset.len()); if common.table() { table.print(&mut out)?; } if !common.quiet { for i in 0..pattern_len { let pid = PatternID::new(i).context("invalid pattern ID")?; writeln!( out, "{}:{:?}", pid.as_usize(), patset.contains(pid) )?; } } Ok(()) }) } <file_sep>/regex-automata/src/meta/strategy.rs use core::{ fmt::Debug, panic::{RefUnwindSafe, UnwindSafe}, }; use alloc::sync::Arc; use regex_syntax::hir::{literal, Hir}; use crate::{ meta::{ error::{BuildError, RetryError, RetryFailError, RetryQuadraticError}, regex::{Cache, RegexInfo}, reverse_inner, wrappers, }, nfa::thompson::{self, WhichCaptures, NFA}, util::{ captures::{Captures, GroupInfo}, look::LookMatcher, prefilter::{self, Prefilter, PrefilterI}, primitives::{NonMaxUsize, PatternID}, search::{Anchored, HalfMatch, Input, Match, MatchKind, PatternSet}, }, }; /// A trait that represents a single meta strategy. Its main utility is in /// providing a way to do dynamic dispatch over a few choices. /// /// Why dynamic dispatch? I actually don't have a super compelling reason, and /// importantly, I have not benchmarked it with the main alternative: an enum. /// I went with dynamic dispatch initially because the regex engine search code /// really can't be inlined into caller code in most cases because it's just /// too big. In other words, it is already expected that every regex search /// will entail at least the cost of a function call. /// /// I do wonder whether using enums would result in better codegen overall /// though. It's a worthwhile experiment to try. 
Probably the most interesting /// benchmark to run in such a case would be one with a high match count. That /// is, a benchmark to test the overall latency of a search call. pub(super) trait Strategy: Debug + Send + Sync + RefUnwindSafe + UnwindSafe + 'static { fn group_info(&self) -> &GroupInfo; fn create_cache(&self) -> Cache; fn reset_cache(&self, cache: &mut Cache); fn is_accelerated(&self) -> bool; fn memory_usage(&self) -> usize; fn search(&self, cache: &mut Cache, input: &Input<'_>) -> Option<Match>; fn search_half( &self, cache: &mut Cache, input: &Input<'_>, ) -> Option<HalfMatch>; fn is_match(&self, cache: &mut Cache, input: &Input<'_>) -> bool; fn search_slots( &self, cache: &mut Cache, input: &Input<'_>, slots: &mut [Option<NonMaxUsize>], ) -> Option<PatternID>; fn which_overlapping_matches( &self, cache: &mut Cache, input: &Input<'_>, patset: &mut PatternSet, ); } pub(super) fn new( info: &RegexInfo, hirs: &[&Hir], ) -> Result<Arc<dyn Strategy>, BuildError> { // At this point, we're committed to a regex engine of some kind. So pull // out a prefilter if we can, which will feed to each of the constituent // regex engines. let pre = if info.is_always_anchored_start() { // PERF: I'm not sure we necessarily want to do this... We may want to // run a prefilter for quickly rejecting in some cases. The problem // is that anchored searches overlap quite a bit with the use case // of "run a regex on every line to extract data." In that case, the // regex always matches, so running a prefilter doesn't really help us // there. The main place where a prefilter helps in an anchored search // is if the anchored search is not expected to match frequently. That // is, the prefilter gives us a way to possibly reject a haystack very // quickly. // // Maybe we should do use a prefilter, but only for longer haystacks? // Or maybe we should only use a prefilter when we think it's "fast"? // // Interestingly, I think we currently lack the infrastructure for // disabling a prefilter based on haystack length. That would probably // need to be a new 'Input' option. (Interestingly, an 'Input' used to // carry a 'Prefilter' with it, but I moved away from that.) debug!("skipping literal extraction since regex is anchored"); None } else if let Some(pre) = info.config().get_prefilter() { debug!( "skipping literal extraction since the caller provided a prefilter" ); Some(pre.clone()) } else if info.config().get_auto_prefilter() { let kind = info.config().get_match_kind(); let prefixes = crate::util::prefilter::prefixes(kind, hirs); // If we can build a full `Strategy` from just the extracted prefixes, // then we can short-circuit and avoid building a regex engine at all. if let Some(pre) = Pre::from_prefixes(info, &prefixes) { debug!( "found that the regex can be broken down to a literal \ search, avoiding the regex engine entirely", ); return Ok(pre); } // This now attempts another short-circuit of the regex engine: if we // have a huge alternation of just plain literals, then we can just use // Aho-Corasick for that and avoid the regex engine entirely. // // You might think this case would just be handled by // `Pre::from_prefixes`, but that technique relies on heuristic literal // extraction from the corresponding `Hir`. That works, but part of // heuristics limit the size and number of literals returned. This case // will specifically handle patterns with very large alternations. 
// // One wonders if we should just roll this our heuristic literal // extraction, and then I think this case could disappear entirely. if let Some(pre) = Pre::from_alternation_literals(info, hirs) { debug!( "found plain alternation of literals, \ avoiding regex engine entirely and using Aho-Corasick" ); return Ok(pre); } prefixes.literals().and_then(|strings| { debug!( "creating prefilter from {} literals: {:?}", strings.len(), strings, ); Prefilter::new(kind, strings) }) } else { debug!("skipping literal extraction since prefilters were disabled"); None }; let mut core = Core::new(info.clone(), pre.clone(), hirs)?; // Now that we have our core regex engines built, there are a few cases // where we can do a little bit better than just a normal "search forward // and maybe use a prefilter when in a start state." However, these cases // may not always work or otherwise build on top of the Core searcher. // For example, the reverse anchored optimization seems like it might // always work, but only the DFAs support reverse searching and the DFAs // might give up or quit for reasons. If we had, e.g., a PikeVM that // supported reverse searching, then we could avoid building a full Core // engine for this case. core = match ReverseAnchored::new(core) { Err(core) => core, Ok(ra) => { debug!("using reverse anchored strategy"); return Ok(Arc::new(ra)); } }; core = match ReverseSuffix::new(core, hirs) { Err(core) => core, Ok(rs) => { debug!("using reverse suffix strategy"); return Ok(Arc::new(rs)); } }; core = match ReverseInner::new(core, hirs) { Err(core) => core, Ok(ri) => { debug!("using reverse inner strategy"); return Ok(Arc::new(ri)); } }; debug!("using core strategy"); Ok(Arc::new(core)) } #[derive(Clone, Debug)] struct Pre<P> { pre: P, group_info: GroupInfo, } impl<P: PrefilterI> Pre<P> { fn new(pre: P) -> Arc<dyn Strategy> { // The only thing we support when we use prefilters directly as a // strategy is the start and end of the overall match for a single // pattern. In other words, exactly one implicit capturing group. Which // is exactly what we use here for a GroupInfo. let group_info = GroupInfo::new([[None::<&str>]]).unwrap(); Arc::new(Pre { pre, group_info }) } } // This is a little weird, but we don't actually care about the type parameter // here because we're selecting which underlying prefilter to use. So we just // define it on an arbitrary type. impl Pre<()> { /// Given a sequence of prefixes, attempt to return a full `Strategy` using /// just the prefixes. /// /// Basically, this occurs when the prefixes given not just prefixes, /// but an enumeration of the entire language matched by the regular /// expression. /// /// A number of other conditions need to be true too. For example, there /// can be only one pattern, the number of explicit capture groups is 0, no /// look-around assertions and so on. /// /// Note that this ignores `Config::get_auto_prefilter` because if this /// returns something, then it isn't a prefilter but a matcher itself. /// Therefore, it shouldn't suffer from the problems typical to prefilters /// (such as a high false positive rate). fn from_prefixes( info: &RegexInfo, prefixes: &literal::Seq, ) -> Option<Arc<dyn Strategy>> { let kind = info.config().get_match_kind(); // Check to see if our prefixes are exact, which means we might be // able to bypass the regex engine entirely and just rely on literal // searches. if !prefixes.is_exact() { return None; } // We also require that we have a single regex pattern. 
Namely, // we reuse the prefilter infrastructure to implement search and // prefilters only report spans. Prefilters don't know about pattern // IDs. The multi-regex case isn't a lost cause, we might still use // Aho-Corasick and we might still just use a regular prefilter, but // that's done below. if info.pattern_len() != 1 { return None; } // We can't have any capture groups either. The literal engines don't // know how to deal with things like '(foo)(bar)'. In that case, a // prefilter will just be used and then the regex engine will resolve // the capture groups. if info.props()[0].explicit_captures_len() != 0 { return None; } // We also require that it has zero look-around assertions. Namely, // literal extraction treats look-around assertions as if they match // *every* empty string. But of course, that isn't true. So for // example, 'foo\bquux' never matches anything, but 'fooquux' is // extracted from that as an exact literal. Such cases should just run // the regex engine. 'fooquux' will be used as a normal prefilter, and // then the regex engine will try to look for an actual match. if !info.props()[0].look_set().is_empty() { return None; } // Finally, currently, our prefilters are all oriented around // leftmost-first match semantics, so don't try to use them if the // caller asked for anything else. if kind != MatchKind::LeftmostFirst { return None; } // The above seems like a lot of requirements to meet, but it applies // to a lot of cases. 'foo', '[abc][123]' and 'foo|bar|quux' all meet // the above criteria, for example. // // Note that this is effectively a latency optimization. If we didn't // do this, then the extracted literals would still get bundled into // a prefilter, and every regex engine capable of running unanchored // searches supports prefilters. So this optimization merely sidesteps // having to run the regex engine at all to confirm the match. Thus, it // decreases the latency of a match. // OK because we know the set is exact and thus finite. let prefixes = prefixes.literals().unwrap(); debug!( "trying to bypass regex engine by creating \ prefilter from {} literals: {:?}", prefixes.len(), prefixes, ); let choice = match prefilter::Choice::new(kind, prefixes) { Some(choice) => choice, None => { debug!( "regex bypass failed because no prefilter could be built" ); return None; } }; let strat: Arc<dyn Strategy> = match choice { prefilter::Choice::Memchr(pre) => Pre::new(pre), prefilter::Choice::Memchr2(pre) => Pre::new(pre), prefilter::Choice::Memchr3(pre) => Pre::new(pre), prefilter::Choice::Memmem(pre) => Pre::new(pre), prefilter::Choice::Teddy(pre) => Pre::new(pre), prefilter::Choice::ByteSet(pre) => Pre::new(pre), prefilter::Choice::AhoCorasick(pre) => Pre::new(pre), }; Some(strat) } /// Attempts to extract an alternation of literals, and if it's deemed /// worth doing, returns an Aho-Corasick prefilter as a strategy. /// /// And currently, this only returns something when 'hirs.len() == 1'. This /// could in theory do something if there are multiple HIRs where all of /// them are alternation of literals, but I haven't had the time to go down /// that path yet. fn from_alternation_literals( info: &RegexInfo, hirs: &[&Hir], ) -> Option<Arc<dyn Strategy>> { use crate::util::prefilter::AhoCorasick; let lits = crate::meta::literal::alternation_literals(info, hirs)?; let ac = AhoCorasick::new(MatchKind::LeftmostFirst, &lits)?; Some(Pre::new(ac)) } } // This implements Strategy for anything that implements PrefilterI. 
// // Note that this must only be used for regexes of length 1. Multi-regexes // don't work here. The prefilter interface only provides the span of a match // and not the pattern ID. (I did consider making it more expressive, but I // couldn't figure out how to tie everything together elegantly.) Thus, so long // as the regex only contains one pattern, we can simply assume that a match // corresponds to PatternID::ZERO. And indeed, that's what we do here. // // In practice, since this impl is used to report matches directly and thus // completely bypasses the regex engine, we only wind up using this under the // following restrictions: // // * There must be only one pattern. As explained above. // * The literal sequence must be finite and only contain exact literals. // * There must not be any look-around assertions. If there are, the literals // extracted might be exact, but a match doesn't necessarily imply an overall // match. As a trivial example, 'foo\bbar' does not match 'foobar'. // * The pattern must not have any explicit capturing groups. If it does, the // caller might expect them to be resolved. e.g., 'foo(bar)'. // // So when all of those things are true, we use a prefilter directly as a // strategy. // // In the case where the number of patterns is more than 1, we don't use this // but do use a special Aho-Corasick strategy if all of the regexes are just // simple literals or alternations of literals. (We also use the Aho-Corasick // strategy when len(patterns)==1 if the number of literals is large. In that // case, literal extraction gives up and will return an infinite set.) impl<P: PrefilterI> Strategy for Pre<P> { fn group_info(&self) -> &GroupInfo { &self.group_info } fn create_cache(&self) -> Cache { Cache { capmatches: Captures::all(self.group_info().clone()), pikevm: wrappers::PikeVMCache::none(), backtrack: wrappers::BoundedBacktrackerCache::none(), onepass: wrappers::OnePassCache::none(), hybrid: wrappers::HybridCache::none(), revhybrid: wrappers::ReverseHybridCache::none(), } } fn reset_cache(&self, _cache: &mut Cache) {} fn is_accelerated(&self) -> bool { self.pre.is_fast() } fn memory_usage(&self) -> usize { self.pre.memory_usage() } fn search(&self, _cache: &mut Cache, input: &Input<'_>) -> Option<Match> { if input.is_done() { return None; } if input.get_anchored().is_anchored() { return self .pre .prefix(input.haystack(), input.get_span()) .map(|sp| Match::new(PatternID::ZERO, sp)); } self.pre .find(input.haystack(), input.get_span()) .map(|sp| Match::new(PatternID::ZERO, sp)) } fn search_half( &self, cache: &mut Cache, input: &Input<'_>, ) -> Option<HalfMatch> { self.search(cache, input).map(|m| HalfMatch::new(m.pattern(), m.end())) } fn is_match(&self, cache: &mut Cache, input: &Input<'_>) -> bool { self.search(cache, input).is_some() } fn search_slots( &self, cache: &mut Cache, input: &Input<'_>, slots: &mut [Option<NonMaxUsize>], ) -> Option<PatternID> { let m = self.search(cache, input)?; if let Some(slot) = slots.get_mut(0) { *slot = NonMaxUsize::new(m.start()); } if let Some(slot) = slots.get_mut(1) { *slot = NonMaxUsize::new(m.end()); } Some(m.pattern()) } fn which_overlapping_matches( &self, cache: &mut Cache, input: &Input<'_>, patset: &mut PatternSet, ) { if self.search(cache, input).is_some() { patset.insert(PatternID::ZERO); } } } #[derive(Debug)] struct Core { info: RegexInfo, pre: Option<Prefilter>, nfa: NFA, nfarev: Option<NFA>, pikevm: wrappers::PikeVM, backtrack: wrappers::BoundedBacktracker, onepass: wrappers::OnePass, hybrid: 
wrappers::Hybrid, dfa: wrappers::DFA, } impl Core { fn new( info: RegexInfo, pre: Option<Prefilter>, hirs: &[&Hir], ) -> Result<Core, BuildError> { let mut lookm = LookMatcher::new(); lookm.set_line_terminator(info.config().get_line_terminator()); let thompson_config = thompson::Config::new() .utf8(info.config().get_utf8_empty()) .nfa_size_limit(info.config().get_nfa_size_limit()) .shrink(false) .which_captures(info.config().get_which_captures()) .look_matcher(lookm); let nfa = thompson::Compiler::new() .configure(thompson_config.clone()) .build_many_from_hir(hirs) .map_err(BuildError::nfa)?; // It's possible for the PikeVM or the BB to fail to build, even though // at this point, we already have a full NFA in hand. They can fail // when a Unicode word boundary is used but where Unicode word boundary // support is disabled at compile time, thus making it impossible to // match. (Construction can also fail if the NFA was compiled without // captures, but we always enable that above.) let pikevm = wrappers::PikeVM::new(&info, pre.clone(), &nfa)?; let backtrack = wrappers::BoundedBacktracker::new(&info, pre.clone(), &nfa)?; // The onepass engine can of course fail to build, but we expect it to // fail in many cases because it is an optimization that doesn't apply // to all regexes. The 'OnePass' wrapper encapsulates this failure (and // logs a message if it occurs). let onepass = wrappers::OnePass::new(&info, &nfa); // We try to encapsulate whether a particular regex engine should be // used within each respective wrapper, but the DFAs need a reverse NFA // to build itself, and we really do not want to build a reverse NFA if // we know we aren't going to use the lazy DFA. So we do a config check // up front, which is in practice the only way we won't try to use the // DFA. let (nfarev, hybrid, dfa) = if !info.config().get_hybrid() && !info.config().get_dfa() { (None, wrappers::Hybrid::none(), wrappers::DFA::none()) } else { // FIXME: Technically, we don't quite yet KNOW that we need // a reverse NFA. It's possible for the DFAs below to both // fail to build just based on the forward NFA. In which case, // building the reverse NFA was totally wasted work. But... // fixing this requires breaking DFA construction apart into // two pieces: one for the forward part and another for the // reverse part. Quite annoying. Making it worse, when building // both DFAs fails, it's quite likely that the NFA is large and // that it will take quite some time to build the reverse NFA // too. So... it's really probably worth it to do this! let nfarev = thompson::Compiler::new() // Currently, reverse NFAs don't support capturing groups, // so we MUST disable them. But even if we didn't have to, // we would, because nothing in this crate does anything // useful with capturing groups in reverse. And of course, // the lazy DFA ignores capturing groups in all cases. 
.configure( thompson_config .clone() .which_captures(WhichCaptures::None) .reverse(true), ) .build_many_from_hir(hirs) .map_err(BuildError::nfa)?; let dfa = if !info.config().get_dfa() { wrappers::DFA::none() } else { wrappers::DFA::new(&info, pre.clone(), &nfa, &nfarev) }; let hybrid = if !info.config().get_hybrid() { wrappers::Hybrid::none() } else if dfa.is_some() { debug!("skipping lazy DFA because we have a full DFA"); wrappers::Hybrid::none() } else { wrappers::Hybrid::new(&info, pre.clone(), &nfa, &nfarev) }; (Some(nfarev), hybrid, dfa) }; Ok(Core { info, pre, nfa, nfarev, pikevm, backtrack, onepass, hybrid, dfa, }) } #[cfg_attr(feature = "perf-inline", inline(always))] fn try_search_mayfail( &self, cache: &mut Cache, input: &Input<'_>, ) -> Option<Result<Option<Match>, RetryFailError>> { if let Some(e) = self.dfa.get(input) { trace!("using full DFA for search at {:?}", input.get_span()); Some(e.try_search(input)) } else if let Some(e) = self.hybrid.get(input) { trace!("using lazy DFA for search at {:?}", input.get_span()); Some(e.try_search(&mut cache.hybrid, input)) } else { None } } fn search_nofail( &self, cache: &mut Cache, input: &Input<'_>, ) -> Option<Match> { let caps = &mut cache.capmatches; caps.set_pattern(None); // We manually inline 'try_search_slots_nofail' here because we need to // borrow from 'cache.capmatches' in this method, but if we do, then // we can't pass 'cache' wholesale to to 'try_slots_no_hybrid'. It's a // classic example of how the borrow checker inhibits decomposition. // There are of course work-arounds (more types and/or interior // mutability), but that's more annoying than this IMO. let pid = if let Some(ref e) = self.onepass.get(input) { trace!("using OnePass for search at {:?}", input.get_span()); e.search_slots(&mut cache.onepass, input, caps.slots_mut()) } else if let Some(ref e) = self.backtrack.get(input) { trace!( "using BoundedBacktracker for search at {:?}", input.get_span() ); e.search_slots(&mut cache.backtrack, input, caps.slots_mut()) } else { trace!("using PikeVM for search at {:?}", input.get_span()); let e = self.pikevm.get(); e.search_slots(&mut cache.pikevm, input, caps.slots_mut()) }; caps.set_pattern(pid); caps.get_match() } fn search_half_nofail( &self, cache: &mut Cache, input: &Input<'_>, ) -> Option<HalfMatch> { // Only the lazy/full DFA returns half-matches, since the DFA requires // a reverse scan to find the start position. These fallback regex // engines can find the start and end in a single pass, so we just do // that and throw away the start offset to conform to the API. 
let m = self.search_nofail(cache, input)?; Some(HalfMatch::new(m.pattern(), m.end())) } fn search_slots_nofail( &self, cache: &mut Cache, input: &Input<'_>, slots: &mut [Option<NonMaxUsize>], ) -> Option<PatternID> { if let Some(ref e) = self.onepass.get(input) { trace!( "using OnePass for capture search at {:?}", input.get_span() ); e.search_slots(&mut cache.onepass, input, slots) } else if let Some(ref e) = self.backtrack.get(input) { trace!( "using BoundedBacktracker for capture search at {:?}", input.get_span() ); e.search_slots(&mut cache.backtrack, input, slots) } else { trace!( "using PikeVM for capture search at {:?}", input.get_span() ); let e = self.pikevm.get(); e.search_slots(&mut cache.pikevm, input, slots) } } fn is_match_nofail(&self, cache: &mut Cache, input: &Input<'_>) -> bool { if let Some(ref e) = self.onepass.get(input) { trace!( "using OnePass for is-match search at {:?}", input.get_span() ); e.search_slots(&mut cache.onepass, input, &mut []).is_some() } else if let Some(ref e) = self.backtrack.get(input) { trace!( "using BoundedBacktracker for is-match search at {:?}", input.get_span() ); e.is_match(&mut cache.backtrack, input) } else { trace!( "using PikeVM for is-match search at {:?}", input.get_span() ); let e = self.pikevm.get(); e.is_match(&mut cache.pikevm, input) } } fn is_capture_search_needed(&self, slots_len: usize) -> bool { slots_len > self.nfa.group_info().implicit_slot_len() } } impl Strategy for Core { #[cfg_attr(feature = "perf-inline", inline(always))] fn group_info(&self) -> &GroupInfo { self.nfa.group_info() } #[cfg_attr(feature = "perf-inline", inline(always))] fn create_cache(&self) -> Cache { Cache { capmatches: Captures::all(self.group_info().clone()), pikevm: self.pikevm.create_cache(), backtrack: self.backtrack.create_cache(), onepass: self.onepass.create_cache(), hybrid: self.hybrid.create_cache(), revhybrid: wrappers::ReverseHybridCache::none(), } } #[cfg_attr(feature = "perf-inline", inline(always))] fn reset_cache(&self, cache: &mut Cache) { cache.pikevm.reset(&self.pikevm); cache.backtrack.reset(&self.backtrack); cache.onepass.reset(&self.onepass); cache.hybrid.reset(&self.hybrid); } fn is_accelerated(&self) -> bool { self.pre.as_ref().map_or(false, |pre| pre.is_fast()) } fn memory_usage(&self) -> usize { self.info.memory_usage() + self.pre.as_ref().map_or(0, |pre| pre.memory_usage()) + self.nfa.memory_usage() + self.nfarev.as_ref().map_or(0, |nfa| nfa.memory_usage()) + self.onepass.memory_usage() + self.dfa.memory_usage() } #[cfg_attr(feature = "perf-inline", inline(always))] fn search(&self, cache: &mut Cache, input: &Input<'_>) -> Option<Match> { // We manually inline try_search_mayfail here because letting the // compiler do it seems to produce pretty crappy codegen. 
return if let Some(e) = self.dfa.get(input) { trace!("using full DFA for full search at {:?}", input.get_span()); match e.try_search(input) { Ok(x) => x, Err(_err) => { trace!("full DFA search failed: {}", _err); self.search_nofail(cache, input) } } } else if let Some(e) = self.hybrid.get(input) { trace!("using lazy DFA for full search at {:?}", input.get_span()); match e.try_search(&mut cache.hybrid, input) { Ok(x) => x, Err(_err) => { trace!("lazy DFA search failed: {}", _err); self.search_nofail(cache, input) } } } else { self.search_nofail(cache, input) }; } #[cfg_attr(feature = "perf-inline", inline(always))] fn search_half( &self, cache: &mut Cache, input: &Input<'_>, ) -> Option<HalfMatch> { // The main difference with 'search' is that if we're using a DFA, we // can use a single forward scan without needing to run the reverse // DFA. if let Some(e) = self.dfa.get(input) { trace!("using full DFA for half search at {:?}", input.get_span()); match e.try_search_half_fwd(input) { Ok(x) => x, Err(_err) => { trace!("full DFA half search failed: {}", _err); self.search_half_nofail(cache, input) } } } else if let Some(e) = self.hybrid.get(input) { trace!("using lazy DFA for half search at {:?}", input.get_span()); match e.try_search_half_fwd(&mut cache.hybrid, input) { Ok(x) => x, Err(_err) => { trace!("lazy DFA half search failed: {}", _err); self.search_half_nofail(cache, input) } } } else { self.search_half_nofail(cache, input) } } #[cfg_attr(feature = "perf-inline", inline(always))] fn is_match(&self, cache: &mut Cache, input: &Input<'_>) -> bool { if let Some(e) = self.dfa.get(input) { trace!( "using full DFA for is-match search at {:?}", input.get_span() ); match e.try_search_half_fwd(input) { Ok(x) => x.is_some(), Err(_err) => { trace!("full DFA half search failed: {}", _err); self.is_match_nofail(cache, input) } } } else if let Some(e) = self.hybrid.get(input) { trace!( "using lazy DFA for is-match search at {:?}", input.get_span() ); match e.try_search_half_fwd(&mut cache.hybrid, input) { Ok(x) => x.is_some(), Err(_err) => { trace!("lazy DFA half search failed: {}", _err); self.is_match_nofail(cache, input) } } } else { self.is_match_nofail(cache, input) } } #[cfg_attr(feature = "perf-inline", inline(always))] fn search_slots( &self, cache: &mut Cache, input: &Input<'_>, slots: &mut [Option<NonMaxUsize>], ) -> Option<PatternID> { // Even if the regex has explicit capture groups, if the caller didn't // provide any explicit slots, then it doesn't make sense to try and do // extra work to get offsets for those slots. Ideally the caller should // realize this and not call this routine in the first place, but alas, // we try to save the caller from themselves if they do. if !self.is_capture_search_needed(slots.len()) { trace!("asked for slots unnecessarily, trying fast path"); let m = self.search(cache, input)?; copy_match_to_slots(m, slots); return Some(m.pattern()); } // If the onepass DFA is available for this search (which only happens // when it's anchored), then skip running a fallible DFA. The onepass // DFA isn't as fast as a full or lazy DFA, but it is typically quite // a bit faster than the backtracker or the PikeVM. So it isn't as // advantageous to try and do a full/lazy DFA scan first. // // We still theorize that it's better to do a full/lazy DFA scan, even // when it's anchored, because it's usually much faster and permits us // to say "no match" much more quickly. 
This does hurt the case of, // say, parsing each line in a log file into capture groups, because // in that case, the line always matches. So the lazy DFA scan is // usually just wasted work. But, the lazy DFA is usually quite fast // and doesn't cost too much here. if self.onepass.get(&input).is_some() { return self.search_slots_nofail(cache, &input, slots); } let m = match self.try_search_mayfail(cache, input) { Some(Ok(Some(m))) => m, Some(Ok(None)) => return None, Some(Err(_err)) => { trace!("fast capture search failed: {}", _err); return self.search_slots_nofail(cache, input, slots); } None => { return self.search_slots_nofail(cache, input, slots); } }; // At this point, now that we've found the bounds of the // match, we need to re-run something that can resolve // capturing groups. But we only need to run on it on the // match bounds and not the entire haystack. trace!( "match found at {}..{} in capture search, \ using another engine to find captures", m.start(), m.end(), ); let input = input .clone() .span(m.start()..m.end()) .anchored(Anchored::Pattern(m.pattern())); Some( self.search_slots_nofail(cache, &input, slots) .expect("should find a match"), ) } #[cfg_attr(feature = "perf-inline", inline(always))] fn which_overlapping_matches( &self, cache: &mut Cache, input: &Input<'_>, patset: &mut PatternSet, ) { if let Some(e) = self.dfa.get(input) { trace!( "using full DFA for overlapping search at {:?}", input.get_span() ); let _err = match e.try_which_overlapping_matches(input, patset) { Ok(()) => return, Err(err) => err, }; trace!("fast overlapping search failed: {}", _err); } else if let Some(e) = self.hybrid.get(input) { trace!( "using lazy DFA for overlapping search at {:?}", input.get_span() ); let _err = match e.try_which_overlapping_matches( &mut cache.hybrid, input, patset, ) { Ok(()) => { return; } Err(err) => err, }; trace!("fast overlapping search failed: {}", _err); } trace!( "using PikeVM for overlapping search at {:?}", input.get_span() ); let e = self.pikevm.get(); e.which_overlapping_matches(&mut cache.pikevm, input, patset) } } #[derive(Debug)] struct ReverseAnchored { core: Core, } impl ReverseAnchored { fn new(core: Core) -> Result<ReverseAnchored, Core> { if !core.info.is_always_anchored_end() { debug!( "skipping reverse anchored optimization because \ the regex is not always anchored at the end" ); return Err(core); } // Note that the caller can still request an anchored search even when // the regex isn't anchored at the start. We detect that case in the // search routines below and just fallback to the core engine. This // is fine because both searches are anchored. It's just a matter of // picking one. Falling back to the core engine is a little simpler, // since if we used the reverse anchored approach, we'd have to add an // extra check to ensure the match reported starts at the place where // the caller requested the search to start. if core.info.is_always_anchored_start() { debug!( "skipping reverse anchored optimization because \ the regex is also anchored at the start" ); return Err(core); } // Only DFAs can do reverse searches (currently), so we need one of // them in order to do this optimization. It's possible (although // pretty unlikely) that we have neither and need to give up. 
if !core.hybrid.is_some() && !core.dfa.is_some() { debug!( "skipping reverse anchored optimization because \ we don't have a lazy DFA or a full DFA" ); return Err(core); } Ok(ReverseAnchored { core }) } #[cfg_attr(feature = "perf-inline", inline(always))] fn try_search_half_anchored_rev( &self, cache: &mut Cache, input: &Input<'_>, ) -> Result<Option<HalfMatch>, RetryFailError> { // We of course always want an anchored search. In theory, the // underlying regex engines should automatically enable anchored // searches since the regex is itself anchored, but this more clearly // expresses intent and is always correct. let input = input.clone().anchored(Anchored::Yes); if let Some(e) = self.core.dfa.get(&input) { trace!( "using full DFA for reverse anchored search at {:?}", input.get_span() ); e.try_search_half_rev(&input) } else if let Some(e) = self.core.hybrid.get(&input) { trace!( "using lazy DFA for reverse anchored search at {:?}", input.get_span() ); e.try_search_half_rev(&mut cache.hybrid, &input) } else { unreachable!("ReverseAnchored always has a DFA") } } } // Note that in this impl, we don't check that 'input.end() == // input.haystack().len()'. In particular, when that condition is false, a // match is always impossible because we know that the regex is always anchored // at the end (or else 'ReverseAnchored' won't be built). We don't check that // here because the 'Regex' wrapper actually does that for us in all cases. // Thus, in this impl, we can actually assume that the end position in 'input' // is equivalent to the length of the haystack. impl Strategy for ReverseAnchored { #[cfg_attr(feature = "perf-inline", inline(always))] fn group_info(&self) -> &GroupInfo { self.core.group_info() } #[cfg_attr(feature = "perf-inline", inline(always))] fn create_cache(&self) -> Cache { self.core.create_cache() } #[cfg_attr(feature = "perf-inline", inline(always))] fn reset_cache(&self, cache: &mut Cache) { self.core.reset_cache(cache); } fn is_accelerated(&self) -> bool { // Since this is anchored at the end, a reverse anchored search is // almost certainly guaranteed to result in a much faster search than // a standard forward search. true } fn memory_usage(&self) -> usize { self.core.memory_usage() } #[cfg_attr(feature = "perf-inline", inline(always))] fn search(&self, cache: &mut Cache, input: &Input<'_>) -> Option<Match> { if input.get_anchored().is_anchored() { return self.core.search(cache, input); } match self.try_search_half_anchored_rev(cache, input) { Err(_err) => { trace!("fast reverse anchored search failed: {}", _err); self.core.search_nofail(cache, input) } Ok(None) => None, Ok(Some(hm)) => { Some(Match::new(hm.pattern(), hm.offset()..input.end())) } } } #[cfg_attr(feature = "perf-inline", inline(always))] fn search_half( &self, cache: &mut Cache, input: &Input<'_>, ) -> Option<HalfMatch> { if input.get_anchored().is_anchored() { return self.core.search_half(cache, input); } match self.try_search_half_anchored_rev(cache, input) { Err(_err) => { trace!("fast reverse anchored search failed: {}", _err); self.core.search_half_nofail(cache, input) } Ok(None) => None, Ok(Some(hm)) => { // Careful here! 'try_search_half' is a *forward* search that // only cares about the *end* position of a match. But // 'hm.offset()' is actually the start of the match. So we // actually just throw that away here and, since we know we // have a match, return the only possible position at which a // match can occur: input.end(). 
Some(HalfMatch::new(hm.pattern(), input.end())) } } } #[cfg_attr(feature = "perf-inline", inline(always))] fn is_match(&self, cache: &mut Cache, input: &Input<'_>) -> bool { if input.get_anchored().is_anchored() { return self.core.is_match(cache, input); } match self.try_search_half_anchored_rev(cache, input) { Err(_err) => { trace!("fast reverse anchored search failed: {}", _err); self.core.is_match_nofail(cache, input) } Ok(None) => false, Ok(Some(_)) => true, } } #[cfg_attr(feature = "perf-inline", inline(always))] fn search_slots( &self, cache: &mut Cache, input: &Input<'_>, slots: &mut [Option<NonMaxUsize>], ) -> Option<PatternID> { if input.get_anchored().is_anchored() { return self.core.search_slots(cache, input, slots); } match self.try_search_half_anchored_rev(cache, input) { Err(_err) => { trace!("fast reverse anchored search failed: {}", _err); self.core.search_slots_nofail(cache, input, slots) } Ok(None) => None, Ok(Some(hm)) => { if !self.core.is_capture_search_needed(slots.len()) { trace!("asked for slots unnecessarily, skipping captures"); let m = Match::new(hm.pattern(), hm.offset()..input.end()); copy_match_to_slots(m, slots); return Some(m.pattern()); } let start = hm.offset(); let input = input .clone() .span(start..input.end()) .anchored(Anchored::Pattern(hm.pattern())); self.core.search_slots_nofail(cache, &input, slots) } } } #[cfg_attr(feature = "perf-inline", inline(always))] fn which_overlapping_matches( &self, cache: &mut Cache, input: &Input<'_>, patset: &mut PatternSet, ) { // It seems like this could probably benefit from a reverse anchored // optimization, perhaps by doing an overlapping reverse search (which // the DFAs do support). I haven't given it much thought though, and // I'm currently focus more on the single pattern case. self.core.which_overlapping_matches(cache, input, patset) } } #[derive(Debug)] struct ReverseSuffix { core: Core, pre: Prefilter, } impl ReverseSuffix { fn new(core: Core, hirs: &[&Hir]) -> Result<ReverseSuffix, Core> { if !core.info.config().get_auto_prefilter() { debug!( "skipping reverse suffix optimization because \ automatic prefilters are disabled" ); return Err(core); } // Like the reverse inner optimization, we don't do this for regexes // that are always anchored. It could lead to scanning too much, but // could say "no match" much more quickly than running the regex // engine if the initial literal scan doesn't match. With that said, // the reverse suffix optimization has lower overhead, since it only // requires a reverse scan after a literal match to confirm or reject // the match. (Although, in the case of confirmation, it then needs to // do another forward scan to find the end position.) // // Note that the caller can still request an anchored search even // when the regex isn't anchored. We detect that case in the search // routines below and just fallback to the core engine. Currently this // optimization assumes all searches are unanchored, so if we do want // to enable this optimization for anchored searches, it will need a // little work to support it. if core.info.is_always_anchored_start() { debug!( "skipping reverse suffix optimization because \ the regex is always anchored at the start", ); return Err(core); } // Only DFAs can do reverse searches (currently), so we need one of // them in order to do this optimization. It's possible (although // pretty unlikely) that we have neither and need to give up. 
if !core.hybrid.is_some() && !core.dfa.is_some() { debug!( "skipping reverse suffix optimization because \ we don't have a lazy DFA or a full DFA" ); return Err(core); } if core.pre.as_ref().map_or(false, |p| p.is_fast()) { debug!( "skipping reverse suffix optimization because \ we already have a prefilter that we think is fast" ); return Err(core); } let kind = core.info.config().get_match_kind(); let suffixes = crate::util::prefilter::suffixes(kind, hirs); let lcs = match suffixes.longest_common_suffix() { None => { debug!( "skipping reverse suffix optimization because \ a longest common suffix could not be found", ); return Err(core); } Some(lcs) if lcs.is_empty() => { debug!( "skipping reverse suffix optimization because \ the longest common suffix is the empty string", ); return Err(core); } Some(lcs) => lcs, }; let pre = match Prefilter::new(kind, &[lcs]) { Some(pre) => pre, None => { debug!( "skipping reverse suffix optimization because \ a prefilter could not be constructed from the \ longest common suffix", ); return Err(core); } }; if !pre.is_fast() { debug!( "skipping reverse suffix optimization because \ while we have a suffix prefilter, it is not \ believed to be 'fast'" ); return Err(core); } Ok(ReverseSuffix { core, pre }) } #[cfg_attr(feature = "perf-inline", inline(always))] fn try_search_half_start( &self, cache: &mut Cache, input: &Input<'_>, ) -> Result<Option<HalfMatch>, RetryError> { let mut span = input.get_span(); let mut min_start = 0; loop { let litmatch = match self.pre.find(input.haystack(), span) { None => return Ok(None), Some(span) => span, }; trace!("reverse suffix scan found suffix match at {:?}", litmatch); let revinput = input .clone() .anchored(Anchored::Yes) .span(input.start()..litmatch.end); match self .try_search_half_rev_limited(cache, &revinput, min_start)? 
{ None => { if span.start >= span.end { break; } span.start = litmatch.start.checked_add(1).unwrap(); } Some(hm) => return Ok(Some(hm)), } min_start = litmatch.end; } Ok(None) } #[cfg_attr(feature = "perf-inline", inline(always))] fn try_search_half_fwd( &self, cache: &mut Cache, input: &Input<'_>, ) -> Result<Option<HalfMatch>, RetryFailError> { if let Some(e) = self.core.dfa.get(&input) { trace!( "using full DFA for forward reverse suffix search at {:?}", input.get_span() ); e.try_search_half_fwd(&input) } else if let Some(e) = self.core.hybrid.get(&input) { trace!( "using lazy DFA for forward reverse suffix search at {:?}", input.get_span() ); e.try_search_half_fwd(&mut cache.hybrid, &input) } else { unreachable!("ReverseSuffix always has a DFA") } } #[cfg_attr(feature = "perf-inline", inline(always))] fn try_search_half_rev_limited( &self, cache: &mut Cache, input: &Input<'_>, min_start: usize, ) -> Result<Option<HalfMatch>, RetryError> { if let Some(e) = self.core.dfa.get(&input) { trace!( "using full DFA for reverse suffix search at {:?}, \ but will be stopped at {} to avoid quadratic behavior", input.get_span(), min_start, ); e.try_search_half_rev_limited(&input, min_start) } else if let Some(e) = self.core.hybrid.get(&input) { trace!( "using lazy DFA for reverse inner search at {:?}, \ but will be stopped at {} to avoid quadratic behavior", input.get_span(), min_start, ); e.try_search_half_rev_limited(&mut cache.hybrid, &input, min_start) } else { unreachable!("ReverseSuffix always has a DFA") } } } impl Strategy for ReverseSuffix { #[cfg_attr(feature = "perf-inline", inline(always))] fn group_info(&self) -> &GroupInfo { self.core.group_info() } #[cfg_attr(feature = "perf-inline", inline(always))] fn create_cache(&self) -> Cache { self.core.create_cache() } #[cfg_attr(feature = "perf-inline", inline(always))] fn reset_cache(&self, cache: &mut Cache) { self.core.reset_cache(cache); } fn is_accelerated(&self) -> bool { self.pre.is_fast() } fn memory_usage(&self) -> usize { self.core.memory_usage() + self.pre.memory_usage() } #[cfg_attr(feature = "perf-inline", inline(always))] fn search(&self, cache: &mut Cache, input: &Input<'_>) -> Option<Match> { if input.get_anchored().is_anchored() { return self.core.search(cache, input); } match self.try_search_half_start(cache, input) { Err(RetryError::Quadratic(_err)) => { trace!("reverse suffix optimization failed: {}", _err); self.core.search(cache, input) } Err(RetryError::Fail(_err)) => { trace!("reverse suffix reverse fast search failed: {}", _err); self.core.search_nofail(cache, input) } Ok(None) => None, Ok(Some(hm_start)) => { let fwdinput = input .clone() .anchored(Anchored::Pattern(hm_start.pattern())) .span(hm_start.offset()..input.end()); match self.try_search_half_fwd(cache, &fwdinput) { Err(_err) => { trace!( "reverse suffix forward fast search failed: {}", _err ); self.core.search_nofail(cache, input) } Ok(None) => { unreachable!( "suffix match plus reverse match implies \ there must be a match", ) } Ok(Some(hm_end)) => Some(Match::new( hm_start.pattern(), hm_start.offset()..hm_end.offset(), )), } } } } #[cfg_attr(feature = "perf-inline", inline(always))] fn search_half( &self, cache: &mut Cache, input: &Input<'_>, ) -> Option<HalfMatch> { if input.get_anchored().is_anchored() { return self.core.search_half(cache, input); } match self.try_search_half_start(cache, input) { Err(RetryError::Quadratic(_err)) => { trace!("reverse suffix half optimization failed: {}", _err); self.core.search_half(cache, input) } 
Err(RetryError::Fail(_err)) => { trace!( "reverse suffix reverse fast half search failed: {}", _err ); self.core.search_half_nofail(cache, input) } Ok(None) => None, Ok(Some(hm_start)) => { // This is a bit subtle. It is tempting to just stop searching // at this point and return a half-match with an offset // corresponding to where the suffix was found. But the suffix // match does not necessarily correspond to the end of the // proper leftmost-first match. Consider /[a-z]+ing/ against // 'tingling'. The first suffix match is the first 'ing', and // the /[a-z]+/ matches the 't'. So if we stopped here, then // we'd report 'ting' as the match. But 'tingling' is the // correct match because of greediness. let fwdinput = input .clone() .anchored(Anchored::Pattern(hm_start.pattern())) .span(hm_start.offset()..input.end()); match self.try_search_half_fwd(cache, &fwdinput) { Err(_err) => { trace!( "reverse suffix forward fast search failed: {}", _err ); self.core.search_half_nofail(cache, input) } Ok(None) => { unreachable!( "suffix match plus reverse match implies \ there must be a match", ) } Ok(Some(hm_end)) => Some(hm_end), } } } } #[cfg_attr(feature = "perf-inline", inline(always))] fn is_match(&self, cache: &mut Cache, input: &Input<'_>) -> bool { if input.get_anchored().is_anchored() { return self.core.is_match(cache, input); } match self.try_search_half_start(cache, input) { Err(RetryError::Quadratic(_err)) => { trace!("reverse suffix half optimization failed: {}", _err); self.core.is_match_nofail(cache, input) } Err(RetryError::Fail(_err)) => { trace!( "reverse suffix reverse fast half search failed: {}", _err ); self.core.is_match_nofail(cache, input) } Ok(None) => false, Ok(Some(_)) => true, } } #[cfg_attr(feature = "perf-inline", inline(always))] fn search_slots( &self, cache: &mut Cache, input: &Input<'_>, slots: &mut [Option<NonMaxUsize>], ) -> Option<PatternID> { if input.get_anchored().is_anchored() { return self.core.search_slots(cache, input, slots); } if !self.core.is_capture_search_needed(slots.len()) { trace!("asked for slots unnecessarily, trying fast path"); let m = self.search(cache, input)?; copy_match_to_slots(m, slots); return Some(m.pattern()); } let hm_start = match self.try_search_half_start(cache, input) { Err(RetryError::Quadratic(_err)) => { trace!( "reverse suffix captures optimization failed: {}", _err ); return self.core.search_slots(cache, input, slots); } Err(RetryError::Fail(_err)) => { trace!( "reverse suffix reverse fast captures search failed: {}", _err ); return self.core.search_slots_nofail(cache, input, slots); } Ok(None) => return None, Ok(Some(hm_start)) => hm_start, }; trace!( "match found at {}..{} in capture search, \ using another engine to find captures", hm_start.offset(), input.end(), ); let start = hm_start.offset(); let input = input .clone() .span(start..input.end()) .anchored(Anchored::Pattern(hm_start.pattern())); self.core.search_slots_nofail(cache, &input, slots) } #[cfg_attr(feature = "perf-inline", inline(always))] fn which_overlapping_matches( &self, cache: &mut Cache, input: &Input<'_>, patset: &mut PatternSet, ) { self.core.which_overlapping_matches(cache, input, patset) } } #[derive(Debug)] struct ReverseInner { core: Core, preinner: Prefilter, nfarev: NFA, hybrid: wrappers::ReverseHybrid, dfa: wrappers::ReverseDFA, } impl ReverseInner { fn new(core: Core, hirs: &[&Hir]) -> Result<ReverseInner, Core> { if !core.info.config().get_auto_prefilter() { debug!( "skipping reverse inner optimization because \ automatic prefilters are 
disabled" ); return Err(core); } // Currently we hard-code the assumption of leftmost-first match // semantics. This isn't a huge deal because 'all' semantics tend to // only be used for forward overlapping searches with multiple regexes, // and this optimization only supports a single pattern at the moment. if core.info.config().get_match_kind() != MatchKind::LeftmostFirst { debug!( "skipping reverse inner optimization because \ match kind is {:?} but this only supports leftmost-first", core.info.config().get_match_kind(), ); return Err(core); } // It's likely that a reverse inner scan has too much overhead for it // to be worth it when the regex is anchored at the start. It is // possible for it to be quite a bit faster if the initial literal // scan fails to detect a match, in which case, we can say "no match" // very quickly. But this could be undesirable, e.g., scanning too far // or when the literal scan matches. If it matches, then confirming the // match requires a reverse scan followed by a forward scan to confirm // or reject, which is a fair bit of work. // // Note that the caller can still request an anchored search even // when the regex isn't anchored. We detect that case in the search // routines below and just fallback to the core engine. Currently this // optimization assumes all searches are unanchored, so if we do want // to enable this optimization for anchored searches, it will need a // little work to support it. if core.info.is_always_anchored_start() { debug!( "skipping reverse inner optimization because \ the regex is always anchored at the start", ); return Err(core); } // Only DFAs can do reverse searches (currently), so we need one of // them in order to do this optimization. It's possible (although // pretty unlikely) that we have neither and need to give up. if !core.hybrid.is_some() && !core.dfa.is_some() { debug!( "skipping reverse inner optimization because \ we don't have a lazy DFA or a full DFA" ); return Err(core); } if core.pre.as_ref().map_or(false, |p| p.is_fast()) { debug!( "skipping reverse inner optimization because \ we already have a prefilter that we think is fast" ); return Err(core); } else if core.pre.is_some() { debug!( "core engine has a prefix prefilter, but it is \ probably not fast, so continuing with attempt to \ use reverse inner prefilter" ); } let (concat_prefix, preinner) = match reverse_inner::extract(hirs) { Some(x) => x, // N.B. the 'extract' function emits debug messages explaining // why we bailed out here. 
None => return Err(core), }; debug!("building reverse NFA for prefix before inner literal"); let mut lookm = LookMatcher::new(); lookm.set_line_terminator(core.info.config().get_line_terminator()); let thompson_config = thompson::Config::new() .reverse(true) .utf8(core.info.config().get_utf8_empty()) .nfa_size_limit(core.info.config().get_nfa_size_limit()) .shrink(false) .which_captures(WhichCaptures::None) .look_matcher(lookm); let result = thompson::Compiler::new() .configure(thompson_config) .build_from_hir(&concat_prefix); let nfarev = match result { Ok(nfarev) => nfarev, Err(_err) => { debug!( "skipping reverse inner optimization because the \ reverse NFA failed to build: {}", _err, ); return Err(core); } }; debug!("building reverse DFA for prefix before inner literal"); let dfa = if !core.info.config().get_dfa() { wrappers::ReverseDFA::none() } else { wrappers::ReverseDFA::new(&core.info, &nfarev) }; let hybrid = if !core.info.config().get_hybrid() { wrappers::ReverseHybrid::none() } else if dfa.is_some() { debug!( "skipping lazy DFA for reverse inner optimization \ because we have a full DFA" ); wrappers::ReverseHybrid::none() } else { wrappers::ReverseHybrid::new(&core.info, &nfarev) }; Ok(ReverseInner { core, preinner, nfarev, hybrid, dfa }) } #[cfg_attr(feature = "perf-inline", inline(always))] fn try_search_full( &self, cache: &mut Cache, input: &Input<'_>, ) -> Result<Option<Match>, RetryError> { let mut span = input.get_span(); let mut min_match_start = 0; let mut min_pre_start = 0; loop { let litmatch = match self.preinner.find(input.haystack(), span) { None => return Ok(None), Some(span) => span, }; if litmatch.start < min_pre_start { trace!( "found inner prefilter match at {:?}, which starts \ before the end of the last forward scan at {}, \ quitting to avoid quadratic behavior", litmatch, min_pre_start, ); return Err(RetryError::Quadratic(RetryQuadraticError::new())); } trace!("reverse inner scan found inner match at {:?}", litmatch); let revinput = input .clone() .anchored(Anchored::Yes) .span(input.start()..litmatch.start); // Note that in addition to the literal search above scanning past // our minimum start point, this routine can also return an error // as a result of detecting possible quadratic behavior if the // reverse scan goes past the minimum start point. That is, the // literal search might not, but the reverse regex search for the // prefix might! match self.try_search_half_rev_limited( cache, &revinput, min_match_start, )? { None => { if span.start >= span.end { break; } span.start = litmatch.start.checked_add(1).unwrap(); } Some(hm_start) => { let fwdinput = input .clone() .anchored(Anchored::Pattern(hm_start.pattern())) .span(hm_start.offset()..input.end()); match self.try_search_half_fwd_stopat(cache, &fwdinput)? 
{ Err(stopat) => { min_pre_start = stopat; span.start = litmatch.start.checked_add(1).unwrap(); } Ok(hm_end) => { return Ok(Some(Match::new( hm_start.pattern(), hm_start.offset()..hm_end.offset(), ))) } } } } min_match_start = litmatch.end; } Ok(None) } #[cfg_attr(feature = "perf-inline", inline(always))] fn try_search_half_fwd_stopat( &self, cache: &mut Cache, input: &Input<'_>, ) -> Result<Result<HalfMatch, usize>, RetryFailError> { if let Some(e) = self.core.dfa.get(&input) { trace!( "using full DFA for forward reverse inner search at {:?}", input.get_span() ); e.try_search_half_fwd_stopat(&input) } else if let Some(e) = self.core.hybrid.get(&input) { trace!( "using lazy DFA for forward reverse inner search at {:?}", input.get_span() ); e.try_search_half_fwd_stopat(&mut cache.hybrid, &input) } else { unreachable!("ReverseInner always has a DFA") } } #[cfg_attr(feature = "perf-inline", inline(always))] fn try_search_half_rev_limited( &self, cache: &mut Cache, input: &Input<'_>, min_start: usize, ) -> Result<Option<HalfMatch>, RetryError> { if let Some(e) = self.dfa.get(&input) { trace!( "using full DFA for reverse inner search at {:?}, \ but will be stopped at {} to avoid quadratic behavior", input.get_span(), min_start, ); e.try_search_half_rev_limited(&input, min_start) } else if let Some(e) = self.hybrid.get(&input) { trace!( "using lazy DFA for reverse inner search at {:?}, \ but will be stopped at {} to avoid quadratic behavior", input.get_span(), min_start, ); e.try_search_half_rev_limited( &mut cache.revhybrid, &input, min_start, ) } else { unreachable!("ReverseInner always has a DFA") } } } impl Strategy for ReverseInner { #[cfg_attr(feature = "perf-inline", inline(always))] fn group_info(&self) -> &GroupInfo { self.core.group_info() } #[cfg_attr(feature = "perf-inline", inline(always))] fn create_cache(&self) -> Cache { let mut cache = self.core.create_cache(); cache.revhybrid = self.hybrid.create_cache(); cache } #[cfg_attr(feature = "perf-inline", inline(always))] fn reset_cache(&self, cache: &mut Cache) { self.core.reset_cache(cache); cache.revhybrid.reset(&self.hybrid); } fn is_accelerated(&self) -> bool { self.preinner.is_fast() } fn memory_usage(&self) -> usize { self.core.memory_usage() + self.preinner.memory_usage() + self.nfarev.memory_usage() + self.dfa.memory_usage() } #[cfg_attr(feature = "perf-inline", inline(always))] fn search(&self, cache: &mut Cache, input: &Input<'_>) -> Option<Match> { if input.get_anchored().is_anchored() { return self.core.search(cache, input); } match self.try_search_full(cache, input) { Err(RetryError::Quadratic(_err)) => { trace!("reverse inner optimization failed: {}", _err); self.core.search(cache, input) } Err(RetryError::Fail(_err)) => { trace!("reverse inner fast search failed: {}", _err); self.core.search_nofail(cache, input) } Ok(matornot) => matornot, } } #[cfg_attr(feature = "perf-inline", inline(always))] fn search_half( &self, cache: &mut Cache, input: &Input<'_>, ) -> Option<HalfMatch> { if input.get_anchored().is_anchored() { return self.core.search_half(cache, input); } match self.try_search_full(cache, input) { Err(RetryError::Quadratic(_err)) => { trace!("reverse inner half optimization failed: {}", _err); self.core.search_half(cache, input) } Err(RetryError::Fail(_err)) => { trace!("reverse inner fast half search failed: {}", _err); self.core.search_half_nofail(cache, input) } Ok(None) => None, Ok(Some(m)) => Some(HalfMatch::new(m.pattern(), m.end())), } } #[cfg_attr(feature = "perf-inline", inline(always))] fn 
is_match(&self, cache: &mut Cache, input: &Input<'_>) -> bool { if input.get_anchored().is_anchored() { return self.core.is_match(cache, input); } match self.try_search_full(cache, input) { Err(RetryError::Quadratic(_err)) => { trace!("reverse inner half optimization failed: {}", _err); self.core.is_match_nofail(cache, input) } Err(RetryError::Fail(_err)) => { trace!("reverse inner fast half search failed: {}", _err); self.core.is_match_nofail(cache, input) } Ok(None) => false, Ok(Some(_)) => true, } } #[cfg_attr(feature = "perf-inline", inline(always))] fn search_slots( &self, cache: &mut Cache, input: &Input<'_>, slots: &mut [Option<NonMaxUsize>], ) -> Option<PatternID> { if input.get_anchored().is_anchored() { return self.core.search_slots(cache, input, slots); } if !self.core.is_capture_search_needed(slots.len()) { trace!("asked for slots unnecessarily, trying fast path"); let m = self.search(cache, input)?; copy_match_to_slots(m, slots); return Some(m.pattern()); } let m = match self.try_search_full(cache, input) { Err(RetryError::Quadratic(_err)) => { trace!("reverse inner captures optimization failed: {}", _err); return self.core.search_slots(cache, input, slots); } Err(RetryError::Fail(_err)) => { trace!("reverse inner fast captures search failed: {}", _err); return self.core.search_slots_nofail(cache, input, slots); } Ok(None) => return None, Ok(Some(m)) => m, }; trace!( "match found at {}..{} in capture search, \ using another engine to find captures", m.start(), m.end(), ); let input = input .clone() .span(m.start()..m.end()) .anchored(Anchored::Pattern(m.pattern())); self.core.search_slots_nofail(cache, &input, slots) } #[cfg_attr(feature = "perf-inline", inline(always))] fn which_overlapping_matches( &self, cache: &mut Cache, input: &Input<'_>, patset: &mut PatternSet, ) { self.core.which_overlapping_matches(cache, input, patset) } } /// Copies the offsets in the given match to the corresponding positions in /// `slots`. /// /// In effect, this sets the slots corresponding to the implicit group for the /// pattern in the given match. If the indices for the corresponding slots do /// not exist, then no slots are set. /// /// This is useful when the caller provides slots (or captures), but you use a /// regex engine that doesn't operate on slots (like a lazy DFA). This function /// lets you map the match you get back to the slots provided by the caller. #[cfg_attr(feature = "perf-inline", inline(always))] fn copy_match_to_slots(m: Match, slots: &mut [Option<NonMaxUsize>]) { let slot_start = m.pattern().as_usize() * 2; let slot_end = slot_start + 1; if let Some(slot) = slots.get_mut(slot_start) { *slot = NonMaxUsize::new(m.start()); } if let Some(slot) = slots.get_mut(slot_end) { *slot = NonMaxUsize::new(m.end()); } } <file_sep>/regex-automata/src/hybrid/search.rs use crate::{ hybrid::{ dfa::{Cache, OverlappingState, DFA}, id::LazyStateID, }, util::{ prefilter::Prefilter, search::{HalfMatch, Input, MatchError, Span}, }, }; #[inline(never)] pub(crate) fn find_fwd( dfa: &DFA, cache: &mut Cache, input: &Input<'_>, ) -> Result<Option<HalfMatch>, MatchError> { if input.is_done() { return Ok(None); } let pre = if input.get_anchored().is_anchored() { None } else { dfa.get_config().get_prefilter() }; // So what we do here is specialize four different versions of 'find_fwd': // one for each of the combinations for 'has prefilter' and 'is earliest // search'. 
The reason for doing this is that both of these things require // branches and special handling in some code that can be very hot, // and shaving off as much as we can when we don't need it tends to be // beneficial in ad hoc benchmarks. To see these differences, you often // need a query with a high match count. In other words, specializing these // four routines *tends* to help latency more than throughput. if pre.is_some() { if input.get_earliest() { find_fwd_imp(dfa, cache, input, pre, true) } else { find_fwd_imp(dfa, cache, input, pre, false) } } else { if input.get_earliest() { find_fwd_imp(dfa, cache, input, None, true) } else { find_fwd_imp(dfa, cache, input, None, false) } } } #[cfg_attr(feature = "perf-inline", inline(always))] fn find_fwd_imp( dfa: &DFA, cache: &mut Cache, input: &Input<'_>, pre: Option<&'_ Prefilter>, earliest: bool, ) -> Result<Option<HalfMatch>, MatchError> { // See 'prefilter_restart' docs for explanation. let universal_start = dfa.get_nfa().look_set_prefix_any().is_empty(); let mut mat = None; let mut sid = init_fwd(dfa, cache, input)?; let mut at = input.start(); // This could just be a closure, but then I think it would be unsound // because it would need to be safe to invoke. This way, the lack of safety // is clearer in the code below. macro_rules! next_unchecked { ($sid:expr, $at:expr) => {{ let byte = *input.haystack().get_unchecked($at); dfa.next_state_untagged_unchecked(cache, $sid, byte) }}; } if let Some(ref pre) = pre { let span = Span::from(at..input.end()); match pre.find(input.haystack(), span) { None => return Ok(mat), Some(ref span) => { at = span.start; if !universal_start { sid = prefilter_restart(dfa, cache, &input, at)?; } } } } cache.search_start(at); while at < input.end() { if sid.is_tagged() { cache.search_update(at); sid = dfa .next_state(cache, sid, input.haystack()[at]) .map_err(|_| gave_up(at))?; } else { // SAFETY: There are two safety invariants we need to uphold // here in the loops below: that 'sid' and 'prev_sid' are valid // state IDs for this DFA, and that 'at' is a valid index into // 'haystack'. For the former, we rely on the invariant that // next_state* and start_state_forward always returns a valid state // ID (given a valid state ID in the former case), and that we are // only at this place in the code if 'sid' is untagged. Moreover, // every call to next_state_untagged_unchecked below is guarded by // a check that sid is untagged. For the latter safety invariant, // we always guard unchecked access with a check that 'at' is less // than 'end', where 'end <= haystack.len()'. In the unrolled loop // below, we ensure that 'at' is always in bounds. // // PERF: For justification of omitting bounds checks, it gives us a // ~10% bump in search time. This was used for a benchmark: // // regex-cli find hybrid dfa @bigfile '(?m)^.+$' -UBb // // PERF: For justification for the loop unrolling, we use a few // different tests: // // regex-cli find hybrid dfa @$bigfile '\w{50}' -UBb // regex-cli find hybrid dfa @$bigfile '(?m)^.+$' -UBb // regex-cli find hybrid dfa @$bigfile 'ZQZQZQZQ' -UBb // // And there are three different configurations: // // nounroll: this entire 'else' block vanishes and we just // always use 'dfa.next_state(..)'. 
// unroll1: just the outer loop below // unroll2: just the inner loop below // unroll3: both the outer and inner loops below // // This results in a matrix of timings for each of the above // regexes with each of the above unrolling configurations: // // '\w{50}' '(?m)^.+$' 'ZQZQZQZQ' // nounroll 1.51s 2.34s 1.51s // unroll1 1.53s 2.32s 1.56s // unroll2 2.22s 1.50s 0.61s // unroll3 1.67s 1.45s 0.61s // // Ideally we'd be able to find a configuration that yields the // best time for all regexes, but alas we settle for unroll3 that // gives us *almost* the best for '\w{50}' and the best for the // other two regexes. // // So what exactly is going on here? The first unrolling (grouping // together runs of untagged transitions) specifically targets // our choice of representation. The second unrolling (grouping // together runs of self-transitions) specifically targets a common // DFA topology. Let's dig in a little bit by looking at our // regexes: // // '\w{50}': This regex spends a lot of time outside of the DFA's // start state matching some part of the '\w' repetition. This // means that it's a bit of a worst case for loop unrolling that // targets self-transitions since the self-transitions in '\w{50}' // are not particularly active for this haystack. However, the // first unrolling (grouping together untagged transitions) // does apply quite well here since very few transitions hit // match/dead/quit/unknown states. It is however worth mentioning // that if start states are configured to be tagged (which you // typically want to do if you have a prefilter), then this regex // actually slows way down because it is constantly ping-ponging // out of the unrolled loop and into the handling of a tagged start // state below. But when start states aren't tagged, the unrolled // loop stays hot. (This is why it's imperative that start state // tagging be disabled when there isn't a prefilter!) // // '(?m)^.+$': There are two important aspects of this regex: 1) // on this haystack, its match count is very high, much higher // than the other two regex and 2) it spends the vast majority // of its time matching '.+'. Since Unicode mode is disabled, // this corresponds to repeatedly following self transitions for // the vast majority of the input. This does benefit from the // untagged unrolling since most of the transitions will be to // untagged states, but the untagged unrolling does more work than // what is actually required. Namely, it has to keep track of the // previous and next state IDs, which I guess requires a bit more // shuffling. This is supported by the fact that nounroll+unroll1 // are both slower than unroll2+unroll3, where the latter has a // loop unrolling that specifically targets self-transitions. // // 'ZQZQZQZQ': This one is very similar to '(?m)^.+$' because it // spends the vast majority of its time in self-transitions for // the (implicit) unanchored prefix. The main difference with // '(?m)^.+$' is that it has a much lower match count. So there // isn't much time spent in the overhead of reporting matches. This // is the primary explainer in the perf difference here. We include // this regex and the former to make sure we have comparison points // with high and low match counts. // // NOTE: I used 'OpenSubtitles2018.raw.sample.en' for 'bigfile'. // // NOTE: In a follow-up, it turns out that the "inner" loop // mentioned above was a pretty big pessimization in some other // cases. 
Namely, it resulted in too much ping-ponging into and out // of the loop, which resulted in nearly ~2x regressions in search // time when compared to the originally lazy DFA in the regex crate. // So I've removed the second loop unrolling that targets the // self-transition case. let mut prev_sid = sid; while at < input.end() { prev_sid = unsafe { next_unchecked!(sid, at) }; if prev_sid.is_tagged() || at + 3 >= input.end() { core::mem::swap(&mut prev_sid, &mut sid); break; } at += 1; sid = unsafe { next_unchecked!(prev_sid, at) }; if sid.is_tagged() { break; } at += 1; prev_sid = unsafe { next_unchecked!(sid, at) }; if prev_sid.is_tagged() { core::mem::swap(&mut prev_sid, &mut sid); break; } at += 1; sid = unsafe { next_unchecked!(prev_sid, at) }; if sid.is_tagged() { break; } at += 1; } // If we quit out of the code above with an unknown state ID at // any point, then we need to re-compute that transition using // 'next_state', which will do NFA powerset construction for us. if sid.is_unknown() { cache.search_update(at); sid = dfa .next_state(cache, prev_sid, input.haystack()[at]) .map_err(|_| gave_up(at))?; } } if sid.is_tagged() { if sid.is_start() { if let Some(ref pre) = pre { let span = Span::from(at..input.end()); match pre.find(input.haystack(), span) { None => { cache.search_finish(span.end); return Ok(mat); } Some(ref span) => { // We want to skip any update to 'at' below // at the end of this iteration and just // jump immediately back to the next state // transition at the leading position of the // candidate match. // // ... but only if we actually made progress // with our prefilter, otherwise if the start // state has a self-loop, we can get stuck. if span.start > at { at = span.start; if !universal_start { sid = prefilter_restart( dfa, cache, &input, at, )?; } continue; } } } } } else if sid.is_match() { let pattern = dfa.match_pattern(cache, sid, 0); // Since slice ranges are inclusive at the beginning and // exclusive at the end, and since forward searches report // the end, we can return 'at' as-is. This only works because // matches are delayed by 1 byte. So by the time we observe a // match, 'at' has already been set to 1 byte past the actual // match location, which is precisely the exclusive ending // bound of the match. mat = Some(HalfMatch::new(pattern, at)); if earliest { cache.search_finish(at); return Ok(mat); } } else if sid.is_dead() { cache.search_finish(at); return Ok(mat); } else if sid.is_quit() { cache.search_finish(at); return Err(MatchError::quit(input.haystack()[at], at)); } else { debug_assert!(sid.is_unknown()); unreachable!("sid being unknown is a bug"); } } at += 1; } eoi_fwd(dfa, cache, input, &mut sid, &mut mat)?; cache.search_finish(input.end()); Ok(mat) } #[inline(never)] pub(crate) fn find_rev( dfa: &DFA, cache: &mut Cache, input: &Input<'_>, ) -> Result<Option<HalfMatch>, MatchError> { if input.is_done() { return Ok(None); } if input.get_earliest() { find_rev_imp(dfa, cache, input, true) } else { find_rev_imp(dfa, cache, input, false) } } #[cfg_attr(feature = "perf-inline", inline(always))] fn find_rev_imp( dfa: &DFA, cache: &mut Cache, input: &Input<'_>, earliest: bool, ) -> Result<Option<HalfMatch>, MatchError> { let mut mat = None; let mut sid = init_rev(dfa, cache, input)?; // In reverse search, the loop below can't handle the case of searching an // empty slice. Ideally we could write something congruent to the forward // search, i.e., 'while at >= start', but 'start' might be 0. 
Since we use // an unsigned offset, 'at >= 0' is trivially always true. We could avoid // this extra case handling by using a signed offset, but Rust makes it // annoying to do. So... We just handle the empty case separately. if input.start() == input.end() { eoi_rev(dfa, cache, input, &mut sid, &mut mat)?; return Ok(mat); } let mut at = input.end() - 1; macro_rules! next_unchecked { ($sid:expr, $at:expr) => {{ let byte = *input.haystack().get_unchecked($at); dfa.next_state_untagged_unchecked(cache, $sid, byte) }}; } cache.search_start(at); loop { if sid.is_tagged() { cache.search_update(at); sid = dfa .next_state(cache, sid, input.haystack()[at]) .map_err(|_| gave_up(at))?; } else { // SAFETY: See comments in 'find_fwd' for a safety argument. // // PERF: The comments in 'find_fwd' also provide a justification // from a performance perspective as to 1) why we elide bounds // checks and 2) why we do a specialized version of unrolling // below. The reverse search does have a slightly different // consideration in that most reverse searches tend to be // anchored and on shorter haystacks. However, this still makes a // difference. Take this command for example: // // regex-cli find hybrid regex @$bigfile '(?m)^.+$' -UBb // // (Notice that we use 'find hybrid regex', not 'find hybrid dfa' // like in the justification for the forward direction. The 'regex' // sub-command will find start-of-match and thus run the reverse // direction.) // // Without unrolling below, the above command takes around 3.76s. // But with the unrolling below, we get down to 2.55s. If we keep // the unrolling but add in bounds checks, then we get 2.86s. // // NOTE: I used 'OpenSubtitles2018.raw.sample.en' for 'bigfile'. let mut prev_sid = sid; while at >= input.start() { prev_sid = unsafe { next_unchecked!(sid, at) }; if prev_sid.is_tagged() || at <= input.start().saturating_add(3) { core::mem::swap(&mut prev_sid, &mut sid); break; } at -= 1; sid = unsafe { next_unchecked!(prev_sid, at) }; if sid.is_tagged() { break; } at -= 1; prev_sid = unsafe { next_unchecked!(sid, at) }; if prev_sid.is_tagged() { core::mem::swap(&mut prev_sid, &mut sid); break; } at -= 1; sid = unsafe { next_unchecked!(prev_sid, at) }; if sid.is_tagged() { break; } at -= 1; } // If we quit out of the code above with an unknown state ID at // any point, then we need to re-compute that transition using // 'next_state', which will do NFA powerset construction for us. if sid.is_unknown() { cache.search_update(at); sid = dfa .next_state(cache, prev_sid, input.haystack()[at]) .map_err(|_| gave_up(at))?; } } if sid.is_tagged() { if sid.is_start() { // do nothing } else if sid.is_match() { let pattern = dfa.match_pattern(cache, sid, 0); // Since reverse searches report the beginning of a match // and the beginning is inclusive (not exclusive like the // end of a match), we add 1 to make it inclusive. 
mat = Some(HalfMatch::new(pattern, at + 1)); if earliest { cache.search_finish(at); return Ok(mat); } } else if sid.is_dead() { cache.search_finish(at); return Ok(mat); } else if sid.is_quit() { cache.search_finish(at); return Err(MatchError::quit(input.haystack()[at], at)); } else { debug_assert!(sid.is_unknown()); unreachable!("sid being unknown is a bug"); } } if at == input.start() { break; } at -= 1; } cache.search_finish(input.start()); eoi_rev(dfa, cache, input, &mut sid, &mut mat)?; Ok(mat) } #[inline(never)] pub(crate) fn find_overlapping_fwd( dfa: &DFA, cache: &mut Cache, input: &Input<'_>, state: &mut OverlappingState, ) -> Result<(), MatchError> { state.mat = None; if input.is_done() { return Ok(()); } let pre = if input.get_anchored().is_anchored() { None } else { dfa.get_config().get_prefilter() }; if pre.is_some() { find_overlapping_fwd_imp(dfa, cache, input, pre, state) } else { find_overlapping_fwd_imp(dfa, cache, input, None, state) } } #[cfg_attr(feature = "perf-inline", inline(always))] fn find_overlapping_fwd_imp( dfa: &DFA, cache: &mut Cache, input: &Input<'_>, pre: Option<&'_ Prefilter>, state: &mut OverlappingState, ) -> Result<(), MatchError> { // See 'prefilter_restart' docs for explanation. let universal_start = dfa.get_nfa().look_set_prefix_any().is_empty(); let mut sid = match state.id { None => { state.at = input.start(); init_fwd(dfa, cache, input)? } Some(sid) => { if let Some(match_index) = state.next_match_index { let match_len = dfa.match_len(cache, sid); if match_index < match_len { state.next_match_index = Some(match_index + 1); let pattern = dfa.match_pattern(cache, sid, match_index); state.mat = Some(HalfMatch::new(pattern, state.at)); return Ok(()); } } // Once we've reported all matches at a given position, we need to // advance the search to the next position. state.at += 1; if state.at > input.end() { return Ok(()); } sid } }; // NOTE: We don't optimize the crap out of this routine primarily because // it seems like most overlapping searches will have higher match counts, // and thus, throughput is perhaps not as important. But if you have a use // case for something faster, feel free to file an issue. cache.search_start(state.at); while state.at < input.end() { sid = dfa .next_state(cache, sid, input.haystack()[state.at]) .map_err(|_| gave_up(state.at))?; if sid.is_tagged() { state.id = Some(sid); if sid.is_start() { if let Some(ref pre) = pre { let span = Span::from(state.at..input.end()); match pre.find(input.haystack(), span) { None => return Ok(()), Some(ref span) => { if span.start > state.at { state.at = span.start; if !universal_start { sid = prefilter_restart( dfa, cache, &input, state.at, )?; } continue; } } } } } else if sid.is_match() { state.next_match_index = Some(1); let pattern = dfa.match_pattern(cache, sid, 0); state.mat = Some(HalfMatch::new(pattern, state.at)); cache.search_finish(state.at); return Ok(()); } else if sid.is_dead() { cache.search_finish(state.at); return Ok(()); } else if sid.is_quit() { cache.search_finish(state.at); return Err(MatchError::quit( input.haystack()[state.at], state.at, )); } else { debug_assert!(sid.is_unknown()); unreachable!("sid being unknown is a bug"); } } state.at += 1; cache.search_update(state.at); } let result = eoi_fwd(dfa, cache, input, &mut sid, &mut state.mat); state.id = Some(sid); if state.mat.is_some() { // '1' is always correct here since if we get to this point, this // always corresponds to the first (index '0') match discovered at // this position. 
So the next match to report at this position (if // it exists) is at index '1'. state.next_match_index = Some(1); } cache.search_finish(input.end()); result } #[inline(never)] pub(crate) fn find_overlapping_rev( dfa: &DFA, cache: &mut Cache, input: &Input<'_>, state: &mut OverlappingState, ) -> Result<(), MatchError> { state.mat = None; if input.is_done() { return Ok(()); } let mut sid = match state.id { None => { let sid = init_rev(dfa, cache, input)?; state.id = Some(sid); if input.start() == input.end() { state.rev_eoi = true; } else { state.at = input.end() - 1; } sid } Some(sid) => { if let Some(match_index) = state.next_match_index { let match_len = dfa.match_len(cache, sid); if match_index < match_len { state.next_match_index = Some(match_index + 1); let pattern = dfa.match_pattern(cache, sid, match_index); state.mat = Some(HalfMatch::new(pattern, state.at)); return Ok(()); } } // Once we've reported all matches at a given position, we need // to advance the search to the next position. However, if we've // already followed the EOI transition, then we know we're done // with the search and there cannot be any more matches to report. if state.rev_eoi { return Ok(()); } else if state.at == input.start() { // At this point, we should follow the EOI transition. This // will cause us the skip the main loop below and fall through // to the final 'eoi_rev' transition. state.rev_eoi = true; } else { // We haven't hit the end of the search yet, so move on. state.at -= 1; } sid } }; cache.search_start(state.at); while !state.rev_eoi { sid = dfa .next_state(cache, sid, input.haystack()[state.at]) .map_err(|_| gave_up(state.at))?; if sid.is_tagged() { state.id = Some(sid); if sid.is_start() { // do nothing } else if sid.is_match() { state.next_match_index = Some(1); let pattern = dfa.match_pattern(cache, sid, 0); state.mat = Some(HalfMatch::new(pattern, state.at + 1)); cache.search_finish(state.at); return Ok(()); } else if sid.is_dead() { cache.search_finish(state.at); return Ok(()); } else if sid.is_quit() { cache.search_finish(state.at); return Err(MatchError::quit( input.haystack()[state.at], state.at, )); } else { debug_assert!(sid.is_unknown()); unreachable!("sid being unknown is a bug"); } } if state.at == input.start() { break; } state.at -= 1; cache.search_update(state.at); } let result = eoi_rev(dfa, cache, input, &mut sid, &mut state.mat); state.rev_eoi = true; state.id = Some(sid); if state.mat.is_some() { // '1' is always correct here since if we get to this point, this // always corresponds to the first (index '0') match discovered at // this position. So the next match to report at this position (if // it exists) is at index '1'. state.next_match_index = Some(1); } cache.search_finish(input.start()); result } #[cfg_attr(feature = "perf-inline", inline(always))] fn init_fwd( dfa: &DFA, cache: &mut Cache, input: &Input<'_>, ) -> Result<LazyStateID, MatchError> { let sid = dfa.start_state_forward(cache, input)?; // Start states can never be match states, since all matches are delayed // by 1 byte. debug_assert!(!sid.is_match()); Ok(sid) } #[cfg_attr(feature = "perf-inline", inline(always))] fn init_rev( dfa: &DFA, cache: &mut Cache, input: &Input<'_>, ) -> Result<LazyStateID, MatchError> { let sid = dfa.start_state_reverse(cache, input)?; // Start states can never be match states, since all matches are delayed // by 1 byte. 
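    // (Because of that delay, a match, even an empty one at the starting
    // position, is only ever observed after taking a byte or EOI transition
    // out of the start state, and never in the start state itself.)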
debug_assert!(!sid.is_match()); Ok(sid) } #[cfg_attr(feature = "perf-inline", inline(always))] fn eoi_fwd( dfa: &DFA, cache: &mut Cache, input: &Input<'_>, sid: &mut LazyStateID, mat: &mut Option<HalfMatch>, ) -> Result<(), MatchError> { let sp = input.get_span(); match input.haystack().get(sp.end) { Some(&b) => { *sid = dfa.next_state(cache, *sid, b).map_err(|_| gave_up(sp.end))?; if sid.is_match() { let pattern = dfa.match_pattern(cache, *sid, 0); *mat = Some(HalfMatch::new(pattern, sp.end)); } else if sid.is_quit() { return Err(MatchError::quit(b, sp.end)); } } None => { *sid = dfa .next_eoi_state(cache, *sid) .map_err(|_| gave_up(input.haystack().len()))?; if sid.is_match() { let pattern = dfa.match_pattern(cache, *sid, 0); *mat = Some(HalfMatch::new(pattern, input.haystack().len())); } // N.B. We don't have to check 'is_quit' here because the EOI // transition can never lead to a quit state. debug_assert!(!sid.is_quit()); } } Ok(()) } #[cfg_attr(feature = "perf-inline", inline(always))] fn eoi_rev( dfa: &DFA, cache: &mut Cache, input: &Input<'_>, sid: &mut LazyStateID, mat: &mut Option<HalfMatch>, ) -> Result<(), MatchError> { let sp = input.get_span(); if sp.start > 0 { let byte = input.haystack()[sp.start - 1]; *sid = dfa .next_state(cache, *sid, byte) .map_err(|_| gave_up(sp.start))?; if sid.is_match() { let pattern = dfa.match_pattern(cache, *sid, 0); *mat = Some(HalfMatch::new(pattern, sp.start)); } else if sid.is_quit() { return Err(MatchError::quit(byte, sp.start - 1)); } } else { *sid = dfa.next_eoi_state(cache, *sid).map_err(|_| gave_up(sp.start))?; if sid.is_match() { let pattern = dfa.match_pattern(cache, *sid, 0); *mat = Some(HalfMatch::new(pattern, 0)); } // N.B. We don't have to check 'is_quit' here because the EOI // transition can never lead to a quit state. debug_assert!(!sid.is_quit()); } Ok(()) } /// Re-compute the starting state that a DFA should be in after finding a /// prefilter candidate match at the position `at`. /// /// It is always correct to call this, but not always necessary. Namely, /// whenever the DFA has a universal start state, the DFA can remain in the /// start state that it was in when it ran the prefilter. Why? Because in that /// case, there is only one start state. /// /// When does a DFA have a universal start state? In precisely cases where /// it has no look-around assertions in its prefix. So for example, `\bfoo` /// does not have a universal start state because the start state depends on /// whether the byte immediately before the start position is a word byte or /// not. However, `foo\b` does have a universal start state because the word /// boundary does not appear in the pattern's prefix. /// /// So... most cases don't need this, but when a pattern doesn't have a /// universal start state, then after a prefilter candidate has been found, the /// current state *must* be re-litigated as if computing the start state at the /// beginning of the search because it might change. That is, not all start /// states are created equal. /// /// Why avoid it? Because while it's not super expensive, it isn't a trivial /// operation to compute the start state. It is much better to avoid it and /// just state in the current state if you know it to be correct. 
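///
/// The callers in this module compute whether the DFA has a universal start
/// state once up front (via `dfa.get_nfa().look_set_prefix_any().is_empty()`)
/// and then, after each prefilter candidate found at position `at`, do
/// something like `if !universal_start { sid = prefilter_restart(dfa, cache,
/// &input, at)?; }` before resuming the scan.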
#[cfg_attr(feature = "perf-inline", inline(always))] fn prefilter_restart( dfa: &DFA, cache: &mut Cache, input: &Input<'_>, at: usize, ) -> Result<LazyStateID, MatchError> { let mut input = input.clone(); input.set_start(at); init_fwd(dfa, cache, &input) } /// A convenience routine for constructing a "gave up" match error. #[cfg_attr(feature = "perf-inline", inline(always))] fn gave_up(offset: usize) -> MatchError { MatchError::gave_up(offset) } <file_sep>/regex-cli/cmd/find/half/dfa.rs use regex_automata::{dfa::Automaton, Input}; use crate::{ args, util::{self, Table}, }; pub fn run_hybrid(p: &mut lexopt::Parser) -> anyhow::Result<()> { use regex_automata::hybrid::dfa::OverlappingState; const USAGE: &'static str = "\ Executes a search for half matches using the lazy DFA regex engine. USAGE: regex-cli find half hybrid [-p <pattern> ...] <haystack-path> regex-cli find half hybrid [-p <pattern> ...] -y <haystack> TIP: use -h for short docs and --help for long docs OPTIONS: %options% "; let mut common = args::common::Config::default(); let mut input = args::input::Config::default(); let mut patterns = args::patterns::Config::only_flags(); let mut haystack = args::haystack::Config::default(); let mut syntax = args::syntax::Config::default(); let mut thompson = args::thompson::Config::default(); let mut hybrid = args::hybrid::Config::default(); let mut overlapping = args::overlapping::Config::default(); let mut find = super::super::Config::default(); args::configure( p, USAGE, &mut [ &mut common, &mut input, &mut patterns, &mut haystack, &mut syntax, &mut thompson, &mut hybrid, &mut overlapping, &mut find, ], )?; let pats = patterns.get()?; let mut table = Table::empty(); let (asts, time) = util::timeitr(|| syntax.asts(&pats))?; table.add("parse time", time); let (hirs, time) = util::timeitr(|| syntax.hirs(&pats, &asts))?; table.add("translate time", time); let (nfafwd, time) = util::timeitr(|| thompson.from_hirs(&hirs))?; table.add("compile forward nfa time", time); let (re, time) = util::timeitr(|| hybrid.from_nfa(&nfafwd))?; table.add("build forward hybrid time", time); let (mut cache, time) = util::timeit(|| re.create_cache()); table.add("cache creation time", time); if overlapping.enabled { let search = |input: &Input<'_>, state: &mut OverlappingState| { re.try_search_overlapping_fwd(&mut cache, input, state) }; if find.count { super::run_counts_overlapping( &mut table, &common, &find, &input, &haystack, re.pattern_len(), || OverlappingState::start(), |s| s.get_match(), search, )?; } else { super::run_search_overlapping( &mut table, &common, &find, &input, &haystack, || OverlappingState::start(), |s| s.get_match(), search, )?; } } else { let search = |input: &Input<'_>| re.try_search_fwd(&mut cache, input); if find.count { super::run_counts( &mut table, &common, &find, &input, &haystack, re.pattern_len(), search, )?; } else { super::run_search( &mut table, &common, &find, &input, &haystack, search, )?; } } Ok(()) } pub fn run_dense(p: &mut lexopt::Parser) -> anyhow::Result<()> { use regex_automata::dfa::OverlappingState; const USAGE: &'static str = "\ Executes a search for half matches using the dense DFA regex engine. USAGE: regex-cli find half dense [-p <pattern> ...] <haystack-path> regex-cli find half dense [-p <pattern> ...] 
-y <haystack> TIP: use -h for short docs and --help for long docs OPTIONS: %options% "; let mut common = args::common::Config::default(); let mut input = args::input::Config::default(); let mut patterns = args::patterns::Config::only_flags(); let mut haystack = args::haystack::Config::default(); let mut syntax = args::syntax::Config::default(); let mut thompson = args::thompson::Config::default(); let mut dense = args::dfa::Config::default(); let mut overlapping = args::overlapping::Config::default(); let mut find = super::super::Config::default(); args::configure( p, USAGE, &mut [ &mut common, &mut input, &mut patterns, &mut haystack, &mut syntax, &mut thompson, &mut dense, &mut overlapping, &mut find, ], )?; let pats = patterns.get()?; let mut table = Table::empty(); let (asts, time) = util::timeitr(|| syntax.asts(&pats))?; table.add("parse time", time); let (hirs, time) = util::timeitr(|| syntax.hirs(&pats, &asts))?; table.add("translate time", time); let (nfafwd, time) = util::timeitr(|| thompson.from_hirs(&hirs))?; table.add("compile forward nfa time", time); let (re, time) = util::timeitr(|| dense.from_nfa(&nfafwd))?; table.add("build forward dense DFA time", time); if overlapping.enabled { let search = |input: &Input<'_>, state: &mut OverlappingState| { re.try_search_overlapping_fwd(input, state) }; if find.count { super::run_counts_overlapping( &mut table, &common, &find, &input, &haystack, re.pattern_len(), || OverlappingState::start(), |s| s.get_match(), search, )?; } else { super::run_search_overlapping( &mut table, &common, &find, &input, &haystack, || OverlappingState::start(), |s| s.get_match(), search, )?; } } else { let search = |input: &Input<'_>| re.try_search_fwd(input); if find.count { super::run_counts( &mut table, &common, &find, &input, &haystack, re.pattern_len(), search, )?; } else { super::run_search( &mut table, &common, &find, &input, &haystack, search, )?; } } Ok(()) } pub fn run_sparse(p: &mut lexopt::Parser) -> anyhow::Result<()> { use regex_automata::dfa::OverlappingState; const USAGE: &'static str = "\ Executes a search for half matches using the sparse DFA regex engine. USAGE: regex-cli find half sparse [-p <pattern> ...] <haystack-path> regex-cli find half sparse [-p <pattern> ...] 
-y <haystack> TIP: use -h for short docs and --help for long docs OPTIONS: %options% "; let mut common = args::common::Config::default(); let mut input = args::input::Config::default(); let mut patterns = args::patterns::Config::only_flags(); let mut haystack = args::haystack::Config::default(); let mut syntax = args::syntax::Config::default(); let mut thompson = args::thompson::Config::default(); let mut sparse = args::dfa::Config::default(); let mut overlapping = args::overlapping::Config::default(); let mut find = super::super::Config::default(); args::configure( p, USAGE, &mut [ &mut common, &mut input, &mut patterns, &mut haystack, &mut syntax, &mut thompson, &mut sparse, &mut overlapping, &mut find, ], )?; let pats = patterns.get()?; let mut table = Table::empty(); let (asts, time) = util::timeitr(|| syntax.asts(&pats))?; table.add("parse time", time); let (hirs, time) = util::timeitr(|| syntax.hirs(&pats, &asts))?; table.add("translate time", time); let (nfafwd, time) = util::timeitr(|| thompson.from_hirs(&hirs))?; table.add("compile forward nfa time", time); let (re, time) = util::timeitr(|| sparse.from_nfa_sparse(&nfafwd))?; table.add("build forward sparse DFA time", time); if overlapping.enabled { let search = |input: &Input<'_>, state: &mut OverlappingState| { re.try_search_overlapping_fwd(input, state) }; if find.count { super::run_counts_overlapping( &mut table, &common, &find, &input, &haystack, re.pattern_len(), || OverlappingState::start(), |s| s.get_match(), search, )?; } else { super::run_search_overlapping( &mut table, &common, &find, &input, &haystack, || OverlappingState::start(), |s| s.get_match(), search, )?; } } else { let search = |input: &Input<'_>| re.try_search_fwd(input); if find.count { super::run_counts( &mut table, &common, &find, &input, &haystack, re.pattern_len(), search, )?; } else { super::run_search( &mut table, &common, &find, &input, &haystack, search, )?; } } Ok(()) } <file_sep>/regex-capi/examples/iter.c /* * This example code shows how to iterate over all regex matches in a file, * emit the match location and print the contents of a capturing group. */ #include <fcntl.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/mman.h> #include <sys/stat.h> #include <sys/types.h> #include <unistd.h> #include "rure.h" int main() { /* Open a file and mmap it. */ int fd = open("sherlock.txt", O_RDONLY); if (fd == -1) { perror("failed to open sherlock.txt"); exit(1); } struct stat status; if (fstat(fd, &status) == -1) { perror("failed to stat sherlock.txt"); exit(1); } if ((uintmax_t)status.st_size > SIZE_MAX) { perror("file too big"); exit(1); } if (status.st_size == 0) { perror("file empty"); exit(1); } size_t sherlock_len = (size_t)status.st_size; const uint8_t *sherlock = (const uint8_t *)mmap( NULL, status.st_size, PROT_READ, MAP_PRIVATE, fd, 0); close(fd); if (sherlock == MAP_FAILED) { perror("could not mmap file"); exit(1); } /* * Compile the regular expression. A more convenient routine, * rure_compile_must, is also available, which will abort the process if * and print an error message to stderr if the regex compilation fails. * We show the full gory details here as an example. */ const char *pattern = "(\\w+)\\s+Holmes"; size_t pattern_len = strlen(pattern); rure_error *err = rure_error_new(); rure *re = rure_compile((const uint8_t *)pattern, pattern_len, RURE_FLAG_UNICODE | RURE_FLAG_CASEI, NULL, err); if (NULL == re) { /* A null regex means compilation failed and an error exists. 
*/ printf("compilation of %s failed: %s\n", pattern, rure_error_message(err)); rure_error_free(err); munmap((char*)sherlock, sherlock_len); exit(1); } rure_error_free(err); /* * Create an iterator to find all successive non-overlapping matches. * For each match, we extract the location of the capturing group. */ rure_match group0 = {0}; rure_match group1 = {0}; rure_captures *caps = rure_captures_new(re); rure_iter *it = rure_iter_new(re); while (rure_iter_next_captures(it, sherlock, sherlock_len, caps)) { /* * Get the location of the full match and the capturing group. * We know that both accesses are successful since the body of the * loop only executes if there is a match and both capture groups * must match in order for the entire regex to match. * * N.B. The zeroth group corresponds to the full match of the regex. */ rure_captures_at(caps, 0, &group0); rure_captures_at(caps, 1, &group1); printf("%.*s (match at: %zu, %zu)\n", (int)(group1.end - group1.start), sherlock + group1.start, group0.start, group0.end); } /* Free all our resources. */ munmap((char*)sherlock, sherlock_len); rure_captures_free(caps); rure_iter_free(it); rure_free(re); return 0; } <file_sep>/regex-automata/src/meta/wrappers.rs /*! This module contains a boat load of wrappers around each of our internal regex engines. They encapsulate a few things: 1. The wrappers manage the conditional existence of the regex engine. Namely, the PikeVM is the only required regex engine. The rest are optional. These wrappers present a uniform API regardless of which engines are available. And availability might be determined by compile time features or by dynamic configuration via `meta::Config`. Encapsulating the conditional compilation features is in particular a huge simplification for the higher level code that composes these engines. 2. The wrappers manage construction of each engine, including skipping it if the engine is unavailable or configured to not be used. 3. The wrappers manage whether an engine *can* be used for a particular search configuration. For example, `BoundedBacktracker::get` only returns a backtracking engine when the haystack is bigger than the maximum supported length. The wrappers also sometimes take a position on when an engine *ought* to be used, but only in cases where the logic is extremely local to the engine itself. Otherwise, things like "choose between the backtracker and the one-pass DFA" are managed by the higher level meta strategy code. There are also corresponding wrappers for the various `Cache` types for each regex engine that needs them. If an engine is unavailable or not used, then a cache for it will *not* actually be allocated. 
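Concretely, most of the wrappers below boil down to an `Option` wrapping the
underlying engine, paired with a `get` method: `get` returns `None` when the
engine was never built (because the relevant crate feature is disabled or the
engine was turned off via `meta::Config`) or when it cannot be used for the
particular search configuration given. The higher level code can then
uniformly write `if let Some(e) = wrapper.get(&input) { .. }` and fall back
to a different engine otherwise, without knowing which engines are actually
available in this build.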
*/ use alloc::vec::Vec; use crate::{ meta::{ error::{BuildError, RetryError, RetryFailError}, regex::RegexInfo, }, nfa::thompson::{pikevm, NFA}, util::{prefilter::Prefilter, primitives::NonMaxUsize}, HalfMatch, Input, Match, MatchKind, PatternID, PatternSet, }; #[cfg(feature = "dfa-build")] use crate::dfa; #[cfg(feature = "dfa-onepass")] use crate::dfa::onepass; #[cfg(feature = "hybrid")] use crate::hybrid; #[cfg(feature = "nfa-backtrack")] use crate::nfa::thompson::backtrack; #[derive(Debug)] pub(crate) struct PikeVM(PikeVMEngine); impl PikeVM { pub(crate) fn new( info: &RegexInfo, pre: Option<Prefilter>, nfa: &NFA, ) -> Result<PikeVM, BuildError> { PikeVMEngine::new(info, pre, nfa).map(PikeVM) } pub(crate) fn create_cache(&self) -> PikeVMCache { PikeVMCache::new(self) } #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn get(&self) -> &PikeVMEngine { &self.0 } } #[derive(Debug)] pub(crate) struct PikeVMEngine(pikevm::PikeVM); impl PikeVMEngine { pub(crate) fn new( info: &RegexInfo, pre: Option<Prefilter>, nfa: &NFA, ) -> Result<PikeVMEngine, BuildError> { let pikevm_config = pikevm::Config::new() .match_kind(info.config().get_match_kind()) .prefilter(pre); let engine = pikevm::Builder::new() .configure(pikevm_config) .build_from_nfa(nfa.clone()) .map_err(BuildError::nfa)?; debug!("PikeVM built"); Ok(PikeVMEngine(engine)) } #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn is_match( &self, cache: &mut PikeVMCache, input: &Input<'_>, ) -> bool { self.0.is_match(cache.0.as_mut().unwrap(), input.clone()) } #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn search_slots( &self, cache: &mut PikeVMCache, input: &Input<'_>, slots: &mut [Option<NonMaxUsize>], ) -> Option<PatternID> { self.0.search_slots(cache.0.as_mut().unwrap(), input, slots) } #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn which_overlapping_matches( &self, cache: &mut PikeVMCache, input: &Input<'_>, patset: &mut PatternSet, ) { self.0.which_overlapping_matches( cache.0.as_mut().unwrap(), input, patset, ) } } #[derive(Clone, Debug)] pub(crate) struct PikeVMCache(Option<pikevm::Cache>); impl PikeVMCache { pub(crate) fn none() -> PikeVMCache { PikeVMCache(None) } pub(crate) fn new(builder: &PikeVM) -> PikeVMCache { PikeVMCache(Some(builder.get().0.create_cache())) } pub(crate) fn reset(&mut self, builder: &PikeVM) { self.0.as_mut().unwrap().reset(&builder.get().0); } pub(crate) fn memory_usage(&self) -> usize { self.0.as_ref().map_or(0, |c| c.memory_usage()) } } #[derive(Debug)] pub(crate) struct BoundedBacktracker(Option<BoundedBacktrackerEngine>); impl BoundedBacktracker { pub(crate) fn new( info: &RegexInfo, pre: Option<Prefilter>, nfa: &NFA, ) -> Result<BoundedBacktracker, BuildError> { BoundedBacktrackerEngine::new(info, pre, nfa).map(BoundedBacktracker) } pub(crate) fn create_cache(&self) -> BoundedBacktrackerCache { BoundedBacktrackerCache::new(self) } #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn get( &self, input: &Input<'_>, ) -> Option<&BoundedBacktrackerEngine> { let engine = self.0.as_ref()?; // It is difficult to make the backtracker give up early if it is // guaranteed to eventually wind up in a match state. This is because // of the greedy nature of a backtracker: it just blindly mushes // forward. 
Every other regex engine is able to give up more quickly, // so even if the backtracker might be able to zip through faster than // (say) the PikeVM, we prefer the theoretical benefit that some other // engine might be able to scan much less of the haystack than the // backtracker. // // Now, if the haystack is really short already, then we allow the // backtracker to run. (This hasn't been litigated quantitatively with // benchmarks. Just a hunch.) if input.get_earliest() && input.haystack().len() > 128 { return None; } // If the backtracker is just going to return an error because the // haystack is too long, then obviously do not use it. if input.get_span().len() > engine.max_haystack_len() { return None; } Some(engine) } } #[derive(Debug)] pub(crate) struct BoundedBacktrackerEngine( #[cfg(feature = "nfa-backtrack")] backtrack::BoundedBacktracker, #[cfg(not(feature = "nfa-backtrack"))] (), ); impl BoundedBacktrackerEngine { pub(crate) fn new( info: &RegexInfo, pre: Option<Prefilter>, nfa: &NFA, ) -> Result<Option<BoundedBacktrackerEngine>, BuildError> { #[cfg(feature = "nfa-backtrack")] { if !info.config().get_backtrack() || info.config().get_match_kind() != MatchKind::LeftmostFirst { return Ok(None); } let backtrack_config = backtrack::Config::new().prefilter(pre); let engine = backtrack::Builder::new() .configure(backtrack_config) .build_from_nfa(nfa.clone()) .map_err(BuildError::nfa)?; debug!("BoundedBacktracker built"); Ok(Some(BoundedBacktrackerEngine(engine))) } #[cfg(not(feature = "nfa-backtrack"))] { Ok(None) } } #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn is_match( &self, cache: &mut BoundedBacktrackerCache, input: &Input<'_>, ) -> bool { #[cfg(feature = "nfa-backtrack")] { // OK because we only permit access to this engine when we know // the haystack is short enough for the backtracker to run without // reporting an error. self.0 .try_is_match(cache.0.as_mut().unwrap(), input.clone()) .unwrap() } #[cfg(not(feature = "nfa-backtrack"))] { // Impossible to reach because this engine is never constructed // if the requisite features aren't enabled. unreachable!() } } #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn search_slots( &self, cache: &mut BoundedBacktrackerCache, input: &Input<'_>, slots: &mut [Option<NonMaxUsize>], ) -> Option<PatternID> { #[cfg(feature = "nfa-backtrack")] { // OK because we only permit access to this engine when we know // the haystack is short enough for the backtracker to run without // reporting an error. self.0 .try_search_slots(cache.0.as_mut().unwrap(), input, slots) .unwrap() } #[cfg(not(feature = "nfa-backtrack"))] { // Impossible to reach because this engine is never constructed // if the requisite features aren't enabled. unreachable!() } } #[cfg_attr(feature = "perf-inline", inline(always))] fn max_haystack_len(&self) -> usize { #[cfg(feature = "nfa-backtrack")] { self.0.max_haystack_len() } #[cfg(not(feature = "nfa-backtrack"))] { // Impossible to reach because this engine is never constructed // if the requisite features aren't enabled. 
unreachable!() } } } #[derive(Clone, Debug)] pub(crate) struct BoundedBacktrackerCache( #[cfg(feature = "nfa-backtrack")] Option<backtrack::Cache>, #[cfg(not(feature = "nfa-backtrack"))] (), ); impl BoundedBacktrackerCache { pub(crate) fn none() -> BoundedBacktrackerCache { #[cfg(feature = "nfa-backtrack")] { BoundedBacktrackerCache(None) } #[cfg(not(feature = "nfa-backtrack"))] { BoundedBacktrackerCache(()) } } pub(crate) fn new( builder: &BoundedBacktracker, ) -> BoundedBacktrackerCache { #[cfg(feature = "nfa-backtrack")] { BoundedBacktrackerCache( builder.0.as_ref().map(|e| e.0.create_cache()), ) } #[cfg(not(feature = "nfa-backtrack"))] { BoundedBacktrackerCache(()) } } pub(crate) fn reset(&mut self, builder: &BoundedBacktracker) { #[cfg(feature = "nfa-backtrack")] if let Some(ref e) = builder.0 { self.0.as_mut().unwrap().reset(&e.0); } } pub(crate) fn memory_usage(&self) -> usize { #[cfg(feature = "nfa-backtrack")] { self.0.as_ref().map_or(0, |c| c.memory_usage()) } #[cfg(not(feature = "nfa-backtrack"))] { 0 } } } #[derive(Debug)] pub(crate) struct OnePass(Option<OnePassEngine>); impl OnePass { pub(crate) fn new(info: &RegexInfo, nfa: &NFA) -> OnePass { OnePass(OnePassEngine::new(info, nfa)) } pub(crate) fn create_cache(&self) -> OnePassCache { OnePassCache::new(self) } #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn get(&self, input: &Input<'_>) -> Option<&OnePassEngine> { let engine = self.0.as_ref()?; if !input.get_anchored().is_anchored() && !engine.get_nfa().is_always_start_anchored() { return None; } Some(engine) } pub(crate) fn memory_usage(&self) -> usize { self.0.as_ref().map_or(0, |e| e.memory_usage()) } } #[derive(Debug)] pub(crate) struct OnePassEngine( #[cfg(feature = "dfa-onepass")] onepass::DFA, #[cfg(not(feature = "dfa-onepass"))] (), ); impl OnePassEngine { pub(crate) fn new(info: &RegexInfo, nfa: &NFA) -> Option<OnePassEngine> { #[cfg(feature = "dfa-onepass")] { if !info.config().get_onepass() { return None; } // In order to even attempt building a one-pass DFA, we require // that we either have at least one explicit capturing group or // there's a Unicode word boundary somewhere. If we don't have // either of these things, then the lazy DFA will almost certainly // be useable and be much faster. The only case where it might // not is if the lazy DFA isn't utilizing its cache effectively, // but in those cases, the underlying regex is almost certainly // not one-pass or is too big to fit within the current one-pass // implementation limits. if info.props_union().explicit_captures_len() == 0 && !info.props_union().look_set().contains_word_unicode() { debug!("not building OnePass because it isn't worth it"); return None; } let onepass_config = onepass::Config::new() .match_kind(info.config().get_match_kind()) // Like for the lazy DFA, we unconditionally enable this // because it doesn't cost much and makes the API more // flexible. 
.starts_for_each_pattern(true) .byte_classes(info.config().get_byte_classes()) .size_limit(info.config().get_onepass_size_limit()); let result = onepass::Builder::new() .configure(onepass_config) .build_from_nfa(nfa.clone()); let engine = match result { Ok(engine) => engine, Err(_err) => { debug!("OnePass failed to build: {}", _err); return None; } }; debug!("OnePass built, {} bytes", engine.memory_usage()); Some(OnePassEngine(engine)) } #[cfg(not(feature = "dfa-onepass"))] { None } } #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn search_slots( &self, cache: &mut OnePassCache, input: &Input<'_>, slots: &mut [Option<NonMaxUsize>], ) -> Option<PatternID> { #[cfg(feature = "dfa-onepass")] { // OK because we only permit getting a OnePassEngine when we know // the search is anchored and thus an error cannot occur. self.0 .try_search_slots(cache.0.as_mut().unwrap(), input, slots) .unwrap() } #[cfg(not(feature = "dfa-onepass"))] { // Impossible to reach because this engine is never constructed // if the requisite features aren't enabled. unreachable!() } } pub(crate) fn memory_usage(&self) -> usize { #[cfg(feature = "dfa-onepass")] { self.0.memory_usage() } #[cfg(not(feature = "dfa-onepass"))] { // Impossible to reach because this engine is never constructed // if the requisite features aren't enabled. unreachable!() } } #[cfg_attr(feature = "perf-inline", inline(always))] fn get_nfa(&self) -> &NFA { #[cfg(feature = "dfa-onepass")] { self.0.get_nfa() } #[cfg(not(feature = "dfa-onepass"))] { // Impossible to reach because this engine is never constructed // if the requisite features aren't enabled. unreachable!() } } } #[derive(Clone, Debug)] pub(crate) struct OnePassCache( #[cfg(feature = "dfa-onepass")] Option<onepass::Cache>, #[cfg(not(feature = "dfa-onepass"))] (), ); impl OnePassCache { pub(crate) fn none() -> OnePassCache { #[cfg(feature = "dfa-onepass")] { OnePassCache(None) } #[cfg(not(feature = "dfa-onepass"))] { OnePassCache(()) } } pub(crate) fn new(builder: &OnePass) -> OnePassCache { #[cfg(feature = "dfa-onepass")] { OnePassCache(builder.0.as_ref().map(|e| e.0.create_cache())) } #[cfg(not(feature = "dfa-onepass"))] { OnePassCache(()) } } pub(crate) fn reset(&mut self, builder: &OnePass) { #[cfg(feature = "dfa-onepass")] if let Some(ref e) = builder.0 { self.0.as_mut().unwrap().reset(&e.0); } } pub(crate) fn memory_usage(&self) -> usize { #[cfg(feature = "dfa-onepass")] { self.0.as_ref().map_or(0, |c| c.memory_usage()) } #[cfg(not(feature = "dfa-onepass"))] { 0 } } } #[derive(Debug)] pub(crate) struct Hybrid(Option<HybridEngine>); impl Hybrid { pub(crate) fn none() -> Hybrid { Hybrid(None) } pub(crate) fn new( info: &RegexInfo, pre: Option<Prefilter>, nfa: &NFA, nfarev: &NFA, ) -> Hybrid { Hybrid(HybridEngine::new(info, pre, nfa, nfarev)) } pub(crate) fn create_cache(&self) -> HybridCache { HybridCache::new(self) } #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn get(&self, _input: &Input<'_>) -> Option<&HybridEngine> { let engine = self.0.as_ref()?; Some(engine) } pub(crate) fn is_some(&self) -> bool { self.0.is_some() } } #[derive(Debug)] pub(crate) struct HybridEngine( #[cfg(feature = "hybrid")] hybrid::regex::Regex, #[cfg(not(feature = "hybrid"))] (), ); impl HybridEngine { pub(crate) fn new( info: &RegexInfo, pre: Option<Prefilter>, nfa: &NFA, nfarev: &NFA, ) -> Option<HybridEngine> { #[cfg(feature = "hybrid")] { if !info.config().get_hybrid() { return None; } let dfa_config = hybrid::dfa::Config::new() 
.match_kind(info.config().get_match_kind()) .prefilter(pre.clone()) // Enabling this is necessary for ensuring we can service any // kind of 'Input' search without error. For the lazy DFA, // this is not particularly costly, since the start states are // generated lazily. .starts_for_each_pattern(true) .byte_classes(info.config().get_byte_classes()) .unicode_word_boundary(true) .specialize_start_states(pre.is_some()) .cache_capacity(info.config().get_hybrid_cache_capacity()) // This makes it possible for building a lazy DFA to // fail even though the NFA has already been built. Namely, // if the cache capacity is too small to fit some minimum // number of states (which is small, like 4 or 5), then the // DFA will refuse to build. // // We shouldn't enable this to make building always work, since // this could cause the allocation of a cache bigger than the // provided capacity amount. // // This is effectively the only reason why building a lazy DFA // could fail. If it does, then we simply suppress the error // and return None. .skip_cache_capacity_check(false) // This and enabling heuristic Unicode word boundary support // above make it so the lazy DFA can quit at match time. .minimum_cache_clear_count(Some(3)) .minimum_bytes_per_state(Some(10)); let result = hybrid::dfa::Builder::new() .configure(dfa_config.clone()) .build_from_nfa(nfa.clone()); let fwd = match result { Ok(fwd) => fwd, Err(_err) => { debug!("forward lazy DFA failed to build: {}", _err); return None; } }; let result = hybrid::dfa::Builder::new() .configure( dfa_config .clone() .match_kind(MatchKind::All) .prefilter(None) .specialize_start_states(false), ) .build_from_nfa(nfarev.clone()); let rev = match result { Ok(rev) => rev, Err(_err) => { debug!("reverse lazy DFA failed to build: {}", _err); return None; } }; let engine = hybrid::regex::Builder::new().build_from_dfas(fwd, rev); debug!("lazy DFA built"); Some(HybridEngine(engine)) } #[cfg(not(feature = "hybrid"))] { None } } #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn try_search( &self, cache: &mut HybridCache, input: &Input<'_>, ) -> Result<Option<Match>, RetryFailError> { #[cfg(feature = "hybrid")] { let cache = cache.0.as_mut().unwrap(); self.0.try_search(cache, input).map_err(|e| e.into()) } #[cfg(not(feature = "hybrid"))] { // Impossible to reach because this engine is never constructed // if the requisite features aren't enabled. unreachable!() } } #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn try_search_half_fwd( &self, cache: &mut HybridCache, input: &Input<'_>, ) -> Result<Option<HalfMatch>, RetryFailError> { #[cfg(feature = "hybrid")] { let fwd = self.0.forward(); let mut fwdcache = cache.0.as_mut().unwrap().as_parts_mut().0; fwd.try_search_fwd(&mut fwdcache, input).map_err(|e| e.into()) } #[cfg(not(feature = "hybrid"))] { // Impossible to reach because this engine is never constructed // if the requisite features aren't enabled. unreachable!() } } #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn try_search_half_fwd_stopat( &self, cache: &mut HybridCache, input: &Input<'_>, ) -> Result<Result<HalfMatch, usize>, RetryFailError> { #[cfg(feature = "hybrid")] { let dfa = self.0.forward(); let mut cache = cache.0.as_mut().unwrap().as_parts_mut().0; crate::meta::stopat::hybrid_try_search_half_fwd( dfa, &mut cache, input, ) } #[cfg(not(feature = "hybrid"))] { // Impossible to reach because this engine is never constructed // if the requisite features aren't enabled. 
unreachable!() } } #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn try_search_half_rev( &self, cache: &mut HybridCache, input: &Input<'_>, ) -> Result<Option<HalfMatch>, RetryFailError> { #[cfg(feature = "hybrid")] { let rev = self.0.reverse(); let mut revcache = cache.0.as_mut().unwrap().as_parts_mut().1; rev.try_search_rev(&mut revcache, input).map_err(|e| e.into()) } #[cfg(not(feature = "hybrid"))] { // Impossible to reach because this engine is never constructed // if the requisite features aren't enabled. unreachable!() } } #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn try_search_half_rev_limited( &self, cache: &mut HybridCache, input: &Input<'_>, min_start: usize, ) -> Result<Option<HalfMatch>, RetryError> { #[cfg(feature = "hybrid")] { let dfa = self.0.reverse(); let mut cache = cache.0.as_mut().unwrap().as_parts_mut().1; crate::meta::limited::hybrid_try_search_half_rev( dfa, &mut cache, input, min_start, ) } #[cfg(not(feature = "hybrid"))] { // Impossible to reach because this engine is never constructed // if the requisite features aren't enabled. unreachable!() } } #[inline] pub(crate) fn try_which_overlapping_matches( &self, cache: &mut HybridCache, input: &Input<'_>, patset: &mut PatternSet, ) -> Result<(), RetryFailError> { #[cfg(feature = "hybrid")] { let fwd = self.0.forward(); let mut fwdcache = cache.0.as_mut().unwrap().as_parts_mut().0; fwd.try_which_overlapping_matches(&mut fwdcache, input, patset) .map_err(|e| e.into()) } #[cfg(not(feature = "hybrid"))] { // Impossible to reach because this engine is never constructed // if the requisite features aren't enabled. unreachable!() } } } #[derive(Clone, Debug)] pub(crate) struct HybridCache( #[cfg(feature = "hybrid")] Option<hybrid::regex::Cache>, #[cfg(not(feature = "hybrid"))] (), ); impl HybridCache { pub(crate) fn none() -> HybridCache { #[cfg(feature = "hybrid")] { HybridCache(None) } #[cfg(not(feature = "hybrid"))] { HybridCache(()) } } pub(crate) fn new(builder: &Hybrid) -> HybridCache { #[cfg(feature = "hybrid")] { HybridCache(builder.0.as_ref().map(|e| e.0.create_cache())) } #[cfg(not(feature = "hybrid"))] { HybridCache(()) } } pub(crate) fn reset(&mut self, builder: &Hybrid) { #[cfg(feature = "hybrid")] if let Some(ref e) = builder.0 { self.0.as_mut().unwrap().reset(&e.0); } } pub(crate) fn memory_usage(&self) -> usize { #[cfg(feature = "hybrid")] { self.0.as_ref().map_or(0, |c| c.memory_usage()) } #[cfg(not(feature = "hybrid"))] { 0 } } } #[derive(Debug)] pub(crate) struct DFA(Option<DFAEngine>); impl DFA { pub(crate) fn none() -> DFA { DFA(None) } pub(crate) fn new( info: &RegexInfo, pre: Option<Prefilter>, nfa: &NFA, nfarev: &NFA, ) -> DFA { DFA(DFAEngine::new(info, pre, nfa, nfarev)) } #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn get(&self, _input: &Input<'_>) -> Option<&DFAEngine> { let engine = self.0.as_ref()?; Some(engine) } pub(crate) fn is_some(&self) -> bool { self.0.is_some() } pub(crate) fn memory_usage(&self) -> usize { self.0.as_ref().map_or(0, |e| e.memory_usage()) } } #[derive(Debug)] pub(crate) struct DFAEngine( #[cfg(feature = "dfa-build")] dfa::regex::Regex, #[cfg(not(feature = "dfa-build"))] (), ); impl DFAEngine { pub(crate) fn new( info: &RegexInfo, pre: Option<Prefilter>, nfa: &NFA, nfarev: &NFA, ) -> Option<DFAEngine> { #[cfg(feature = "dfa-build")] { if !info.config().get_dfa() { return None; } // If our NFA is anything but small, don't even bother with a DFA. 
if let Some(state_limit) = info.config().get_dfa_state_limit() { if nfa.states().len() > state_limit { debug!( "skipping full DFA because NFA has {} states, \ which exceeds the heuristic limit of {}", nfa.states().len(), state_limit, ); return None; } } // We cut the size limit in four because the total heap used by // DFA construction is determinization aux memory and the DFA // itself, and those things are configured independently in the // lower level DFA builder API. And then split that in two because // of forward and reverse DFAs. let size_limit = info.config().get_dfa_size_limit().map(|n| n / 4); let dfa_config = dfa::dense::Config::new() .match_kind(info.config().get_match_kind()) .prefilter(pre.clone()) // Enabling this is necessary for ensuring we can service any // kind of 'Input' search without error. For the full DFA, this // can be quite costly. But since we have such a small bound // on the size of the DFA, in practice, any multl-regexes are // probably going to blow the limit anyway. .starts_for_each_pattern(true) .byte_classes(info.config().get_byte_classes()) .unicode_word_boundary(true) .specialize_start_states(pre.is_some()) .determinize_size_limit(size_limit) .dfa_size_limit(size_limit); let result = dfa::dense::Builder::new() .configure(dfa_config.clone()) .build_from_nfa(&nfa); let fwd = match result { Ok(fwd) => fwd, Err(_err) => { debug!("forward full DFA failed to build: {}", _err); return None; } }; let result = dfa::dense::Builder::new() .configure( dfa_config .clone() // We never need unanchored reverse searches, so // there's no point in building it into the DFA, which // WILL take more space. (This isn't done for the lazy // DFA because the DFA is, well, lazy. It doesn't pay // the cost for supporting unanchored searches unless // you actually do an unanchored search, which we // don't.) .start_kind(dfa::StartKind::Anchored) .match_kind(MatchKind::All) .prefilter(None) .specialize_start_states(false), ) .build_from_nfa(&nfarev); let rev = match result { Ok(rev) => rev, Err(_err) => { debug!("reverse full DFA failed to build: {}", _err); return None; } }; let engine = dfa::regex::Builder::new().build_from_dfas(fwd, rev); debug!( "fully compiled forward and reverse DFAs built, {} bytes", engine.forward().memory_usage() + engine.reverse().memory_usage(), ); Some(DFAEngine(engine)) } #[cfg(not(feature = "dfa-build"))] { None } } #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn try_search( &self, input: &Input<'_>, ) -> Result<Option<Match>, RetryFailError> { #[cfg(feature = "dfa-build")] { self.0.try_search(input).map_err(|e| e.into()) } #[cfg(not(feature = "dfa-build"))] { // Impossible to reach because this engine is never constructed // if the requisite features aren't enabled. unreachable!() } } #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn try_search_half_fwd( &self, input: &Input<'_>, ) -> Result<Option<HalfMatch>, RetryFailError> { #[cfg(feature = "dfa-build")] { use crate::dfa::Automaton; self.0.forward().try_search_fwd(input).map_err(|e| e.into()) } #[cfg(not(feature = "dfa-build"))] { // Impossible to reach because this engine is never constructed // if the requisite features aren't enabled. 
unreachable!() } } #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn try_search_half_fwd_stopat( &self, input: &Input<'_>, ) -> Result<Result<HalfMatch, usize>, RetryFailError> { #[cfg(feature = "dfa-build")] { let dfa = self.0.forward(); crate::meta::stopat::dfa_try_search_half_fwd(dfa, input) } #[cfg(not(feature = "dfa-build"))] { // Impossible to reach because this engine is never constructed // if the requisite features aren't enabled. unreachable!() } } #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn try_search_half_rev( &self, input: &Input<'_>, ) -> Result<Option<HalfMatch>, RetryFailError> { #[cfg(feature = "dfa-build")] { use crate::dfa::Automaton; self.0.reverse().try_search_rev(&input).map_err(|e| e.into()) } #[cfg(not(feature = "dfa-build"))] { // Impossible to reach because this engine is never constructed // if the requisite features aren't enabled. unreachable!() } } #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn try_search_half_rev_limited( &self, input: &Input<'_>, min_start: usize, ) -> Result<Option<HalfMatch>, RetryError> { #[cfg(feature = "dfa-build")] { let dfa = self.0.reverse(); crate::meta::limited::dfa_try_search_half_rev( dfa, input, min_start, ) } #[cfg(not(feature = "dfa-build"))] { // Impossible to reach because this engine is never constructed // if the requisite features aren't enabled. unreachable!() } } #[inline] pub(crate) fn try_which_overlapping_matches( &self, input: &Input<'_>, patset: &mut PatternSet, ) -> Result<(), RetryFailError> { #[cfg(feature = "dfa-build")] { use crate::dfa::Automaton; self.0 .forward() .try_which_overlapping_matches(input, patset) .map_err(|e| e.into()) } #[cfg(not(feature = "dfa-build"))] { // Impossible to reach because this engine is never constructed // if the requisite features aren't enabled. unreachable!() } } pub(crate) fn memory_usage(&self) -> usize { #[cfg(feature = "dfa-build")] { self.0.forward().memory_usage() + self.0.reverse().memory_usage() } #[cfg(not(feature = "dfa-build"))] { // Impossible to reach because this engine is never constructed // if the requisite features aren't enabled. unreachable!() } } } #[derive(Debug)] pub(crate) struct ReverseHybrid(Option<ReverseHybridEngine>); impl ReverseHybrid { pub(crate) fn none() -> ReverseHybrid { ReverseHybrid(None) } pub(crate) fn new(info: &RegexInfo, nfarev: &NFA) -> ReverseHybrid { ReverseHybrid(ReverseHybridEngine::new(info, nfarev)) } pub(crate) fn create_cache(&self) -> ReverseHybridCache { ReverseHybridCache::new(self) } #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn get( &self, _input: &Input<'_>, ) -> Option<&ReverseHybridEngine> { let engine = self.0.as_ref()?; Some(engine) } } #[derive(Debug)] pub(crate) struct ReverseHybridEngine( #[cfg(feature = "hybrid")] hybrid::dfa::DFA, #[cfg(not(feature = "hybrid"))] (), ); impl ReverseHybridEngine { pub(crate) fn new( info: &RegexInfo, nfarev: &NFA, ) -> Option<ReverseHybridEngine> { #[cfg(feature = "hybrid")] { if !info.config().get_hybrid() { return None; } // Since we only use this for reverse searches, we can hard-code // a number of things like match semantics, prefilters, starts // for each pattern and so on. 
let dfa_config = hybrid::dfa::Config::new() .match_kind(MatchKind::All) .prefilter(None) .starts_for_each_pattern(false) .byte_classes(info.config().get_byte_classes()) .unicode_word_boundary(true) .specialize_start_states(false) .cache_capacity(info.config().get_hybrid_cache_capacity()) .skip_cache_capacity_check(false) .minimum_cache_clear_count(Some(3)) .minimum_bytes_per_state(Some(10)); let result = hybrid::dfa::Builder::new() .configure(dfa_config) .build_from_nfa(nfarev.clone()); let rev = match result { Ok(rev) => rev, Err(_err) => { debug!("lazy reverse DFA failed to build: {}", _err); return None; } }; debug!("lazy reverse DFA built"); Some(ReverseHybridEngine(rev)) } #[cfg(not(feature = "hybrid"))] { None } } #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn try_search_half_rev_limited( &self, cache: &mut ReverseHybridCache, input: &Input<'_>, min_start: usize, ) -> Result<Option<HalfMatch>, RetryError> { #[cfg(feature = "hybrid")] { let dfa = &self.0; let mut cache = cache.0.as_mut().unwrap(); crate::meta::limited::hybrid_try_search_half_rev( dfa, &mut cache, input, min_start, ) } #[cfg(not(feature = "hybrid"))] { // Impossible to reach because this engine is never constructed // if the requisite features aren't enabled. unreachable!() } } } #[derive(Clone, Debug)] pub(crate) struct ReverseHybridCache( #[cfg(feature = "hybrid")] Option<hybrid::dfa::Cache>, #[cfg(not(feature = "hybrid"))] (), ); impl ReverseHybridCache { pub(crate) fn none() -> ReverseHybridCache { #[cfg(feature = "hybrid")] { ReverseHybridCache(None) } #[cfg(not(feature = "hybrid"))] { ReverseHybridCache(()) } } pub(crate) fn new(builder: &ReverseHybrid) -> ReverseHybridCache { #[cfg(feature = "hybrid")] { ReverseHybridCache(builder.0.as_ref().map(|e| e.0.create_cache())) } #[cfg(not(feature = "hybrid"))] { ReverseHybridCache(()) } } pub(crate) fn reset(&mut self, builder: &ReverseHybrid) { #[cfg(feature = "hybrid")] if let Some(ref e) = builder.0 { self.0.as_mut().unwrap().reset(&e.0); } } pub(crate) fn memory_usage(&self) -> usize { #[cfg(feature = "hybrid")] { self.0.as_ref().map_or(0, |c| c.memory_usage()) } #[cfg(not(feature = "hybrid"))] { 0 } } } #[derive(Debug)] pub(crate) struct ReverseDFA(Option<ReverseDFAEngine>); impl ReverseDFA { pub(crate) fn none() -> ReverseDFA { ReverseDFA(None) } pub(crate) fn new(info: &RegexInfo, nfarev: &NFA) -> ReverseDFA { ReverseDFA(ReverseDFAEngine::new(info, nfarev)) } #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn get(&self, _input: &Input<'_>) -> Option<&ReverseDFAEngine> { let engine = self.0.as_ref()?; Some(engine) } pub(crate) fn is_some(&self) -> bool { self.0.is_some() } pub(crate) fn memory_usage(&self) -> usize { self.0.as_ref().map_or(0, |e| e.memory_usage()) } } #[derive(Debug)] pub(crate) struct ReverseDFAEngine( #[cfg(feature = "dfa-build")] dfa::dense::DFA<Vec<u32>>, #[cfg(not(feature = "dfa-build"))] (), ); impl ReverseDFAEngine { pub(crate) fn new( info: &RegexInfo, nfarev: &NFA, ) -> Option<ReverseDFAEngine> { #[cfg(feature = "dfa-build")] { if !info.config().get_dfa() { return None; } // If our NFA is anything but small, don't even bother with a DFA. 
if let Some(state_limit) = info.config().get_dfa_state_limit() { if nfarev.states().len() > state_limit { debug!( "skipping full reverse DFA because NFA has {} states, \ which exceeds the heuristic limit of {}", nfarev.states().len(), state_limit, ); return None; } } // We cut the size limit in two because the total heap used by DFA // construction is determinization aux memory and the DFA itself, // and those things are configured independently in the lower level // DFA builder API. let size_limit = info.config().get_dfa_size_limit().map(|n| n / 2); // Since we only use this for reverse searches, we can hard-code // a number of things like match semantics, prefilters, starts // for each pattern and so on. We also disable acceleration since // it's incompatible with limited searches (which is the only // operation we support for this kind of engine at the moment). let dfa_config = dfa::dense::Config::new() .match_kind(MatchKind::All) .prefilter(None) .accelerate(false) .start_kind(dfa::StartKind::Anchored) .starts_for_each_pattern(false) .byte_classes(info.config().get_byte_classes()) .unicode_word_boundary(true) .specialize_start_states(false) .determinize_size_limit(size_limit) .dfa_size_limit(size_limit); let result = dfa::dense::Builder::new() .configure(dfa_config) .build_from_nfa(&nfarev); let rev = match result { Ok(rev) => rev, Err(_err) => { debug!("full reverse DFA failed to build: {}", _err); return None; } }; debug!( "fully compiled reverse DFA built, {} bytes", rev.memory_usage() ); Some(ReverseDFAEngine(rev)) } #[cfg(not(feature = "dfa-build"))] { None } } #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn try_search_half_rev_limited( &self, input: &Input<'_>, min_start: usize, ) -> Result<Option<HalfMatch>, RetryError> { #[cfg(feature = "dfa-build")] { let dfa = &self.0; crate::meta::limited::dfa_try_search_half_rev( dfa, input, min_start, ) } #[cfg(not(feature = "dfa-build"))] { // Impossible to reach because this engine is never constructed // if the requisite features aren't enabled. unreachable!() } } pub(crate) fn memory_usage(&self) -> usize { #[cfg(feature = "dfa-build")] { self.0.memory_usage() } #[cfg(not(feature = "dfa-build"))] { // Impossible to reach because this engine is never constructed // if the requisite features aren't enabled. unreachable!() } } } <file_sep>/regex-cli/args/dfa.rs use { anyhow::Context, lexopt::{Arg, Parser}, regex_automata::{ dfa::{self, dense, sparse}, nfa::thompson::NFA, MatchKind, }, }; use crate::args::{self, flags, Configurable, Usage}; /// Exposes the configuration for a dense (and also therefore sparse) DFAs. #[derive(Debug, Default)] pub struct Config { dense: dense::Config, } impl Config { /// Return a `dfa::dense::Config` object from this configuration. pub fn dense(&self) -> anyhow::Result<dense::Config> { Ok(self.dense.clone()) } /// Returns a new configuration that compiles a reverse DFA from a reverse /// NFA. The caller is responsible for reversing the NFA. pub fn reversed(&self) -> Config { let dense = self .dense .clone() .prefilter(None) .start_kind(dfa::StartKind::Anchored) .match_kind(MatchKind::All); Config { dense } } /// Runs determinization on the given NFA to produce a dense DFA. If /// determinization fails, then an error is returned. pub fn from_nfa(&self, nfa: &NFA) -> anyhow::Result<dense::DFA<Vec<u32>>> { dense::Builder::new() .configure(self.dense()?) 
.build_from_nfa(nfa) .context("failed to compile dense DFA") } /// Runs determinization on the given NFA to produce a dense DFA, and then /// converts it to a sparse DFA. If determinization or conversion to a /// sparse DFA fails, then an error is returned. pub fn from_nfa_sparse( &self, nfa: &NFA, ) -> anyhow::Result<sparse::DFA<Vec<u8>>> { self.from_nfa(nfa)?.to_sparse().context("failed to compile sparse DFA") } } impl Configurable for Config { fn configure( &mut self, p: &mut Parser, arg: &mut Arg, ) -> anyhow::Result<bool> { match *arg { Arg::Short('A') | Arg::Long("no-accelerate") => { self.dense = self.dense.clone().accelerate(false); } Arg::Long("minimize") => { self.dense = self.dense.clone().minimize(true); } Arg::Short('k') | Arg::Long("match-kind") => { let kind: flags::MatchKind = args::parse(p, "-k/--match-kind")?; self.dense = self.dense.clone().match_kind(kind.kind); } Arg::Long("start-kind") => { let kind: flags::StartKind = args::parse(p, "--start-kind")?; self.dense = self.dense.clone().start_kind(kind.kind); } Arg::Long("starts-for-each-pattern") => { self.dense = self.dense.clone().starts_for_each_pattern(true); } Arg::Short('C') | Arg::Long("no-byte-classes") => { self.dense = self.dense.clone().byte_classes(false); } Arg::Long("unicode-word-boundary") => { self.dense = self.dense.clone().unicode_word_boundary(true); } Arg::Long("quit") => { let set: flags::ByteSet = args::parse(p, "--quit")?; for &byte in set.0.iter() { self.dense = self.dense.clone().quit(byte, true); } } Arg::Long("specialize-start-states") => { self.dense = self.dense.clone().specialize_start_states(true); } Arg::Long("dfa-size-limit") => { let limit = args::parse_maybe(p, "--dfa-size-limit")?; self.dense = self.dense.clone().dfa_size_limit(limit); } Arg::Long("determinize-size-limit") => { let limit = args::parse_maybe(p, "--determinize-size-limit")?; self.dense = self.dense.clone().determinize_size_limit(limit); } _ => return Ok(false), } Ok(true) } fn usage(&self) -> &[Usage] { const USAGES: &'static [Usage] = &[ Usage::new( "-A, --no-accelerate", "Disable DFA state acceleration.", r#" Disable DFA state acceleration. It is enabled by default. When enabled, DFA states with few outgoing transitions are detected and tagged with special information that fast vector routines should search for those outgoing transitions whenever that state is entered. Acceleration is generally a heuristic optimization, since if the vector routine doesn't in practice skip over many bytes, it can overall make the DFA search slower. "#, ), Usage::new( "--minimize", "Minimize the DFA.", r#" When enabled, the DFA is minimized. A minimized DFA is said to be as small as it possibly can be for the given regular language. Note that DFA minimization can take a very long time. Generally speaking, the benefits of minimization are a smaller DFA. Usually that doesn't directly translate to faster search times, but it can if it enables more efficient use of your CPU's cache. DFA minimization can also enable more opportunities for DFA acceleration. "#, ), flags::MatchKind::USAGE, flags::StartKind::USAGE, Usage::new( "--starts-for-each-pattern", "Add anchored start states for each pattern.", r#" Add anchored start states for each pattern. This permits running an anchored search for a specific pattern using the --pattern-id flag. (Assuming this is a search command.) "#, ), Usage::new( "-C, --no-byte-classes", "Disable byte classes.", r#" This causes all bytes to be an equivalence class unto themselves. 
By default, bytes are grouped into equivalence classes to reduce the size of the alphabet for a DFA, thereby decreasing overall space usage. It can be quite convenient to disable byte classes when looking at the debug representation of a DFA. Otherwise, the transitions are much harder for a human to read. "#, ),
Usage::new( "--unicode-word-boundary", "Enable heuristic support for Unicode word boundaries.", r#" Enable heuristic support for Unicode word boundaries. When enabled, a DFA will treat a Unicode word boundary as if it were an ASCII boundary, but will quit if it sees any non-ASCII byte. This is disabled by default, in which case, attempting to compile a DFA with a Unicode word boundary will result in an error. Note that enabling this is very similar to using the --quit flag and providing every non-ASCII byte as a quit byte. The only difference is that when this flag is used, the quit bytes are only added if the pattern contains a Unicode word boundary. "#, ),
Usage::new( "--quit", "Add quit bytes to this DFA.", r#" Add quit bytes to this DFA. When a quit byte is added to a DFA, then every state gets an outgoing transition on that byte that points to a special sentinel "quit" state. If the "quit" state is entered during a search, then an error is returned. The bytes given represent a set and may be specified as a sequence. Escape sequences like \n and \xFF are supported. "#, ),
Usage::new( "--specialize-start-states", "Specializes start states for prefilter support.", r#" When given, start states are "specialized" such that prefilters are better supported. Namely, when start states are specialized they are given a special tag that results in them being treated as a special case when entered at search time. The special case is that a prefilter can be run at that point in an attempt to accelerate the search. In general, it only makes sense to specialize start states when a prefilter is also enabled. Note also that if start states are not specialized (the default), then it is in general not possible to determine whether any given state ID is a start state, unless you've enumerated all possible start states and checked it against that set. "#, ),
Usage::new( "--dfa-size-limit", "Set a limit on heap used by a DFA in bytes.", r#" This sets a limit on the amount of heap memory a DFA can use. The limit is enforced at DFA construction time. If the limit is exceeded, then construction will fail. A special value of 'none' may be given, which disables the limit. "#, ),
Usage::new( "--determinize-size-limit", "Set a limit on heap used by determinization in bytes.", r#" This sets a limit on the amount of heap memory that determinization can use. The limit is enforced during determinization. If the limit is exceeded, then determinization and therefore construction of the DFA will fail. This limit only applies to ancillary heap memory used by determinization and not to the heap memory used by the DFA's transition table itself. To limit the size of the DFA itself, use the --dfa-size-limit flag. A special value of 'none' may be given, which disables the limit.
"#, ), ]; USAGES } } <file_sep>/fuzz/fuzz_targets/fuzz_regex_match.rs #![no_main] use libfuzzer_sys::{arbitrary, fuzz_target, Corpus}; #[derive(arbitrary::Arbitrary)] struct FuzzCase<'a> { pattern: &'a str, haystack: &'a str, case_insensitive: bool, multi_line: bool, dot_matches_new_line: bool, swap_greed: bool, ignore_whitespace: bool, unicode: bool, octal: bool, } impl std::fmt::Debug for FuzzCase<'_> { fn fmt( &self, fmt: &mut std::fmt::Formatter, ) -> Result<(), std::fmt::Error> { let FuzzCase { pattern, case_insensitive, multi_line, dot_matches_new_line, swap_greed, ignore_whitespace, unicode, octal, haystack, } = self; write!( fmt, r#" let Ok(re) = regex::RegexBuilder::new({pattern:?}) .case_insensitive({case_insensitive:?}) .multi_line({multi_line:?}) .dot_matches_new_line({dot_matches_new_line:?}) .swap_greed({swap_greed:?}) .ignore_whitespace({ignore_whitespace:?}) .unicode({unicode:?}) .octal({octal:?}) .size_limit(1<<20) .build() else {{ return }}; re.is_match({haystack:?}); "# ) } } fuzz_target!(|case: FuzzCase| -> Corpus { let _ = env_logger::try_init(); if case.haystack.len() > (16 * (1 << 10)) { return Corpus::Reject; } let Ok(re) = regex::RegexBuilder::new(case.pattern) .case_insensitive(case.case_insensitive) .multi_line(case.multi_line) .dot_matches_new_line(case.dot_matches_new_line) .swap_greed(case.swap_greed) .ignore_whitespace(case.ignore_whitespace) .unicode(case.unicode) .octal(case.octal) .size_limit(1<<18) .build() else { return Corpus::Reject }; re.is_match(case.haystack); Corpus::Keep }); <file_sep>/regex-cli/cmd/find/match/mod.rs use std::io::{stdout, Write}; use { anyhow::Context, bstr::ByteSlice, lexopt::Parser, regex_automata::{Input, Match, MatchError, PatternID}, }; use crate::{ args, util::{self, Table}, }; mod dfa; mod nfa; pub fn run(p: &mut Parser) -> anyhow::Result<()> { const USAGE: &'static str = "\ Executes a search. USAGE: regex-cli find match <engine> ENGINES: backtrack Search with the bounded backtracker regex engine. dense Search with the dense DFA regex engine. hybrid Search with the lazy DFA regex engine. lite Search with the regex-lite engine. meta Search with the meta regex engine. onepass Search with the one-pass DFA regex engine. pikevm Search with the PikeVM regex engine. regex Search with the top-level API regex engine. sparse Search with the sparse DFA regex engine. "; let cmd = args::next_as_command(USAGE, p)?; match &*cmd { "backtrack" => nfa::run_backtrack(p), "dense" => dfa::run_dense(p), "hybrid" => dfa::run_hybrid(p), "lite" => run_lite(p), "meta" => run_meta(p), "onepass" => dfa::run_onepass(p), "pikevm" => nfa::run_pikevm(p), "regex" => run_regex(p), "sparse" => dfa::run_sparse(p), unk => anyhow::bail!("unrecognized command '{}'", unk), } } fn run_regex(p: &mut lexopt::Parser) -> anyhow::Result<()> { const USAGE: &'static str = "\ Executes a search for full matches using the top-level API regex engine. USAGE: regex-cli find match regex [-p <pattern> ...] <haystack-path> regex-cli find match regex [-p <pattern> ...] 
-y <haystack> TIP: use -h for short docs and --help for long docs OPTIONS: %options% "; let mut common = args::common::Config::default(); let mut patterns = args::patterns::Config::only_flags(); let mut haystack = args::haystack::Config::default(); let mut syntax = args::syntax::Config::default(); let mut api = args::api::Config::default(); let mut find = super::Config::default(); args::configure( p, USAGE, &mut [ &mut common, &mut patterns, &mut haystack, &mut syntax, &mut api, &mut find, ], )?; let pats = patterns.get()?; let syn = syntax.syntax()?; let mut table = Table::empty(); let (re, time) = util::timeitr(|| api.from_patterns(&syn, &pats))?; table.add("build regex time", time); // The top-level API doesn't support regex-automata's more granular Input // abstraction. let input = args::input::Config::default(); let search = |input: &Input<'_>| { Ok(re .find_at(input.haystack(), input.start()) .map(|m| Match::new(PatternID::ZERO, m.start()..m.end()))) }; if find.count { run_counts(&mut table, &common, &find, &input, &haystack, 1, search)?; } else { run_search(&mut table, &common, &find, &input, &haystack, search)?; } Ok(()) } fn run_meta(p: &mut lexopt::Parser) -> anyhow::Result<()> { const USAGE: &'static str = "\ Executes a search for full matches using the meta regex engine. USAGE: regex-cli find match meta [-p <pattern> ...] <haystack-path> regex-cli find match meta [-p <pattern> ...] -y <haystack> TIP: use -h for short docs and --help for long docs OPTIONS: %options% "; let mut common = args::common::Config::default(); let mut input = args::input::Config::default(); let mut patterns = args::patterns::Config::only_flags(); let mut haystack = args::haystack::Config::default(); let mut syntax = args::syntax::Config::default(); let mut meta = args::meta::Config::default(); let mut find = super::Config::default(); args::configure( p, USAGE, &mut [ &mut common, &mut input, &mut patterns, &mut haystack, &mut syntax, &mut meta, &mut find, ], )?; let pats = patterns.get()?; let mut table = Table::empty(); let re = if meta.build_from_patterns() { let (re, time) = util::timeitr(|| meta.from_patterns(&syntax, &pats))?; table.add("build meta time", time); re } else { let (asts, time) = util::timeitr(|| syntax.asts(&pats))?; table.add("parse time", time); let (hirs, time) = util::timeitr(|| syntax.hirs(&pats, &asts))?; table.add("translate time", time); let (re, time) = util::timeitr(|| meta.from_hirs(&hirs))?; table.add("build meta time", time); re }; let search = |input: &Input<'_>| Ok(re.search(input)); if find.count { run_counts( &mut table, &common, &find, &input, &haystack, re.pattern_len(), search, )?; } else { run_search(&mut table, &common, &find, &input, &haystack, search)?; } Ok(()) } fn run_lite(p: &mut lexopt::Parser) -> anyhow::Result<()> { const USAGE: &'static str = "\ Executes a search for full matches using the top-level regex-lite engine. Note that since the regex-lite crate doesn't have an API for search arbitrary byte slices, the haystack must be valid UTF-8. If it isn't, this command will report an error. USAGE: regex-cli find match lite [-p <pattern> ...] <haystack-path> regex-cli find match lite [-p <pattern> ...] 
-y <haystack> TIP: use -h for short docs and --help for long docs OPTIONS: %options% "; let mut common = args::common::Config::default(); let mut patterns = args::patterns::Config::only_flags(); let mut haystack = args::haystack::Config::default(); let mut syntax = args::syntax::Config::default(); let mut lite = args::lite::Config::default(); let mut find = super::Config::default(); args::configure( p, USAGE, &mut [ &mut common, &mut patterns, &mut haystack, &mut syntax, &mut lite, &mut find, ], )?; let pats = patterns.get()?; let syn = syntax.syntax()?; let mut table = Table::empty(); let (re, time) = util::timeitr(|| lite.from_patterns(&syn, &pats))?; table.add("build regex time", time); // Check that the haystack is valid UTF-8 since regex-lite doesn't support // searching arbitrary byte sequences. (At time of writing.) haystack.get()?.to_str()?; // The top-level regex-lite API doesn't support regex-automata's more // granular Input abstraction. let input = args::input::Config::default(); let search = |input: &Input<'_>| { let haystack = input.haystack().to_str().unwrap(); Ok(re .find_at(haystack, input.start()) .map(|m| Match::new(PatternID::ZERO, m.start()..m.end()))) }; if find.count { run_counts(&mut table, &common, &find, &input, &haystack, 1, search)?; } else { run_search(&mut table, &common, &find, &input, &haystack, search)?; } Ok(()) } /// A function that takes in a bunch of configuration, runs the given search /// routine, and prints out a table of counts. fn run_counts( table: &mut Table, common: &args::common::Config, find: &super::Config, input: &args::input::Config, haystack: &args::haystack::Config, pattern_len: usize, mut search: impl FnMut(&Input<'_>) -> Result<Option<Match>, MatchError>, ) -> anyhow::Result<()> { let mut out = stdout(); input.with(haystack, |input| { let (counts, time) = util::timeitr(|| { let mut counts = vec![0; pattern_len]; for _ in 0..find.repeat() { let mut it = regex_automata::util::iter::Searcher::new(input.clone()); while let Some(m) = it.try_advance(&mut search)? { counts[m.pattern().as_usize()] += 1; } } Ok::<_, anyhow::Error>(counts) })?; table.add("search time", time); table.add("total matches", counts.iter().copied().sum::<u64>()); if common.table() { table.print(&mut out)?; } if !common.quiet { for (i, &count) in counts.iter().enumerate() { let pid = PatternID::new(i).context("invalid pattern ID")?; writeln!(out, "{}:{}", pid.as_usize(), count)?; } } Ok(()) }) } /// Like `run_counts`, but prints the actual matches instead. fn run_search( table: &mut Table, common: &args::common::Config, find: &super::Config, input: &args::input::Config, haystack: &args::haystack::Config, mut search: impl FnMut(&Input<'_>) -> Result<Option<Match>, MatchError>, ) -> anyhow::Result<()> { let mut out = stdout(); input.with(haystack, |input| { let (matches, time) = util::timeitr(|| { let mut matches = vec![]; for _ in 0..find.repeat() { let mut it = regex_automata::util::iter::Searcher::new(input.clone()); while let Some(m) = it.try_advance(&mut search)? { matches.push(m); } } Ok::<_, anyhow::Error>(matches) })?; table.add("search time", time); table.add("total matches", matches.len()); if common.table() { table.print(&mut out)?; } if !common.quiet { for m in matches.iter() { writeln!( out, "{}:{}:{}:{}", m.pattern().as_usize(), m.start(), m.end(), input.haystack()[m.range()].escape_bytes() )?; } } Ok(()) }) } <file_sep>/regex-automata/src/nfa/thompson/mod.rs /*! 
Defines a Thompson NFA and provides the [`PikeVM`](pikevm::PikeVM) and [`BoundedBacktracker`](backtrack::BoundedBacktracker) regex engines. A Thompson NFA (non-deterministic finite automaton) is arguably _the_ central data type in this library. It is the result of what is commonly referred to as "regex compilation." That is, turning a regex pattern from its concrete syntax string into something that can run a search looks roughly like this: * A `&str` is parsed into a [`regex-syntax::ast::Ast`](regex_syntax::ast::Ast). * An `Ast` is translated into a [`regex-syntax::hir::Hir`](regex_syntax::hir::Hir). * An `Hir` is compiled into a [`NFA`]. * The `NFA` is then used to build one of a few different regex engines: * An `NFA` is used directly in the `PikeVM` and `BoundedBacktracker` engines. * An `NFA` is used by a [hybrid NFA/DFA](crate::hybrid) to build out a DFA's transition table at search time. * An `NFA`, assuming it is one-pass, is used to build a full [one-pass DFA](crate::dfa::onepass) ahead of time. * An `NFA` is used to build a [full DFA](crate::dfa) ahead of time. The [`meta`](crate::meta) regex engine makes all of these choices for you based on various criteria. However, if you have a lower level use case, _you_ can build any of the above regex engines and use them directly. But you must start here by building an `NFA`. # Details It is perhaps worth expanding a bit more on what it means to go through the `&str`->`Ast`->`Hir`->`NFA` process. * Parsing a string into an `Ast` gives it a structured representation. Crucially, the size and amount of work done in this step is proportional to the size of the original string. No optimization or Unicode handling is done at this point. This means that parsing into an `Ast` has very predictable costs. Moreover, an `Ast` can be roundtripped back to its original pattern string as written. * Translating an `Ast` into an `Hir` is a process by which the structured representation is simplified down to its most fundamental components. Translation deals with flags such as case insensitivity by converting things like `(?i:a)` to `[Aa]`. Translation is also where Unicode tables are consulted to resolve things like `\p{Emoji}` and `\p{Greek}`. It also flattens each character class, regardless of how deeply nested it is, into a single sequence of non-overlapping ranges. All the various literal forms are thrown out in favor of one common representation. Overall, the `Hir` is small enough to fit into your head and makes analysis and other tasks much simpler. * Compiling an `Hir` into an `NFA` formulates the regex into a finite state machine whose transitions are defined over bytes. For example, an `Hir` might have a Unicode character class corresponding to a sequence of ranges defined in terms of `char`. Compilation is then responsible for turning those ranges into a UTF-8 automaton. That is, an automaton that matches the UTF-8 encoding of just the codepoints specified by those ranges. Otherwise, the main job of an `NFA` is to serve as a byte-code of sorts for a virtual machine. It can be seen as a sequence of instructions for how to match a regex. 
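To make the pipeline concrete, here is a minimal example (the pattern and haystack are illustrative only) that goes from a pattern string all the way to running a search with the `PikeVM`. All of the `&str`->`Ast`->`Hir`->`NFA` steps described above happen inside `PikeVM::new`:

```rust
use regex_automata::{nfa::thompson::pikevm::PikeVM, Match};

let re = PikeVM::new(r"foo[0-9]+")?;
let mut cache = re.create_cache();

let haystack = "foo12 bar foo345";
let matches: Vec<Match> = re.find_iter(&mut cache, haystack).collect();
assert_eq!(matches, vec![Match::must(0, 0..5), Match::must(0, 10..16)]);
# Ok::<(), Box<dyn std::error::Error>>(())
```

If you need more control over the intermediate steps (for example, to share one `NFA` between several engines), you can build the `NFA` yourself with [`NFA::new`] or a [`Compiler`] and hand it to an engine's builder via its `build_from_nfa` method.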
*/ #[cfg(feature = "nfa-backtrack")] pub mod backtrack; mod builder; #[cfg(feature = "syntax")] mod compiler; mod error; #[cfg(feature = "syntax")] mod literal_trie; #[cfg(feature = "syntax")] mod map; mod nfa; #[cfg(feature = "nfa-pikevm")] pub mod pikevm; #[cfg(feature = "syntax")] mod range_trie; pub use self::{ builder::Builder, error::BuildError, nfa::{ DenseTransitions, PatternIter, SparseTransitions, State, Transition, NFA, }, }; #[cfg(feature = "syntax")] pub use compiler::{Compiler, Config, WhichCaptures}; <file_sep>/testdata/no-unicode.toml [[test]] name = "invalid-utf8-literal1" regex = '\xFF' haystack = '\xFF' matches = [[0, 1]] unicode = false utf8 = false unescape = true [[test]] name = "mixed" regex = '(?:.+)(?-u)(?:.+)' haystack = '\xCE\x93\xCE\x94\xFF' matches = [[0, 5]] utf8 = false unescape = true [[test]] name = "case1" regex = "a" haystack = "A" matches = [[0, 1]] case-insensitive = true unicode = false [[test]] name = "case2" regex = "[a-z]+" haystack = "AaAaA" matches = [[0, 5]] case-insensitive = true unicode = false [[test]] name = "case3" regex = "[a-z]+" haystack = "aA\u212AaA" matches = [[0, 7]] case-insensitive = true [[test]] name = "case4" regex = "[a-z]+" haystack = "aA\u212AaA" matches = [[0, 2], [5, 7]] case-insensitive = true unicode = false [[test]] name = "negate1" regex = "[^a]" haystack = "δ" matches = [[0, 2]] [[test]] name = "negate2" regex = "[^a]" haystack = "δ" matches = [[0, 1], [1, 2]] unicode = false utf8 = false [[test]] name = "dotstar-prefix1" regex = "a" haystack = '\xFFa' matches = [[1, 2]] unicode = false utf8 = false unescape = true [[test]] name = "dotstar-prefix2" regex = "a" haystack = '\xFFa' matches = [[1, 2]] utf8 = false unescape = true [[test]] name = "null-bytes1" regex = '[^\x00]+\x00' haystack = 'foo\x00' matches = [[0, 4]] unicode = false utf8 = false unescape = true [[test]] name = "word-ascii" regex = '\w+' haystack = "aδ" matches = [[0, 1]] unicode = false [[test]] name = "word-unicode" regex = '\w+' haystack = "aδ" matches = [[0, 3]] [[test]] name = "decimal-ascii" regex = '\d+' haystack = "1२३9" matches = [[0, 1], [7, 8]] unicode = false [[test]] name = "decimal-unicode" regex = '\d+' haystack = "1२३9" matches = [[0, 8]] [[test]] name = "space-ascii" regex = '\s+' haystack = " \u1680" matches = [[0, 1]] unicode = false [[test]] name = "space-unicode" regex = '\s+' haystack = " \u1680" matches = [[0, 4]] [[test]] # See: https://github.com/rust-lang/regex/issues/484 name = "iter1-bytes" regex = '' haystack = "☃" matches = [[0, 0], [1, 1], [2, 2], [3, 3]] utf8 = false [[test]] # See: https://github.com/rust-lang/regex/issues/484 name = "iter1-utf8" regex = '' haystack = "☃" matches = [[0, 0], [3, 3]] [[test]] # See: https://github.com/rust-lang/regex/issues/484 # Note that iter2-utf8 doesn't make sense here, since the input isn't UTF-8. name = "iter2-bytes" regex = '' haystack = 'b\xFFr' matches = [[0, 0], [1, 1], [2, 2], [3, 3]] unescape = true utf8 = false # These test that unanchored prefixes can munch through invalid UTF-8 even when # utf8 is enabled. # # This test actually reflects an interesting simplification in how the Thompson # NFA is constructed. It used to be that the NFA could be built with an # unanchored prefix that either matched any byte or _only_ matched valid UTF-8. 
# But the latter turns out to be pretty precarious when it comes to prefilters, # because if you search a haystack that contains invalid UTF-8 but have an # unanchored prefix that requires UTF-8, then prefilters are no longer a valid # optimization because you actually have to check that everything is valid # UTF-8. # # Originally, I had thought that we needed a valid UTF-8 unanchored prefix in # order to guarantee that we only match at valid UTF-8 boundaries. But this # isn't actually true! There are really only two things to consider here: # # 1) Will a regex match split an encoded codepoint? No. Because by construction, # we ensure that a MATCH state can only be reached by following valid UTF-8 (assuming # all of the UTF-8 modes are enabled). # # 2) Will a regex match arbitrary bytes that aren't valid UTF-8? Again, no, # assuming all of the UTF-8 modes are enabled. [[test]] name = "unanchored-invalid-utf8-match-100" regex = '[a-z]' haystack = '\xFFa\xFF' matches = [[1, 2]] unescape = true utf8 = false # This test shows that we can still prevent a match from occurring by requiring # that valid UTF-8 match by inserting our own unanchored prefix. Thus, if the # behavior of not munching through invalid UTF-8 anywhere is needed, then it # can be achieved thusly. [[test]] name = "unanchored-invalid-utf8-nomatch" regex = '^(?s:.)*?[a-z]' haystack = '\xFFa\xFF' matches = [] unescape = true utf8 = false # This is a tricky test that makes sure we don't accidentally do a kind of # unanchored search when we've requested that a regex engine not report # empty matches that split a codepoint. This test caught a regression during # development where the code for skipping over bad empty matches would do so # even if the search should have been anchored. This is ultimately what led to # making 'anchored' an 'Input' option, so that it was always clear what kind # of search was being performed. (Before that, whether a search was anchored # or not was a config knob on the regex engine.) This did wind up making DFAs # a little more complex to configure (with their 'StartKind' knob), but it # generally smoothed out everything else. # # Great example of a test whose failure motivated a sweeping API refactoring. [[test]] name = "anchored-iter-empty-utf8" regex = '' haystack = 'a☃z' matches = [[0, 0], [1, 1]] unescape = false utf8 = true anchored = true <file_sep>/src/lib.rs /*! This crate provides routines for searching strings for matches of a [regular expression] (aka "regex"). The regex syntax supported by this crate is similar to other regex engines, but it lacks several features that are not known how to implement efficiently. This includes, but is not limited to, look-around and backreferences. In exchange, all regex searches in this crate have worst case `O(m * n)` time complexity, where `m` is proportional to the size of the regex and `n` is proportional to the size of the string being searched. [regular expression]: https://en.wikipedia.org/wiki/Regular_expression If you just want API documentation, then skip to the [`Regex`] type. Otherwise, here's a quick example showing one way of parsing the output of a grep-like program: ```rust use regex::Regex; let re = Regex::new(r"(?m)^([^:]+):([0-9]+):(.+)$").unwrap(); let hay = "\ path/to/foo:54:Blue Harvest path/to/bar:90:Something, Something, Something, Dark Side path/to/baz:3:It's a Trap! 
"; let mut results = vec![]; for (_, [path, lineno, line]) in re.captures_iter(hay).map(|c| c.extract()) { results.push((path, lineno.parse::<u64>()?, line)); } assert_eq!(results, vec![ ("path/to/foo", 54, "Blue Harvest"), ("path/to/bar", 90, "Something, Something, Something, Dark Side"), ("path/to/baz", 3, "It's a Trap!"), ]); # Ok::<(), Box<dyn std::error::Error>>(()) ``` # Overview The primary type in this crate is a [`Regex`]. Its most important methods are as follows: * [`Regex::new`] compiles a regex using the default configuration. A [`RegexBuilder`] permits setting a non-default configuration. (For example, case insensitive matching, verbose mode and others.) * [`Regex::is_match`] reports whether a match exists in a particular haystack. * [`Regex::find`] reports the byte offsets of a match in a haystack, if one exists. [`Regex::find_iter`] returns an iterator over all such matches. * [`Regex::captures`] returns a [`Captures`], which reports both the byte offsets of a match in a haystack and the byte offsets of each matching capture group from the regex in the haystack. [`Regex::captures_iter`] returns an iterator over all such matches. There is also a [`RegexSet`], which permits searching for multiple regex patterns simultaneously in a single search. However, it currently only reports which patterns match and *not* the byte offsets of a match. Otherwise, this top-level crate documentation is organized as follows: * [Usage](#usage) shows how to add the `regex` crate to your Rust project. * [Examples](#examples) provides a limited selection of regex search examples. * [Performance](#performance) provides a brief summary of how to optimize regex searching speed. * [Unicode](#unicode) discusses support for non-ASCII patterns. * [Syntax](#syntax) enumerates the specific regex syntax supported by this crate. * [Untrusted input](#untrusted-input) discusses how this crate deals with regex patterns or haystacks that are untrusted. * [Crate features](#crate-features) documents the Cargo features that can be enabled or disabled for this crate. * [Other crates](#other-crates) links to other crates in the `regex` family. # Usage The `regex` crate is [on crates.io](https://crates.io/crates/regex) and can be used by adding `regex` to your dependencies in your project's `Cargo.toml`. Or more simply, just run `cargo add regex`. Here is a complete example that creates a new Rust project, adds a dependency on `regex`, creates the source code for a regex search and then runs the program. First, create the project in a new directory: ```text $ mkdir regex-example $ cd regex-example $ cargo init ``` Second, add a dependency on `regex`: ```text $ cargo add regex ``` Third, edit `src/main.rs`. Delete what's there and replace it with this: ``` use regex::Regex; fn main() { let re = Regex::new(r"Hello (?<name>\w+)!").unwrap(); let Some(caps) = re.captures("Hello Murphy!") else { println!("no match!"); return; }; println!("The name is: {}", &caps["name"]); } ``` Fourth, run it with `cargo run`: ```text $ cargo run Compiling memchr v2.5.0 Compiling regex-syntax v0.7.1 Compiling aho-corasick v1.0.1 Compiling regex v1.8.1 Compiling regex-example v0.1.0 (/tmp/regex-example) Finished dev [unoptimized + debuginfo] target(s) in 4.22s Running `target/debug/regex-example` The name is: Murphy ``` The first time you run the program will show more output like above. But subsequent runs shouldn't have to re-compile the dependencies. 
# Examples

This section provides a few examples, in tutorial style, showing how to
search a haystack with a regex. There are more examples throughout the API
documentation.

Before starting though, it's worth defining a few terms:

* A **regex** is a Rust value whose type is `Regex`. We use `re` as a
variable name for a regex.
* A **pattern** is the string that is used to build a regex. We use `pat` as
a variable name for a pattern.
* A **haystack** is the string that is searched by a regex. We use `hay` as a
variable name for a haystack.

Sometimes the words "regex" and "pattern" are used interchangeably.

General use of regular expressions in this crate proceeds by compiling a
**pattern** into a **regex**, and then using that regex to search, split or
replace parts of a **haystack**.

### Example: find a middle initial

We'll start off with a very simple example: a regex that looks for a specific
name but uses a wildcard to match a middle initial. Our pattern serves as
something like a template that will match a particular name with *any* middle
initial.

```rust
use regex::Regex;

// We use 'unwrap()' here because it would be a bug in our program if the
// pattern failed to compile to a regex. Panicking in the presence of a bug
// is okay.
let re = Regex::new(r"Homer (.)\. Simpson").unwrap();
let hay = "Homer J. Simpson";
let Some(caps) = re.captures(hay) else { return };
assert_eq!("J", &caps[1]);
```

There are a few things worth noticing here in our first example:

* The `.` is a special pattern meta character that means "match any single
character except for new lines." (More precisely, in this crate, it means
"match any UTF-8 encoding of any Unicode scalar value other than `\n`.")
* We can match an actual `.` literally by escaping it, i.e., `\.`.
* We use Rust's [raw strings] to avoid needing to deal with escape sequences in
both the regex pattern syntax and in Rust's string literal syntax. If we didn't
use raw strings here, we would have had to use `\\.` to match a literal `.`
character. That is, `r"\."` and `"\\."` are equivalent patterns.
* We put our wildcard `.` instruction in parentheses. These parentheses have a
special meaning that says, "make whatever part of the haystack matches within
these parentheses available as a capturing group." After finding a match, we
access this capture group with `&caps[1]`.

[raw strings]: https://doc.rust-lang.org/stable/reference/tokens.html#raw-string-literals

Otherwise, we execute a search using `re.captures(hay)` and return from our
function if no match occurred. We then reference the middle initial by asking
for the part of the haystack that matched the capture group indexed at `1`.
(The capture group at index 0 is implicit and always corresponds to the entire
match. In this case, that's `Homer J. Simpson`.)

### Example: named capture groups

Continuing from our middle initial example above, we can tweak the pattern
slightly to give a name to the group that matches the middle initial:

```rust
use regex::Regex;

// Note that (?P<middle>.) is a different way to spell the same thing.
let re = Regex::new(r"Homer (?<middle>.)\. Simpson").unwrap();
let hay = "Homer J. Simpson";
let Some(caps) = re.captures(hay) else { return };
assert_eq!("J", &caps["middle"]);
```

Giving a name to a group can be useful when there are multiple groups in a
pattern. It makes the code referring to those groups a bit easier to
understand.
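As a small aside, indexing into a `Captures` value panics if the requested
group did not participate in the match, while the `Captures::name` method
returns an `Option<Match>` instead. A brief sketch showing both access styles
on the same example as above:

```rust
use regex::Regex;

let re = Regex::new(r"Homer (?<middle>.)\. Simpson").unwrap();
let hay = "Homer J. Simpson";
let Some(caps) = re.captures(hay) else { return };
// Indexing panics if the group is absent from the match, while
// `Captures::name` returns an `Option<Match>` that can be inspected.
assert_eq!("J", &caps["middle"]);
assert_eq!(Some("J"), caps.name("middle").map(|m| m.as_str()));
```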
### Example: validating a particular date format

This example shows how to confirm whether a haystack, in its entirety, matches
a particular date format:

```rust
use regex::Regex;

let re = Regex::new(r"^\d{4}-\d{2}-\d{2}$").unwrap();
assert!(re.is_match("2010-03-14"));
```

Notice the use of the `^` and `$` anchors. In this crate, every regex search is
run with an implicit `(?s:.)*?` at the beginning of its pattern, which allows
the regex to match anywhere in a haystack. Anchors, as above, can be used to
ensure that the full haystack matches a pattern.

This crate is also Unicode aware by default, which means that `\d` might match
more than you might expect it to. For example:

```rust
use regex::Regex;

let re = Regex::new(r"^\d{4}-\d{2}-\d{2}$").unwrap();
assert!(re.is_match("𝟚𝟘𝟙𝟘-𝟘𝟛-𝟙𝟜"));
```

To only match an ASCII decimal digit, all of the following are equivalent:

* `[0-9]`
* `(?-u:\d)`
* `[[:digit:]]`
* `[\d&&\p{ascii}]`

### Example: finding dates in a haystack

In the previous example, we showed how one might validate that a haystack,
in its entirety, corresponded to a particular date format. But what if we
wanted to extract all things that look like dates in a specific format
from a haystack? To do this, we can use an iterator API to find all matches
(notice that we've removed the anchors and switched to looking for ASCII-only
digits):

```rust
use regex::Regex;

let re = Regex::new(r"[0-9]{4}-[0-9]{2}-[0-9]{2}").unwrap();
let hay = "What do 1865-04-14, 1881-07-02, 1901-09-06 and 1963-11-22 have in common?";
// 'm' is a 'Match', and 'as_str()' returns the matching part of the haystack.
let dates: Vec<&str> = re.find_iter(hay).map(|m| m.as_str()).collect();
assert_eq!(dates, vec![
    "1865-04-14",
    "1881-07-02",
    "1901-09-06",
    "1963-11-22",
]);
```

We can also iterate over [`Captures`] values instead of [`Match`] values, and
that in turn permits accessing each component of the date via capturing groups:

```rust
use regex::Regex;

let re = Regex::new(r"(?<y>[0-9]{4})-(?<m>[0-9]{2})-(?<d>[0-9]{2})").unwrap();
let hay = "What do 1865-04-14, 1881-07-02, 1901-09-06 and 1963-11-22 have in common?";
let dates: Vec<(&str, &str, &str)> = re.captures_iter(hay).map(|caps| {
    // The unwraps are okay because every capture group must match if the whole
    // regex matches, and in this context, we know we have a match.
    //
    // Note that we use `caps.name("y").unwrap().as_str()` instead of
    // `&caps["y"]` because the lifetime of the former is the same as the
    // lifetime of `hay` above, but the lifetime of the latter is tied to the
    // lifetime of `caps` due to how the `Index` trait is defined.
let year = caps.name("y").unwrap().as_str(); let month = caps.name("m").unwrap().as_str(); let day = caps.name("d").unwrap().as_str(); (year, month, day) }).collect(); assert_eq!(dates, vec![ ("1865", "04", "14"), ("1881", "07", "02"), ("1901", "09", "06"), ("1963", "11", "22"), ]); ``` ### Example: simpler capture group extraction One can use [`Captures::extract`] to make the code from the previous example a bit simpler in this case: ```rust use regex::Regex; let re = Regex::new(r"([0-9]{4})-([0-9]{2})-([0-9]{2})").unwrap(); let hay = "What do 1865-04-14, 1881-07-02, 1901-09-06 and 1963-11-22 have in common?"; let dates: Vec<(&str, &str, &str)> = re.captures_iter(hay).map(|caps| { let (_, [year, month, day]) = caps.extract(); (year, month, day) }).collect(); assert_eq!(dates, vec![ ("1865", "04", "14"), ("1881", "07", "02"), ("1901", "09", "06"), ("1963", "11", "22"), ]); ``` `Captures::extract` works by ensuring that the number of matching groups match the number of groups requested via the `[year, month, day]` syntax. If they do, then the substrings for each corresponding capture group are automatically returned in an appropriately sized array. Rust's syntax for pattern matching arrays does the rest. ### Example: replacement with named capture groups Building on the previous example, perhaps we'd like to rearrange the date formats. This can be done by finding each match and replacing it with something different. The [`Regex::replace_all`] routine provides a convenient way to do this, including by supporting references to named groups in the replacement string: ```rust use regex::Regex; let re = Regex::new(r"(?<y>\d{4})-(?<m>\d{2})-(?<d>\d{2})").unwrap(); let before = "1973-01-05, 1975-08-25 and 1980-10-18"; let after = re.replace_all(before, "$m/$d/$y"); assert_eq!(after, "01/05/1973, 08/25/1975 and 10/18/1980"); ``` The replace methods are actually polymorphic in the replacement, which provides more flexibility than is seen here. (See the documentation for [`Regex::replace`] for more details.) ### Example: verbose mode When your regex gets complicated, you might consider using something other than regex. But if you stick with regex, you can use the `x` flag to enable insignificant whitespace mode or "verbose mode." In this mode, whitespace is treated as insignificant and one may write comments. This may make your patterns easier to comprehend. ```rust use regex::Regex; let re = Regex::new(r"(?x) (?P<y>\d{4}) # the year, including all Unicode digits - (?P<m>\d{2}) # the month, including all Unicode digits - (?P<d>\d{2}) # the day, including all Unicode digits ").unwrap(); let before = "1973-01-05, 1975-08-25 and 1980-10-18"; let after = re.replace_all(before, "$m/$d/$y"); assert_eq!(after, "01/05/1973, 08/25/1975 and 10/18/1980"); ``` If you wish to match against whitespace in this mode, you can still use `\s`, `\n`, `\t`, etc. For escaping a single space character, you can escape it directly with `\ `, use its hex character code `\x20` or temporarily disable the `x` flag, e.g., `(?-x: )`. ### Example: match multiple regular expressions simultaneously This demonstrates how to use a [`RegexSet`] to match multiple (possibly overlapping) regexes in a single scan of a haystack: ```rust use regex::RegexSet; let set = RegexSet::new(&[ r"\w+", r"\d+", r"\pL+", r"foo", r"bar", r"barfoo", r"foobar", ]).unwrap(); // Iterate over and collect all of the matches. Each match corresponds to the // ID of the matching pattern. 
let matches: Vec<_> = set.matches("foobar").into_iter().collect(); assert_eq!(matches, vec![0, 2, 3, 4, 6]); // You can also test whether a particular regex matched: let matches = set.matches("foobar"); assert!(!matches.matched(5)); assert!(matches.matched(6)); ``` # Performance This section briefly discusses a few concerns regarding the speed and resource usage of regexes. ### Only ask for what you need When running a search with a regex, there are generally three different types of information one can ask for: 1. Does a regex match in a haystack? 2. Where does a regex match in a haystack? 3. Where do each of the capturing groups match in a haystack? Generally speaking, this crate could provide a function to answer only #3, which would subsume #1 and #2 automatically. However, it can be significantly more expensive to compute the location of capturing group matches, so it's best not to do it if you don't need to. Therefore, only ask for what you need. For example, don't use [`Regex::find`] if you only need to test if a regex matches a haystack. Use [`Regex::is_match`] instead. ### Unicode can impact memory usage and search speed This crate has first class support for Unicode and it is **enabled by default**. In many cases, the extra memory required to support it will be negligible and it typically won't impact search speed. But it can in some cases. With respect to memory usage, the impact of Unicode principally manifests through the use of Unicode character classes. Unicode character classes tend to be quite large. For example, `\w` by default matches around 140,000 distinct codepoints. This requires additional memory, and tends to slow down regex compilation. While a `\w` here and there is unlikely to be noticed, writing `\w{100}` will for example result in quite a large regex by default. Indeed, `\w` is considerably larger than its ASCII-only version, so if your requirements are satisfied by ASCII, it's probably a good idea to stick to ASCII classes. The ASCII-only version of `\w` can be spelled in a number of ways. All of the following are equivalent: * `[0-9A-Za-z_]` * `(?-u:\w)` * `[[:word:]]` * `[\w&&\p{ascii}]` With respect to search speed, Unicode tends to be handled pretty well, even when using large Unicode character classes. However, some of the faster internal regex engines cannot handle a Unicode aware word boundary assertion. So if you don't need Unicode-aware word boundary assertions, you might consider using `(?-u:\b)` instead of `\b`, where the former uses an ASCII-only definition of a word character. ### Literals might accelerate searches This crate tends to be quite good at recognizing literals in a regex pattern and using them to accelerate a search. If it is at all possible to include some kind of literal in your pattern, then it might make search substantially faster. For example, in the regex `\w+@\w+`, the engine will look for occurrences of `@` and then try a reverse match for `\w+` to find the start position. ### Avoid re-compiling regexes, especially in a loop It is an anti-pattern to compile the same pattern in a loop since regex compilation is typically expensive. (It takes anywhere from a few microseconds to a few **milliseconds** depending on the size of the pattern.) Not only is compilation itself expensive, but this also prevents optimizations that reuse allocations internally to the regex engine. In Rust, it can sometimes be a pain to pass regexes around if they're used from inside a helper function. 
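To make the anti-pattern concrete, here is a minimal sketch of it (the helper
function and pattern are invented for illustration); every call re-compiles
the same pattern before searching:

```rust
use regex::Regex;

// Anti-pattern: the regex is built on every call, so a caller that invokes
// this in a loop pays the comparatively large compilation cost repeatedly.
fn is_iso_date(haystack: &str) -> bool {
    Regex::new(r"[0-9]{4}-[0-9]{2}-[0-9]{2}").unwrap().is_match(haystack)
}

fn main() {
    let lines = ["2024-01-01", "not a date", "1999-12-31"];
    let mut count = 0;
    for line in lines {
        // Each iteration compiles the same pattern again.
        if is_iso_date(line) {
            count += 1;
        }
    }
    assert_eq!(2, count);
}
```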
Instead, we recommend using crates like [`once_cell`] and [`lazy_static`]
to ensure that patterns are compiled exactly once.

[`once_cell`]: https://crates.io/crates/once_cell
[`lazy_static`]: https://crates.io/crates/lazy_static

This example shows how to use `once_cell`:

```rust
use {
    once_cell::sync::Lazy,
    regex::Regex,
};

fn some_helper_function(haystack: &str) -> bool {
    static RE: Lazy<Regex> = Lazy::new(|| Regex::new(r"...").unwrap());
    RE.is_match(haystack)
}

fn main() {
    assert!(some_helper_function("abc"));
    assert!(!some_helper_function("ac"));
}
```

Specifically, in this example, the regex will be compiled when it is used for
the first time. On subsequent uses, it will reuse the previously built `Regex`.
Notice how one can define the `Regex` locally to a specific function.

### Sharing a regex across threads can result in contention

While a single `Regex` can be freely used from multiple threads simultaneously,
there is a small synchronization cost that must be paid. Generally speaking,
one shouldn't expect to observe this unless the principal task in each thread
is searching with the regex *and* most searches are on short haystacks. In this
case, internal contention on shared resources can spike and increase latency,
which in turn may slow down each individual search.

One can work around this by cloning each `Regex` before sending it to another
thread. The cloned regexes will still share the same internal read-only portion
of their compiled state (it's reference counted), but each thread will get
optimized access to the mutable space that is used to run a search. In general,
there is no additional cost in memory to doing this. The only cost is the added
code complexity required to explicitly clone the regex. (If you share the same
`Regex` across multiple threads, each thread still gets its own mutable space,
but accessing that space is slower.)

# Unicode

This section discusses what kind of Unicode support this regex library has.
Before showing some examples, we'll summarize the relevant points:

* This crate almost fully implements "Basic Unicode Support" (Level 1) as
specified by the [Unicode Technical Standard #18][UTS18]. The full details
of what is supported are documented in [UNICODE.md] in the root of the regex
crate repository. There is virtually no support for "Extended Unicode Support"
(Level 2) from UTS#18.
* The top-level [`Regex`] runs searches *as if* iterating over each of the
codepoints in the haystack. That is, the fundamental atom of matching is a
single codepoint.
* [`bytes::Regex`], in contrast, permits disabling Unicode mode for part or all
of your pattern in all cases. When Unicode mode is disabled, then a search is
run *as if* iterating over each byte in the haystack. That is, the fundamental
atom of matching is a single byte. (A top-level `Regex` also permits disabling
Unicode and thus matching *as if* it were one byte at a time, but only when
doing so wouldn't permit matching invalid UTF-8.)
* When Unicode mode is enabled (the default), `.` will match an entire Unicode
scalar value, even when it is encoded using multiple bytes. When Unicode mode
is disabled (e.g., `(?-u:.)`), then `.` will match a single byte in all cases.
* The character classes `\w`, `\d` and `\s` are all Unicode-aware by default.
Use `(?-u:\w)`, `(?-u:\d)` and `(?-u:\s)` to get their ASCII-only definitions.
* Similarly, `\b` and `\B` use a Unicode definition of a "word" character.
To get ASCII-only word boundaries, use `(?-u:\b)` and `(?-u:\B)`.
* `^` and `$` are **not** Unicode-aware in multi-line mode. Namely, they only
recognize `\n` (assuming CRLF mode is not enabled) and not any of the other
forms of line terminators defined by Unicode.
* Case insensitive searching is Unicode-aware and uses simple case folding.
* Unicode general categories, scripts and many boolean properties are available
by default via the `\p{property name}` syntax.
* In all cases, matches are reported using byte offsets. Or more precisely,
UTF-8 code unit offsets. This permits constant time indexing and slicing of the
haystack.

[UTS18]: https://unicode.org/reports/tr18/
[UNICODE.md]: https://github.com/rust-lang/regex/blob/master/UNICODE.md

Patterns themselves are **only** interpreted as a sequence of Unicode scalar
values. This means you can use Unicode characters directly in your pattern:

```rust
use regex::Regex;

let re = Regex::new(r"(?i)Δ+").unwrap();
let m = re.find("ΔδΔ").unwrap();
assert_eq!((0, 6), (m.start(), m.end()));
// alternatively:
assert_eq!(0..6, m.range());
```

As noted above, Unicode general categories, scripts, script extensions, ages
and a smattering of boolean properties are available as character classes. For
example, you can match a sequence of numerals, Greek or Cherokee letters:

```rust
use regex::Regex;

let re = Regex::new(r"[\pN\p{Greek}\p{Cherokee}]+").unwrap();
let m = re.find("abcΔᎠβⅠᏴγδⅡxyz").unwrap();
assert_eq!(3..23, m.range());
```

While not specific to Unicode, this library also supports character class set
operations. Namely, one can nest character classes arbitrarily and perform set
operations on them. Those set operations are union (the default), intersection,
difference and symmetric difference. These set operations tend to be most
useful with Unicode character classes. For example, to match any codepoint
that is both in the `Greek` script and in the `Letter` general category:

```rust
use regex::Regex;

let re = Regex::new(r"[\p{Greek}&&\pL]+").unwrap();
let subs: Vec<&str> = re.find_iter("ΔδΔ𐅌ΔδΔ").map(|m| m.as_str()).collect();
assert_eq!(subs, vec!["ΔδΔ", "ΔδΔ"]);

// If we just match on Greek, then all codepoints would match!
let re = Regex::new(r"\p{Greek}+").unwrap();
let subs: Vec<&str> = re.find_iter("ΔδΔ𐅌ΔδΔ").map(|m| m.as_str()).collect();
assert_eq!(subs, vec!["ΔδΔ𐅌ΔδΔ"]);
```

### Opt out of Unicode support

The [`bytes::Regex`] type can be used to search `&[u8]` haystacks. By default,
haystacks are conventionally treated as UTF-8 just like it is with the main
`Regex` type. However, this behavior can be disabled by turning off the `u`
flag, even if doing so could result in matching invalid UTF-8. For example,
when the `u` flag is disabled, `.` will match any byte instead of any Unicode
scalar value.

Disabling the `u` flag is also possible with the standard `&str`-based `Regex`
type, but it is only allowed where the UTF-8 invariant is maintained. For
example, `(?-u:\w)` is an ASCII-only `\w` character class and is legal in an
`&str`-based `Regex`, but `(?-u:\W)` will attempt to match *any byte* that
isn't in `(?-u:\w)`, which in turn includes bytes that are invalid UTF-8.
Similarly, `(?-u:\xFF)` will attempt to match the raw byte `\xFF` (instead of
`U+00FF`), which is invalid UTF-8 and therefore is illegal in `&str`-based
regexes.

Finally, since Unicode support requires bundling large Unicode data tables,
this crate exposes knobs to disable the compilation of those data tables, which
can be useful for shrinking binary size and reducing compilation times.
For details on how to do that, see the section on [crate features](#crate-features). # Syntax The syntax supported in this crate is documented below. Note that the regular expression parser and abstract syntax are exposed in a separate crate, [`regex-syntax`](https://docs.rs/regex-syntax). ### Matching one character <pre class="rust"> . any character except new line (includes new line with s flag) [0-9] any ASCII digit \d digit (\p{Nd}) \D not digit \pX Unicode character class identified by a one-letter name \p{Greek} Unicode character class (general category or script) \PX Negated Unicode character class identified by a one-letter name \P{Greek} negated Unicode character class (general category or script) </pre> ### Character classes <pre class="rust"> [xyz] A character class matching either x, y or z (union). [^xyz] A character class matching any character except x, y and z. [a-z] A character class matching any character in range a-z. [[:alpha:]] ASCII character class ([A-Za-z]) [[:^alpha:]] Negated ASCII character class ([^A-Za-z]) [x[^xyz]] Nested/grouping character class (matching any character except y and z) [a-y&&xyz] Intersection (matching x or y) [0-9&&[^4]] Subtraction using intersection and negation (matching 0-9 except 4) [0-9--4] Direct subtraction (matching 0-9 except 4) [a-g~~b-h] Symmetric difference (matching `a` and `h` only) [\[\]] Escaping in character classes (matching [ or ]) [a&&b] An empty character class matching nothing </pre> Any named character class may appear inside a bracketed `[...]` character class. For example, `[\p{Greek}[:digit:]]` matches any ASCII digit or any codepoint in the `Greek` script. `[\p{Greek}&&\pL]` matches Greek letters. Precedence in character classes, from most binding to least: 1. Ranges: `[a-cd]` == `[[a-c]d]` 2. Union: `[ab&&bc]` == `[[ab]&&[bc]]` 3. Intersection, difference, symmetric difference. All three have equivalent precedence, and are evaluated in left-to-right order. For example, `[\pL--\p{Greek}&&\p{Uppercase}]` == `[[\pL--\p{Greek}]&&\p{Uppercase}]`. 4. Negation: `[^a-z&&b]` == `[^[a-z&&b]]`. ### Composites <pre class="rust"> xy concatenation (x followed by y) x|y alternation (x or y, prefer x) </pre> This example shows how an alternation works, and what it means to prefer a branch in the alternation over subsequent branches. ``` use regex::Regex; let haystack = "samwise"; // If 'samwise' comes first in our alternation, then it is // preferred as a match, even if the regex engine could // technically detect that 'sam' led to a match earlier. let re = Regex::new(r"samwise|sam").unwrap(); assert_eq!("samwise", re.find(haystack).unwrap().as_str()); // But if 'sam' comes first, then it will match instead. // In this case, it is impossible for 'samwise' to match // because 'sam' is a prefix of it. let re = Regex::new(r"sam|samwise").unwrap(); assert_eq!("sam", re.find(haystack).unwrap().as_str()); ``` ### Repetitions <pre class="rust"> x* zero or more of x (greedy) x+ one or more of x (greedy) x? zero or one of x (greedy) x*? zero or more of x (ungreedy/lazy) x+? one or more of x (ungreedy/lazy) x?? zero or one of x (ungreedy/lazy) x{n,m} at least n x and at most m x (greedy) x{n,} at least n x (greedy) x{n} exactly n x x{n,m}? at least n x and at most m x (ungreedy/lazy) x{n,}? at least n x (ungreedy/lazy) x{n}? 
exactly n x </pre> ### Empty matches <pre class="rust"> ^ the beginning of a haystack (or start-of-line with multi-line mode) $ the end of a haystack (or end-of-line with multi-line mode) \A only the beginning of a haystack (even with multi-line mode enabled) \z only the end of a haystack (even with multi-line mode enabled) \b a Unicode word boundary (\w on one side and \W, \A, or \z on other) \B not a Unicode word boundary </pre> The empty regex is valid and matches the empty string. For example, the empty regex matches `abc` at positions `0`, `1`, `2` and `3`. When using the top-level [`Regex`] on `&str` haystacks, an empty match that splits a codepoint is guaranteed to never be returned. However, such matches are permitted when using a [`bytes::Regex`]. For example: ```rust let re = regex::Regex::new(r"").unwrap(); let ranges: Vec<_> = re.find_iter("💩").map(|m| m.range()).collect(); assert_eq!(ranges, vec![0..0, 4..4]); let re = regex::bytes::Regex::new(r"").unwrap(); let ranges: Vec<_> = re.find_iter("💩".as_bytes()).map(|m| m.range()).collect(); assert_eq!(ranges, vec![0..0, 1..1, 2..2, 3..3, 4..4]); ``` Note that an empty regex is distinct from a regex that can never match. For example, the regex `[a&&b]` is a character class that represents the intersection of `a` and `b`. That intersection is empty, which means the character class is empty. Since nothing is in the empty set, `[a&&b]` matches nothing, not even the empty string. ### Grouping and flags <pre class="rust"> (exp) numbered capture group (indexed by opening parenthesis) (?P&lt;name&gt;exp) named (also numbered) capture group (names must be alpha-numeric) (?&lt;name&gt;exp) named (also numbered) capture group (names must be alpha-numeric) (?:exp) non-capturing group (?flags) set flags within current group (?flags:exp) set flags for exp (non-capturing) </pre> Capture group names must be any sequence of alpha-numeric Unicode codepoints, in addition to `.`, `_`, `[` and `]`. Names must start with either an `_` or an alphabetic codepoint. Alphabetic codepoints correspond to the `Alphabetic` Unicode property, while numeric codepoints correspond to the union of the `Decimal_Number`, `Letter_Number` and `Other_Number` general categories. Flags are each a single character. For example, `(?x)` sets the flag `x` and `(?-x)` clears the flag `x`. Multiple flags can be set or cleared at the same time: `(?xy)` sets both the `x` and `y` flags and `(?x-y)` sets the `x` flag and clears the `y` flag. All flags are by default disabled unless stated otherwise. They are: <pre class="rust"> i case-insensitive: letters match both upper and lower case m multi-line mode: ^ and $ match begin/end of line s allow . to match \n R enables CRLF mode: when multi-line mode is enabled, \r\n is used U swap the meaning of x* and x*? u Unicode support (enabled by default) x verbose mode, ignores whitespace and allow line comments (starting with `#`) </pre> Note that in verbose mode, whitespace is ignored everywhere, including within character classes. To insert whitespace, use its escaped form or a hex literal. For example, `\ ` or `\x20` for an ASCII space. Flags can be toggled within a pattern. Here's an example that matches case-insensitively for the first part but case-sensitively for the second part: ```rust use regex::Regex; let re = Regex::new(r"(?i)a+(?-i)b+").unwrap(); let m = re.find("AaAaAbbBBBb").unwrap(); assert_eq!(m.as_str(), "AaAaAbb"); ``` Notice that the `a+` matches either `a` or `A`, but the `b+` only matches `b`. 
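The same effect can also be had with the scoped form `(?flags:exp)` from the
table above, which applies a flag to a single group without needing a second
inline toggle. A minimal sketch equivalent to the example above:

```rust
use regex::Regex;

// `(?i:a+)` applies case insensitivity to `a+` only; `b+` stays case
// sensitive, just like `(?i)a+(?-i)b+` above.
let re = Regex::new(r"(?i:a+)b+").unwrap();
let m = re.find("AaAaAbbBBBb").unwrap();
assert_eq!(m.as_str(), "AaAaAbb");
```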
Multi-line mode means `^` and `$` no longer match just at the beginning/end of the input, but also at the beginning/end of lines: ``` use regex::Regex; let re = Regex::new(r"(?m)^line \d+").unwrap(); let m = re.find("line one\nline 2\n").unwrap(); assert_eq!(m.as_str(), "line 2"); ``` Note that `^` matches after new lines, even at the end of input: ``` use regex::Regex; let re = Regex::new(r"(?m)^").unwrap(); let m = re.find_iter("test\n").last().unwrap(); assert_eq!((m.start(), m.end()), (5, 5)); ``` When both CRLF mode and multi-line mode are enabled, then `^` and `$` will match either `\r` and `\n`, but never in the middle of a `\r\n`: ``` use regex::Regex; let re = Regex::new(r"(?mR)^foo$").unwrap(); let m = re.find("\r\nfoo\r\n").unwrap(); assert_eq!(m.as_str(), "foo"); ``` Unicode mode can also be selectively disabled, although only when the result *would not* match invalid UTF-8. One good example of this is using an ASCII word boundary instead of a Unicode word boundary, which might make some regex searches run faster: ```rust use regex::Regex; let re = Regex::new(r"(?-u:\b).+(?-u:\b)").unwrap(); let m = re.find("$$abc$$").unwrap(); assert_eq!(m.as_str(), "abc"); ``` ### Escape sequences Note that this includes all possible escape sequences, even ones that are documented elsewhere. <pre class="rust"> \* literal *, applies to all ASCII except [0-9A-Za-z<>] \a bell (\x07) \f form feed (\x0C) \t horizontal tab \n new line \r carriage return \v vertical tab (\x0B) \A matches at the beginning of a haystack \z matches at the end of a haystack \b word boundary assertion \B negated word boundary assertion \123 octal character code, up to three digits (when enabled) \x7F hex character code (exactly two digits) \x{10FFFF} any hex character code corresponding to a Unicode code point \u007F hex character code (exactly four digits) \u{7F} any hex character code corresponding to a Unicode code point \U0000007F hex character code (exactly eight digits) \U{7F} any hex character code corresponding to a Unicode code point \p{Letter} Unicode character class \P{Letter} negated Unicode character class \d, \s, \w Perl character class \D, \S, \W negated Perl character class </pre> ### Perl character classes (Unicode friendly) These classes are based on the definitions provided in [UTS#18](https://www.unicode.org/reports/tr18/#Compatibility_Properties): <pre class="rust"> \d digit (\p{Nd}) \D not digit \s whitespace (\p{White_Space}) \S not whitespace \w word character (\p{Alphabetic} + \p{M} + \d + \p{Pc} + \p{Join_Control}) \W not word character </pre> ### ASCII character classes These classes are based on the definitions provided in [UTS#18](https://www.unicode.org/reports/tr18/#Compatibility_Properties): <pre class="rust"> [[:alnum:]] alphanumeric ([0-9A-Za-z]) [[:alpha:]] alphabetic ([A-Za-z]) [[:ascii:]] ASCII ([\x00-\x7F]) [[:blank:]] blank ([\t ]) [[:cntrl:]] control ([\x00-\x1F\x7F]) [[:digit:]] digits ([0-9]) [[:graph:]] graphical ([!-~]) [[:lower:]] lower case ([a-z]) [[:print:]] printable ([ -~]) [[:punct:]] punctuation ([!-/:-@\[-`{-~]) [[:space:]] whitespace ([\t\n\v\f\r ]) [[:upper:]] upper case ([A-Z]) [[:word:]] word characters ([0-9A-Za-z_]) [[:xdigit:]] hex digit ([0-9A-Fa-f]) </pre> # Untrusted input This crate is meant to be able to run regex searches on untrusted haystacks without fear of [ReDoS]. This crate also, to a certain extent, supports untrusted patterns. 
[ReDoS]: https://en.wikipedia.org/wiki/ReDoS This crate differs from most (but not all) other regex engines in that it doesn't use unbounded backtracking to run a regex search. In those cases, one generally cannot use untrusted patterns *or* untrusted haystacks because it can be very difficult to know whether a particular pattern will result in catastrophic backtracking or not. We'll first discuss how this crate deals with untrusted inputs and then wrap it up with a realistic discussion about what practice really looks like. ### Panics Outside of clearly documented cases, most APIs in this crate are intended to never panic regardless of the inputs given to them. For example, `Regex::new`, `Regex::is_match`, `Regex::find` and `Regex::captures` should never panic. That is, it is an API promise that those APIs will never panic no matter what inputs are given to them. With that said, regex engines are complicated beasts, and providing a rock solid guarantee that these APIs literally never panic is essentially equivalent to saying, "there are no bugs in this library." That is a bold claim, and not really one that can be feasibly made with a straight face. Don't get the wrong impression here. This crate is extensively tested, not just with unit and integration tests, but also via fuzz testing. For example, this crate is part of the [OSS-fuzz project]. Panics should be incredibly rare, but it is possible for bugs to exist, and thus possible for a panic to occur. If you need a rock solid guarantee against panics, then you should wrap calls into this library with [`std::panic::catch_unwind`]. It's also worth pointing out that this library will *generally* panic when other regex engines would commit undefined behavior. When undefined behavior occurs, your program might continue as if nothing bad has happened, but it also might mean your program is open to the worst kinds of exploits. In contrast, the worst thing a panic can do is a denial of service. [OSS-fuzz project]: https://android.googlesource.com/platform/external/oss-fuzz/+/refs/tags/android-t-preview-1/projects/rust-regex/ [`std::panic::catch_unwind`]: https://doc.rust-lang.org/std/panic/fn.catch_unwind.html ### Untrusted patterns The principal way this crate deals with them is by limiting their size by default. The size limit can be configured via [`RegexBuilder::size_limit`]. The idea of a size limit is that compiling a pattern into a `Regex` will fail if it becomes "too big." Namely, while *most* resources consumed by compiling a regex are approximately proportional (albeit with some high constant factors in some cases, such as with Unicode character classes) to the length of the pattern itself, there is one particular exception to this: counted repetitions. Namely, this pattern: ```text a{5}{5}{5}{5}{5}{5} ``` Is equivalent to this pattern: ```text a{15625} ``` In both of these cases, the actual pattern string is quite small, but the resulting `Regex` value is quite large. Indeed, as the first pattern shows, it isn't enough to locally limit the size of each repetition because they can be stacked in a way that results in exponential growth. To provide a bit more context, a simplified view of regex compilation looks like this: * The pattern string is parsed into a structured representation called an AST. Counted repetitions are not expanded and Unicode character classes are not looked up in this stage. That is, the size of the AST is proportional to the size of the pattern with "reasonable" constant factors. 
In other words, one can reasonably limit the memory used by an AST by limiting the length of the pattern string. * The AST is translated into an HIR. Counted repetitions are still *not* expanded at this stage, but Unicode character classes are embedded into the HIR. The memory usage of a HIR is still proportional to the length of the original pattern string, but the constant factors---mostly as a result of Unicode character classes---can be quite high. Still though, the memory used by an HIR can be reasonably limited by limiting the length of the pattern string. * The HIR is compiled into a [Thompson NFA]. This is the stage at which something like `\w{5}` is rewritten to `\w\w\w\w\w`. Thus, this is the stage at which [`RegexBuilder::size_limit`] is enforced. If the NFA exceeds the configured size, then this stage will fail. [Thompson NFA]: https://en.wikipedia.org/wiki/Thompson%27s_construction The size limit helps avoid two different kinds of exorbitant resource usage: * It avoids permitting exponential memory usage based on the size of the pattern string. * It avoids long search times. This will be discussed in more detail in the next section, but worst case search time *is* dependent on the size of the regex. So keeping regexes limited to a reasonable size is also a way of keeping search times reasonable. Finally, it's worth pointing out that regex compilation is guaranteed to take worst case `O(m)` time, where `m` is proportional to the size of regex. The size of the regex here is *after* the counted repetitions have been expanded. **Advice for those using untrusted regexes**: limit the pattern length to something small and expand it as needed. Configure [`RegexBuilder::size_limit`] to something small and then expand it as needed. ### Untrusted haystacks The main way this crate guards against searches from taking a long time is by using algorithms that guarantee a `O(m * n)` worst case time and space bound. Namely: * `m` is proportional to the size of the regex, where the size of the regex includes the expansion of all counted repetitions. (See the previous section on untrusted patterns.) * `n` is proportional to the length, in bytes, of the haystack. In other words, if you consider `m` to be a constant (for example, the regex pattern is a literal in the source code), then the search can be said to run in "linear time." Or equivalently, "linear time with respect to the size of the haystack." But the `m` factor here is important not to ignore. If a regex is particularly big, the search times can get quite slow. This is why, in part, [`RegexBuilder::size_limit`] exists. **Advice for those searching untrusted haystacks**: As long as your regexes are not enormous, you should expect to be able to search untrusted haystacks without fear. If you aren't sure, you should benchmark it. Unlike backtracking engines, if your regex is so big that it's likely to result in slow searches, this is probably something you'll be able to observe regardless of what the haystack is made up of. ### Iterating over matches One thing that is perhaps easy to miss is that the worst case time complexity bound of `O(m * n)` applies to methods like [`Regex::is_match`], [`Regex::find`] and [`Regex::captures`]. It does **not** apply to [`Regex::find_iter`] or [`Regex::captures_iter`]. Namely, since iterating over all matches can execute many searches, and each search can scan the entire haystack, the worst case time complexity for iterators is `O(m * n^2)`. 
One example of where this occurs is when a pattern consists of an alternation,
where an earlier branch of the alternation requires scanning the entire
haystack only to discover that there is no match. It also requires a later
branch of the alternation to have matched at the beginning of the search.

For example, consider the pattern `.*[^A-Z]|[A-Z]` and the haystack `AAAAA`.
The first search will scan to the end looking for matches of `.*[^A-Z]` even
though a finite automata engine (as in this crate) knows that `[A-Z]` has
already matched the first character of the haystack. This is due to the greedy
nature of regex searching. That first search will report a match at the first
`A` only after scanning to the end to discover that no other match exists.
The next search then begins at the second `A` and the behavior repeats.

There is no way to avoid this. This means that if both patterns and haystacks
are untrusted and you're iterating over all matches, you're susceptible to
worst case quadratic time complexity. One possible way to mitigate this
is to drop down to the lower level `regex-automata` crate and use its
`meta::Regex` iterator APIs. There, you can configure the search to operate
in "earliest" mode by passing a `Input::new(haystack).earliest(true)` to
`meta::Regex::find_iter` (for example). By enabling this mode, you give up
the normal greedy match semantics of regex searches and instead ask the regex
engine to immediately stop as soon as a match has been found. Enabling this
mode will thus restore the worst case `O(m * n)` time complexity bound, but at
the cost of different semantics.

### Untrusted inputs in practice

While providing a `O(m * n)` worst case time bound on all searches goes a long
way toward preventing [ReDoS], that doesn't mean every search you can possibly
run will complete without burning CPU time. In general, there are a few ways
for the `m * n` time bound to still bite you:

* You are searching an exceptionally long haystack. No matter how you slice
it, a longer haystack will take more time to search. This crate may often make
very quick work of even long haystacks because of its literal optimizations,
but those aren't available for all regexes.
* Unicode character classes can cause searches to be quite slow in some cases.
This is especially true when they are combined with counted repetitions. While
the regex size limit above will protect you from the most egregious cases, the
default size limit still permits pretty big regexes that can execute more
slowly than one might expect.
* While routines like [`Regex::find`] and [`Regex::captures`] guarantee worst
case `O(m * n)` search time, routines like [`Regex::find_iter`] and
[`Regex::captures_iter`] actually have worst case `O(m * n^2)` search time.
This is because `find_iter` runs many searches, and each search takes worst
case `O(m * n)` time. Thus, iteration of all matches in a haystack has worst
case `O(m * n^2)`. A good example of a pattern that exhibits this is
`(?:A+){1000}|` or even `.*[^A-Z]|[A-Z]`.

In general, untrusted haystacks are easier to stomach than untrusted patterns.
Untrusted patterns give a lot more control to the caller to impact the
performance of a search. In many cases, a regex search will actually execute in
average case `O(n)` time (i.e., not dependent on the size of the regex), but
this can't be guaranteed in general. Therefore, permitting untrusted patterns
means that your only line of defense is to put a limit on how big `m` (and
perhaps also `n`) can be in `O(m * n)`.
`n` is limited by simply inspecting the length of the haystack while `m` is
limited by *both* applying a limit to the length of the pattern *and* a limit
on the compiled size of the regex via [`RegexBuilder::size_limit`].

It bears repeating: if you're accepting untrusted patterns, it would be a good
idea to start with conservative limits on `m` and `n`, and then carefully
increase them as needed.

# Crate features

By default, this crate tries pretty hard to make regex matching both as fast
as possible and as correct as it can be. This means that there is a lot of
code dedicated to performance, the handling of Unicode data and the Unicode
data itself. Overall, this leads to more dependencies, larger binaries and
longer compile times. This trade off may not be appropriate in all cases, and
indeed, even when all Unicode and performance features are disabled, one is
still left with a perfectly serviceable regex engine that will work well in
many cases. (Note that code is not arbitrarily reducible, and for this reason,
the [`regex-lite`](https://docs.rs/regex-lite) crate exists to provide an even
more minimal experience by cutting out Unicode and performance, but still
maintaining the linear search time bound.)

This crate exposes a number of features for controlling that trade off. Some
of these features are strictly performance oriented, such that disabling them
won't result in a loss of functionality, but may result in worse performance.
Other features, such as the ones controlling the presence or absence of
Unicode data, can result in a loss of functionality. For example, if one
disables the `unicode-case` feature (described below), then compiling the
regex `(?i)a` will fail since Unicode case insensitivity is enabled by
default. Instead, callers must use `(?i-u)a` to disable Unicode case folding.
Stated differently, enabling or disabling any of the features below can only
add or subtract from the total set of valid regular expressions. Enabling or
disabling a feature will never modify the match semantics of a regular
expression.

Most features below are enabled by default. Features that aren't enabled by
default are noted.

### Ecosystem features

* **std** - When enabled, this will cause `regex` to use the standard library.
In terms of APIs, `std` causes error types to implement the `std::error::Error`
trait. Enabling `std` will also result in performance optimizations, including
SIMD and faster synchronization primitives. Notably, **disabling the `std`
feature will result in the use of spin locks**. To use a regex engine without
`std` and without spin locks, you'll need to drop down to the
[`regex-automata`](https://docs.rs/regex-automata) crate.
* **logging** - When enabled, the `log` crate is used to emit messages about
regex compilation and search strategies. This is **disabled by default**. This
is typically only useful to someone working on this crate's internals, but
might be useful if you're doing some rabbit hole performance hacking. Or if
you're just interested in the kinds of decisions being made by the regex
engine.

### Performance features

* **perf** - Enables all performance related features except for
`perf-dfa-full`. This feature is enabled by default and is intended to cover
all reasonable features that improve performance, even if more are added in
the future.
* **perf-dfa** - Enables the use of a lazy DFA for matching. The lazy DFA is
used to compile portions of a regex to a very fast DFA on an as-needed basis.
This can result in substantial speedups, usually by an order of magnitude on large haystacks. The lazy DFA does not bring in any new dependencies, but it can make compile times longer. * **perf-dfa-full** - Enables the use of a full DFA for matching. Full DFAs are problematic because they have worst case `O(2^n)` construction time. For this reason, when this feature is enabled, full DFAs are only used for very small regexes and a very small space bound is used during determinization to avoid the DFA from blowing up. This feature is not enabled by default, even as part of `perf`, because it results in fairly sizeable increases in binary size and compilation time. It can result in faster search times, but they tend to be more modest and limited to non-Unicode regexes. * **perf-onepass** - Enables the use of a one-pass DFA for extracting the positions of capture groups. This optimization applies to a subset of certain types of NFAs and represents the fastest engine in this crate for dealing with capture groups. * **perf-backtrack** - Enables the use of a bounded backtracking algorithm for extracting the positions of capture groups. This usually sits between the slowest engine (the PikeVM) and the fastest engine (one-pass DFA) for extracting capture groups. It's used whenever the regex is not one-pass and is small enough. * **perf-inline** - Enables the use of aggressive inlining inside match routines. This reduces the overhead of each match. The aggressive inlining, however, increases compile times and binary size. * **perf-literal** - Enables the use of literal optimizations for speeding up matches. In some cases, literal optimizations can result in speedups of _several_ orders of magnitude. Disabling this drops the `aho-corasick` and `memchr` dependencies. * **perf-cache** - This feature used to enable a faster internal cache at the cost of using additional dependencies, but this is no longer an option. A fast internal cache is now used unconditionally with no additional dependencies. This may change in the future. ### Unicode features * **unicode** - Enables all Unicode features. This feature is enabled by default, and will always cover all Unicode features, even if more are added in the future. * **unicode-age** - Provide the data for the [Unicode `Age` property](https://www.unicode.org/reports/tr44/tr44-24.html#Character_Age). This makes it possible to use classes like `\p{Age:6.0}` to refer to all codepoints first introduced in Unicode 6.0 * **unicode-bool** - Provide the data for numerous Unicode boolean properties. The full list is not included here, but contains properties like `Alphabetic`, `Emoji`, `Lowercase`, `Math`, `Uppercase` and `White_Space`. * **unicode-case** - Provide the data for case insensitive matching using [Unicode's "simple loose matches" specification](https://www.unicode.org/reports/tr18/#Simple_Loose_Matches). * **unicode-gencat** - Provide the data for [Unicode general categories](https://www.unicode.org/reports/tr44/tr44-24.html#General_Category_Values). This includes, but is not limited to, `Decimal_Number`, `Letter`, `Math_Symbol`, `Number` and `Punctuation`. * **unicode-perl** - Provide the data for supporting the Unicode-aware Perl character classes, corresponding to `\w`, `\s` and `\d`. This is also necessary for using Unicode-aware word boundary assertions. Note that if this feature is disabled, the `\s` and `\d` character classes are still available if the `unicode-bool` and `unicode-gencat` features are enabled, respectively. 
* **unicode-script** - Provide the data for
[Unicode scripts and script extensions](https://www.unicode.org/reports/tr24/).
This includes, but is not limited to, `Arabic`, `Cyrillic`, `Hebrew`, `Latin`
and `Thai`.
* **unicode-segment** - Provide the data necessary to provide the properties
used to implement the [Unicode text segmentation
algorithms](https://www.unicode.org/reports/tr29/). This enables using classes
like `\p{gcb=Extend}`, `\p{wb=Katakana}` and `\p{sb=ATerm}`.

# Other crates

This crate has two required dependencies and several optional dependencies.
This section briefly describes them with the goal of raising awareness of how
different components of this crate may be used independently.

It is somewhat unusual for a regex engine to have dependencies, as most regex
libraries are self contained units with no dependencies other than a
particular environment's standard library. Indeed, for other similarly
optimized regex engines, most or all of the code in the dependencies of this
crate would normally just be inseparable or coupled parts of the crate itself.
But since Rust and its tooling ecosystem make the use of dependencies so easy,
it made sense to spend some effort de-coupling parts of this crate and making
them independently useful.

We only briefly describe each crate here.

* [`regex-lite`](https://docs.rs/regex-lite) is not a dependency of `regex`,
but rather, a standalone zero-dependency simpler version of `regex` that
prioritizes compile times and binary size. In exchange, it eschews Unicode
support and performance. Its match semantics are as identical as possible to
the `regex` crate, and for the things it supports, its APIs are identical to
the APIs in this crate. In other words, for a lot of use cases, it is a
drop-in replacement.
* [`regex-syntax`](https://docs.rs/regex-syntax) provides a regular expression
parser via `Ast` and `Hir` types. It also provides routines for extracting
literals from a pattern. Folks can use this crate to do analysis, or even to
build their own regex engine without having to worry about writing a parser.
* [`regex-automata`](https://docs.rs/regex-automata) provides the regex engines
themselves. One of the downsides of finite automata based regex engines is
that they often need multiple internal engines in order to have similar or
better performance than an unbounded backtracking engine in practice.
`regex-automata` in particular provides public APIs for a PikeVM, a bounded
backtracker, a one-pass DFA, a lazy DFA, a fully compiled DFA and a meta regex
engine that combines them all together. It also has native multi-pattern
support and provides a way to compile and serialize full DFAs such that they
can be loaded and searched in a no-std no-alloc environment. `regex-automata`
itself doesn't even have a required dependency on `regex-syntax`!
* [`memchr`](https://docs.rs/memchr) provides low level SIMD vectorized
routines for quickly finding the location of single bytes or even substrings
in a haystack. In other words, it provides fast `memchr` and `memmem` routines.
These are used by this crate in literal optimizations.
* [`aho-corasick`](https://docs.rs/aho-corasick) provides multi-substring
search. It also provides SIMD vectorized routines in the case where the number
of substrings to search for is relatively small. The `regex` crate also uses
this for literal optimizations.
*/ #![no_std] #![deny(missing_docs)] #![cfg_attr(feature = "pattern", feature(pattern))] #![warn(missing_debug_implementations)] #[cfg(doctest)] doc_comment::doctest!("../README.md"); extern crate alloc; #[cfg(any(test, feature = "std"))] extern crate std; pub use crate::error::Error; pub use crate::{builders::string::*, regex::string::*, regexset::string::*}; mod builders; pub mod bytes; mod error; mod find_byte; #[cfg(feature = "pattern")] mod pattern; mod regex; mod regexset; /// Escapes all regular expression meta characters in `pattern`. /// /// The string returned may be safely used as a literal in a regular /// expression. pub fn escape(pattern: &str) -> alloc::string::String { regex_syntax::escape(pattern) } <file_sep>/regex-automata/src/dfa/minimize.rs use core::{cell::RefCell, fmt, mem}; use alloc::{collections::BTreeMap, rc::Rc, vec, vec::Vec}; use crate::{ dfa::{automaton::Automaton, dense, DEAD}, util::{ alphabet, primitives::{PatternID, StateID}, }, }; /// An implementation of Hopcroft's algorithm for minimizing DFAs. /// /// The algorithm implemented here is mostly taken from Wikipedia: /// https://en.wikipedia.org/wiki/DFA_minimization#Hopcroft's_algorithm /// /// This code has had some light optimization attention paid to it, /// particularly in the form of reducing allocation as much as possible. /// However, it is still generally slow. Future optimization work should /// probably focus on the bigger picture rather than micro-optimizations. For /// example: /// /// 1. Figure out how to more intelligently create initial partitions. That is, /// Hopcroft's algorithm starts by creating two partitions of DFA states /// that are known to NOT be equivalent: match states and non-match states. /// The algorithm proceeds by progressively refining these partitions into /// smaller partitions. If we could start with more partitions, then we /// could reduce the amount of work that Hopcroft's algorithm needs to do. /// 2. For every partition that we visit, we find all incoming transitions to /// every state in the partition for *every* element in the alphabet. (This /// is why using byte classes can significantly decrease minimization times, /// since byte classes shrink the alphabet.) This is quite costly and there /// is perhaps some redundant work being performed depending on the specific /// states in the set. For example, we might be able to only visit some /// elements of the alphabet based on the transitions. /// 3. Move parts of minimization into determinization. If minimization has /// fewer states to deal with, then it should run faster. A prime example /// of this might be large Unicode classes, which are generated in way that /// can create a lot of redundant states. (Some work has been done on this /// point during NFA compilation via the algorithm described in the /// "Incremental Construction of MinimalAcyclic Finite-State Automata" /// paper.) pub(crate) struct Minimizer<'a> { dfa: &'a mut dense::OwnedDFA, in_transitions: Vec<Vec<Vec<StateID>>>, partitions: Vec<StateSet>, waiting: Vec<StateSet>, } impl<'a> fmt::Debug for Minimizer<'a> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Minimizer") .field("dfa", &self.dfa) .field("in_transitions", &self.in_transitions) .field("partitions", &self.partitions) .field("waiting", &self.waiting) .finish() } } /// A set of states. A state set makes up a single partition in Hopcroft's /// algorithm. /// /// It is represented by an ordered set of state identifiers. 
We use shared /// ownership so that a single state set can be in both the set of partitions /// and in the set of waiting sets simultaneously without an additional /// allocation. Generally, once a state set is built, it becomes immutable. /// /// We use this representation because it avoids the overhead of more /// traditional set data structures (HashSet/BTreeSet), and also because /// computing intersection/subtraction on this representation is especially /// fast. #[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord)] struct StateSet { ids: Rc<RefCell<Vec<StateID>>>, } impl<'a> Minimizer<'a> { pub fn new(dfa: &'a mut dense::OwnedDFA) -> Minimizer<'a> { let in_transitions = Minimizer::incoming_transitions(dfa); let partitions = Minimizer::initial_partitions(dfa); let waiting = partitions.clone(); Minimizer { dfa, in_transitions, partitions, waiting } } pub fn run(mut self) { let stride2 = self.dfa.stride2(); let as_state_id = |index: usize| -> StateID { StateID::new(index << stride2).unwrap() }; let as_index = |id: StateID| -> usize { id.as_usize() >> stride2 }; let mut incoming = StateSet::empty(); let mut scratch1 = StateSet::empty(); let mut scratch2 = StateSet::empty(); let mut newparts = vec![]; // This loop is basically Hopcroft's algorithm. Everything else is just // shuffling data around to fit our representation. while let Some(set) = self.waiting.pop() { for b in self.dfa.byte_classes().iter() { self.find_incoming_to(b, &set, &mut incoming); // If incoming is empty, then the intersection with any other // set must also be empty. So 'newparts' just ends up being // 'self.partitions'. So there's no need to go through the loop // below. // // This actually turns out to be rather large optimization. On // the order of making minimization 4-5x faster. It's likely // that the vast majority of all states have very few incoming // transitions. if incoming.is_empty() { continue; } for p in 0..self.partitions.len() { self.partitions[p].intersection(&incoming, &mut scratch1); if scratch1.is_empty() { newparts.push(self.partitions[p].clone()); continue; } self.partitions[p].subtract(&incoming, &mut scratch2); if scratch2.is_empty() { newparts.push(self.partitions[p].clone()); continue; } let (x, y) = (scratch1.deep_clone(), scratch2.deep_clone()); newparts.push(x.clone()); newparts.push(y.clone()); match self.find_waiting(&self.partitions[p]) { Some(i) => { self.waiting[i] = x; self.waiting.push(y); } None => { if x.len() <= y.len() { self.waiting.push(x); } else { self.waiting.push(y); } } } } newparts = mem::replace(&mut self.partitions, newparts); newparts.clear(); } } // At this point, we now have a minimal partitioning of states, where // each partition is an equivalence class of DFA states. Now we need to // use this partitioning to update the DFA to only contain one state for // each partition. // Create a map from DFA state ID to the representative ID of the // equivalence class to which it belongs. The representative ID of an // equivalence class of states is the minimum ID in that class. let mut state_to_part = vec![DEAD; self.dfa.state_len()]; for p in &self.partitions { p.iter(|id| state_to_part[as_index(id)] = p.min()); } // Generate a new contiguous sequence of IDs for minimal states, and // create a map from equivalence IDs to the new IDs. Thus, the new // minimal ID of *any* state in the unminimized DFA can be obtained // with minimals_ids[state_to_part[old_id]]. 
let mut minimal_ids = vec![DEAD; self.dfa.state_len()]; let mut new_index = 0; for state in self.dfa.states() { if state_to_part[as_index(state.id())] == state.id() { minimal_ids[as_index(state.id())] = as_state_id(new_index); new_index += 1; } } // The total number of states in the minimal DFA. let minimal_count = new_index; // Convenience function for remapping state IDs. This takes an old ID, // looks up its Hopcroft partition and then maps that to the new ID // range. let remap = |old| minimal_ids[as_index(state_to_part[as_index(old)])]; // Re-map this DFA in place such that the only states remaining // correspond to the representative states of every equivalence class. for id in (0..self.dfa.state_len()).map(as_state_id) { // If this state isn't a representative for an equivalence class, // then we skip it since it won't appear in the minimal DFA. if state_to_part[as_index(id)] != id { continue; } self.dfa.remap_state(id, remap); self.dfa.swap_states(id, minimal_ids[as_index(id)]); } // Trim off all unused states from the pre-minimized DFA. This // represents all states that were merged into a non-singleton // equivalence class of states, and appeared after the first state // in each such class. (Because the state with the smallest ID in each // equivalence class is its representative ID.) self.dfa.truncate_states(minimal_count); // Update the new start states, which is now just the minimal ID of // whatever state the old start state was collapsed into. Also, we // collect everything before-hand to work around the borrow checker. // We're already allocating so much that this is probably fine. If this // turns out to be costly, then I guess add a `starts_mut` iterator. let starts: Vec<_> = self.dfa.starts().collect(); for (old_start_id, anchored, start_type) in starts { self.dfa.set_start_state( anchored, start_type, remap(old_start_id), ); } // Update the match state pattern ID list for multi-regexes. All we // need to do is remap the match state IDs. The pattern ID lists are // always the same as they were since match states with distinct // pattern ID lists are always considered distinct states. let mut pmap = BTreeMap::new(); for (match_id, pattern_ids) in self.dfa.pattern_map() { let new_id = remap(match_id); pmap.insert(new_id, pattern_ids); } // This unwrap is OK because minimization never increases the number of // match states or patterns in those match states. Since minimization // runs after the pattern map has already been set at least once, we // know that our match states cannot error. self.dfa.set_pattern_map(&pmap).unwrap(); // In order to update the ID of the maximum match state, we need to // find the maximum ID among all of the match states in the minimized // DFA. This is not necessarily the new ID of the unminimized maximum // match state, since that could have been collapsed with a much // earlier match state. Therefore, to find the new max match state, // we iterate over all previous match states, find their corresponding // new minimal ID, and take the maximum of those. let old = self.dfa.special().clone(); let new = self.dfa.special_mut(); // ... but only remap if we had match states. if old.matches() { new.min_match = StateID::MAX; new.max_match = StateID::ZERO; for i in as_index(old.min_match)..=as_index(old.max_match) { let new_id = remap(as_state_id(i)); if new_id < new.min_match { new.min_match = new_id; } if new_id > new.max_match { new.max_match = new_id; } } } // ... same, but for start states. 
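        // Unlike the match state loop above, a remapped start state may be
        // DEAD (its equivalence class collapsed into the dead state), so such
        // IDs are skipped when computing the new min/max start range. If no
        // non-dead start state remains, the range is reset to DEAD below.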
if old.starts() { new.min_start = StateID::MAX; new.max_start = StateID::ZERO; for i in as_index(old.min_start)..=as_index(old.max_start) { let new_id = remap(as_state_id(i)); if new_id == DEAD { continue; } if new_id < new.min_start { new.min_start = new_id; } if new_id > new.max_start { new.max_start = new_id; } } if new.max_start == DEAD { new.min_start = DEAD; } } new.quit_id = remap(new.quit_id); new.set_max(); } fn find_waiting(&self, set: &StateSet) -> Option<usize> { self.waiting.iter().position(|s| s == set) } fn find_incoming_to( &self, b: alphabet::Unit, set: &StateSet, incoming: &mut StateSet, ) { incoming.clear(); set.iter(|id| { for &inid in &self.in_transitions[self.dfa.to_index(id)][b.as_usize()] { incoming.add(inid); } }); incoming.canonicalize(); } fn initial_partitions(dfa: &dense::OwnedDFA) -> Vec<StateSet> { // For match states, we know that two match states with different // pattern ID lists will *always* be distinct, so we can partition them // initially based on that. let mut matching: BTreeMap<Vec<PatternID>, StateSet> = BTreeMap::new(); let mut is_quit = StateSet::empty(); let mut no_match = StateSet::empty(); for state in dfa.states() { if dfa.is_match_state(state.id()) { let mut pids = vec![]; for i in 0..dfa.match_len(state.id()) { pids.push(dfa.match_pattern(state.id(), i)); } matching .entry(pids) .or_insert(StateSet::empty()) .add(state.id()); } else if dfa.is_quit_state(state.id()) { is_quit.add(state.id()); } else { no_match.add(state.id()); } } let mut sets: Vec<StateSet> = matching.into_iter().map(|(_, set)| set).collect(); sets.push(no_match); sets.push(is_quit); sets } fn incoming_transitions(dfa: &dense::OwnedDFA) -> Vec<Vec<Vec<StateID>>> { let mut incoming = vec![]; for _ in dfa.states() { incoming.push(vec![vec![]; dfa.alphabet_len()]); } for state in dfa.states() { for (b, next) in state.transitions() { incoming[dfa.to_index(next)][b.as_usize()].push(state.id()); } } incoming } } impl StateSet { fn empty() -> StateSet { StateSet { ids: Rc::new(RefCell::new(vec![])) } } fn add(&mut self, id: StateID) { self.ids.borrow_mut().push(id); } fn min(&self) -> StateID { self.ids.borrow()[0] } fn canonicalize(&mut self) { self.ids.borrow_mut().sort(); self.ids.borrow_mut().dedup(); } fn clear(&mut self) { self.ids.borrow_mut().clear(); } fn len(&self) -> usize { self.ids.borrow().len() } fn is_empty(&self) -> bool { self.len() == 0 } fn deep_clone(&self) -> StateSet { let ids = self.ids.borrow().iter().cloned().collect(); StateSet { ids: Rc::new(RefCell::new(ids)) } } fn iter<F: FnMut(StateID)>(&self, mut f: F) { for &id in self.ids.borrow().iter() { f(id); } } fn intersection(&self, other: &StateSet, dest: &mut StateSet) { dest.clear(); if self.is_empty() || other.is_empty() { return; } let (seta, setb) = (self.ids.borrow(), other.ids.borrow()); let (mut ita, mut itb) = (seta.iter().cloned(), setb.iter().cloned()); let (mut a, mut b) = (ita.next().unwrap(), itb.next().unwrap()); loop { if a == b { dest.add(a); a = match ita.next() { None => break, Some(a) => a, }; b = match itb.next() { None => break, Some(b) => b, }; } else if a < b { a = match ita.next() { None => break, Some(a) => a, }; } else { b = match itb.next() { None => break, Some(b) => b, }; } } } fn subtract(&self, other: &StateSet, dest: &mut StateSet) { dest.clear(); if self.is_empty() || other.is_empty() { self.iter(|s| dest.add(s)); return; } let (seta, setb) = (self.ids.borrow(), other.ids.borrow()); let (mut ita, mut itb) = (seta.iter().cloned(), setb.iter().cloned()); let (mut a, mut b) 
= (ita.next().unwrap(), itb.next().unwrap()); loop { if a == b { a = match ita.next() { None => break, Some(a) => a, }; b = match itb.next() { None => { dest.add(a); break; } Some(b) => b, }; } else if a < b { dest.add(a); a = match ita.next() { None => break, Some(a) => a, }; } else { b = match itb.next() { None => { dest.add(a); break; } Some(b) => b, }; } } for a in ita { dest.add(a); } } } <file_sep>/testdata/line-terminator.toml # This tests that we can switch the line terminator to the NUL byte. [[test]] name = "nul" regex = '(?m)^[a-z]+$' haystack = '\x00abc\x00' matches = [[1, 4]] unescape = true line-terminator = '\x00' # This tests that '.' will not match the configured line terminator, but will # match \n. [[test]] name = "dot-changes-with-line-terminator" regex = '.' haystack = '\x00\n' matches = [[1, 2]] unescape = true line-terminator = '\x00' # This tests that when we switch the line terminator, \n is no longer # recognized as the terminator. [[test]] name = "not-line-feed" regex = '(?m)^[a-z]+$' haystack = '\nabc\n' matches = [] unescape = true line-terminator = '\x00' # This tests that we can set the line terminator to a non-ASCII byte and have # it behave as expected. [[test]] name = "non-ascii" regex = '(?m)^[a-z]+$' haystack = '\xFFabc\xFF' matches = [[1, 4]] unescape = true line-terminator = '\xFF' utf8 = false # This tests that we can set the line terminator to a byte corresponding to a # word character, and things work as expected. [[test]] name = "word-byte" regex = '(?m)^[a-z]+$' haystack = 'ZabcZ' matches = [[1, 4]] unescape = true line-terminator = 'Z' # This tests that we can set the line terminator to a byte corresponding to a # non-word character, and things work as expected. [[test]] name = "non-word-byte" regex = '(?m)^[a-z]+$' haystack = '%abc%' matches = [[1, 4]] unescape = true line-terminator = '%' # This combines "set line terminator to a word byte" with a word boundary # assertion, which should result in no match even though ^/$ matches. [[test]] name = "word-boundary" regex = '(?m)^\b[a-z]+\b$' haystack = 'ZabcZ' matches = [] unescape = true line-terminator = 'Z' # Like 'word-boundary', but does an anchored search at the point where ^ # matches, but where \b should not. [[test]] name = "word-boundary-at" regex = '(?m)^\b[a-z]+\b$' haystack = 'ZabcZ' matches = [] bounds = [1, 4] anchored = true unescape = true line-terminator = 'Z' # Like 'word-boundary-at', but flips the word boundary to a negation. This # in particular tests a tricky case in DFA engines, where they must consider # explicitly that a starting configuration from a custom line terminator may # also required setting the "is from word byte" flag on a state. Otherwise, # it's treated as "not from a word byte," which would result in \B not matching # here when it should. [[test]] name = "not-word-boundary-at" regex = '(?m)^\B[a-z]+\B$' haystack = 'ZabcZ' matches = [[1, 4]] bounds = [1, 4] anchored = true unescape = true line-terminator = 'Z' <file_sep>/regex-automata/src/dfa/automaton.rs #[cfg(feature = "alloc")] use crate::util::search::PatternSet; use crate::{ dfa::search, util::{ empty, prefilter::Prefilter, primitives::{PatternID, StateID}, search::{Anchored, HalfMatch, Input, MatchError}, }, }; /// A trait describing the interface of a deterministic finite automaton (DFA). /// /// The complexity of this trait probably means that it's unlikely for others /// to implement it. The primary purpose of the trait is to provide for a way /// of abstracting over different types of DFAs. 
In this crate, that means /// dense DFAs and sparse DFAs. (Dense DFAs are fast but memory hungry, where /// as sparse DFAs are slower but come with a smaller memory footprint. But /// they otherwise provide exactly equivalent expressive power.) For example, a /// [`dfa::regex::Regex`](crate::dfa::regex::Regex) is generic over this trait. /// /// Normally, a DFA's execution model is very simple. You might have a single /// start state, zero or more final or "match" states and a function that /// transitions from one state to the next given the next byte of input. /// Unfortunately, the interface described by this trait is significantly /// more complicated than this. The complexity has a number of different /// reasons, mostly motivated by performance, functionality or space savings: /// /// * A DFA can search for multiple patterns simultaneously. This /// means extra information is returned when a match occurs. Namely, /// a match is not just an offset, but an offset plus a pattern ID. /// [`Automaton::pattern_len`] returns the number of patterns compiled into /// the DFA, [`Automaton::match_len`] returns the total number of patterns /// that match in a particular state and [`Automaton::match_pattern`] permits /// iterating over the patterns that match in a particular state. /// * A DFA can have multiple start states, and the choice of which start /// state to use depends on the content of the string being searched and /// position of the search, as well as whether the search is an anchored /// search for a specific pattern in the DFA. Moreover, computing the start /// state also depends on whether you're doing a forward or a reverse search. /// [`Automaton::start_state_forward`] and [`Automaton::start_state_reverse`] /// are used to compute the start state for forward and reverse searches, /// respectively. /// * All matches are delayed by one byte to support things like `$` and `\b` /// at the end of a pattern. Therefore, every use of a DFA is required to use /// [`Automaton::next_eoi_state`] /// at the end of the search to compute the final transition. /// * For optimization reasons, some states are treated specially. Every /// state is either special or not, which can be determined via the /// [`Automaton::is_special_state`] method. If it's special, then the state /// must be at least one of a few possible types of states. (Note that some /// types can overlap, for example, a match state can also be an accel state. /// But some types can't. If a state is a dead state, then it can never be any /// other type of state.) Those types are: /// * A dead state. A dead state means the DFA will never enter a match /// state. This can be queried via the [`Automaton::is_dead_state`] method. /// * A quit state. A quit state occurs if the DFA had to stop the search /// prematurely for some reason. This can be queried via the /// [`Automaton::is_quit_state`] method. /// * A match state. A match state occurs when a match is found. When a DFA /// enters a match state, the search may stop immediately (when looking /// for the earliest match), or it may continue to find the leftmost-first /// match. This can be queried via the [`Automaton::is_match_state`] /// method. /// * A start state. A start state is where a search begins. For every /// search, there is exactly one start state that is used, however, a /// DFA may contain many start states. When the search is in a start /// state, it may use a prefilter to quickly skip to candidate matches /// without executing the DFA on every byte. 
This can be queried via the /// [`Automaton::is_start_state`] method. /// * An accel state. An accel state is a state that is accelerated. /// That is, it is a state where _most_ of its transitions loop back to /// itself and only a small number of transitions lead to other states. /// This kind of state is said to be accelerated because a search routine /// can quickly look for the bytes leading out of the state instead of /// continuing to execute the DFA on each byte. This can be queried via the /// [`Automaton::is_accel_state`] method. And the bytes that lead out of /// the state can be queried via the [`Automaton::accelerator`] method. /// /// There are a number of provided methods on this trait that implement /// efficient searching (for forwards and backwards) with a DFA using /// all of the above features of this trait. In particular, given the /// complexity of all these features, implementing a search routine in /// this trait can be a little subtle. With that said, it is possible to /// somewhat simplify the search routine. For example, handling accelerated /// states is strictly optional, since it is always correct to assume that /// `Automaton::is_accel_state` returns false. However, one complex part of /// writing a search routine using this trait is handling the 1-byte delay of a /// match. That is not optional. /// /// # Safety /// /// This trait is not safe to implement so that code may rely on the /// correctness of implementations of this trait to avoid undefined behavior. /// The primary correctness guarantees are: /// /// * `Automaton::start_state` always returns a valid state ID or an error or /// panics. /// * `Automaton::next_state`, when given a valid state ID, always returns /// a valid state ID for all values of `anchored` and `byte`, or otherwise /// panics. /// /// In general, the rest of the methods on `Automaton` need to uphold their /// contracts as well. For example, `Automaton::is_dead` should only returns /// true if the given state ID is actually a dead state. pub unsafe trait Automaton { /// Transitions from the current state to the next state, given the next /// byte of input. /// /// Implementations must guarantee that the returned ID is always a valid /// ID when `current` refers to a valid ID. Moreover, the transition /// function must be defined for all possible values of `input`. /// /// # Panics /// /// If the given ID does not refer to a valid state, then this routine /// may panic but it also may not panic and instead return an invalid ID. /// However, if the caller provides an invalid ID then this must never /// sacrifice memory safety. /// /// # Example /// /// This shows a simplistic example for walking a DFA for a given haystack /// by using the `next_state` method. /// /// ``` /// use regex_automata::{dfa::{Automaton, dense}, Input}; /// /// let dfa = dense::DFA::new(r"[a-z]+r")?; /// let haystack = "bar".as_bytes(); /// /// // The start state is determined by inspecting the position and the /// // initial bytes of the haystack. /// let mut state = dfa.start_state_forward(&Input::new(haystack))?; /// // Walk all the bytes in the haystack. /// for &b in haystack { /// state = dfa.next_state(state, b); /// } /// // Matches are always delayed by 1 byte, so we must explicitly walk the /// // special "EOI" transition at the end of the search. 
/// state = dfa.next_eoi_state(state); /// assert!(dfa.is_match_state(state)); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` fn next_state(&self, current: StateID, input: u8) -> StateID; /// Transitions from the current state to the next state, given the next /// byte of input. /// /// Unlike [`Automaton::next_state`], implementations may implement this /// more efficiently by assuming that the `current` state ID is valid. /// Typically, this manifests by eliding bounds checks. /// /// # Safety /// /// Callers of this method must guarantee that `current` refers to a valid /// state ID. If `current` is not a valid state ID for this automaton, then /// calling this routine may result in undefined behavior. /// /// If `current` is valid, then implementations must guarantee that the ID /// returned is valid for all possible values of `input`. unsafe fn next_state_unchecked( &self, current: StateID, input: u8, ) -> StateID; /// Transitions from the current state to the next state for the special /// EOI symbol. /// /// Implementations must guarantee that the returned ID is always a valid /// ID when `current` refers to a valid ID. /// /// This routine must be called at the end of every search in a correct /// implementation of search. Namely, DFAs in this crate delay matches /// by one byte in order to support look-around operators. Thus, after /// reaching the end of a haystack, a search implementation must follow one /// last EOI transition. /// /// It is best to think of EOI as an additional symbol in the alphabet of /// a DFA that is distinct from every other symbol. That is, the alphabet /// of DFAs in this crate has a logical size of 257 instead of 256, where /// 256 corresponds to every possible inhabitant of `u8`. (In practice, the /// physical alphabet size may be smaller because of alphabet compression /// via equivalence classes, but EOI is always represented somehow in the /// alphabet.) /// /// # Panics /// /// If the given ID does not refer to a valid state, then this routine /// may panic but it also may not panic and instead return an invalid ID. /// However, if the caller provides an invalid ID then this must never /// sacrifice memory safety. /// /// # Example /// /// This shows a simplistic example for walking a DFA for a given haystack, /// and then finishing the search with the final EOI transition. /// /// ``` /// use regex_automata::{dfa::{Automaton, dense}, Input}; /// /// let dfa = dense::DFA::new(r"[a-z]+r")?; /// let haystack = "bar".as_bytes(); /// /// // The start state is determined by inspecting the position and the /// // initial bytes of the haystack. /// // /// // The unwrap is OK because we aren't requesting a start state for a /// // specific pattern. /// let mut state = dfa.start_state_forward(&Input::new(haystack))?; /// // Walk all the bytes in the haystack. /// for &b in haystack { /// state = dfa.next_state(state, b); /// } /// // Matches are always delayed by 1 byte, so we must explicitly walk /// // the special "EOI" transition at the end of the search. Without this /// // final transition, the assert below will fail since the DFA will not /// // have entered a match state yet! /// state = dfa.next_eoi_state(state); /// assert!(dfa.is_match_state(state)); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` fn next_eoi_state(&self, current: StateID) -> StateID; /// Return the ID of the start state for this lazy DFA when executing a /// forward search. 
/// /// Unlike typical DFA implementations, the start state for DFAs in this /// crate is dependent on a few different factors: /// /// * The [`Anchored`] mode of the search. Unanchored, anchored and /// anchored searches for a specific [`PatternID`] all use different start /// states. /// * The position at which the search begins, via [`Input::start`]. This /// and the byte immediately preceding the start of the search (if one /// exists) influence which look-behind assertions are true at the start /// of the search. This in turn influences which start state is selected. /// * Whether the search is a forward or reverse search. This routine can /// only be used for forward searches. /// /// # Errors /// /// This may return a [`MatchError`] if the search needs to give up /// when determining the start state (for example, if it sees a "quit" /// byte). This can also return an error if the given `Input` contains an /// unsupported [`Anchored`] configuration. fn start_state_forward( &self, input: &Input<'_>, ) -> Result<StateID, MatchError>; /// Return the ID of the start state for this lazy DFA when executing a /// reverse search. /// /// Unlike typical DFA implementations, the start state for DFAs in this /// crate is dependent on a few different factors: /// /// * The [`Anchored`] mode of the search. Unanchored, anchored and /// anchored searches for a specific [`PatternID`] all use different start /// states. /// * The position at which the search begins, via [`Input::start`]. This /// and the byte immediately preceding the start of the search (if one /// exists) influence which look-behind assertions are true at the start /// of the search. This in turn influences which start state is selected. /// * Whether the search is a forward or reverse search. This routine can /// only be used for reverse searches. /// /// # Errors /// /// This may return a [`MatchError`] if the search needs to give up /// when determining the start state (for example, if it sees a "quit" /// byte). This can also return an error if the given `Input` contains an /// unsupported [`Anchored`] configuration. fn start_state_reverse( &self, input: &Input<'_>, ) -> Result<StateID, MatchError>; /// If this DFA has a universal starting state for the given anchor mode /// and the DFA supports universal starting states, then this returns that /// state's identifier. /// /// A DFA is said to have a universal starting state when the starting /// state is invariant with respect to the haystack. Usually, the starting /// state is chosen depending on the bytes immediately surrounding the /// starting position of a search. However, the starting state only differs /// when one or more of the patterns in the DFA have look-around assertions /// in its prefix. /// /// Stated differently, if none of the patterns in a DFA have look-around /// assertions in their prefix, then the DFA has a universal starting state /// and _may_ be returned by this method. /// /// It is always correct for implementations to return `None`, and indeed, /// this is what the default implementation does. When this returns `None`, /// callers must use either `start_state_forward` or `start_state_reverse` /// to get the starting state. /// /// # Use case /// /// There are a few reasons why one might want to use this: /// /// * If you know your regex patterns have no look-around assertions in /// their prefix, then calling this routine is likely cheaper and perhaps /// more semantically meaningful. 
/// * When implementing prefilter support in a DFA regex implementation, /// it is necessary to re-compute the start state after a candidate /// is returned from the prefilter. However, this is only needed when /// there isn't a universal start state. When one exists, one can avoid /// re-computing the start state. /// /// # Example /// /// ``` /// use regex_automata::{ /// dfa::{Automaton, dense::DFA}, /// Anchored, /// }; /// /// // There are no look-around assertions in the prefixes of any of the /// // patterns, so we get a universal start state. /// let dfa = DFA::new_many(&["[0-9]+", "[a-z]+$", "[A-Z]+"])?; /// assert!(dfa.universal_start_state(Anchored::No).is_some()); /// assert!(dfa.universal_start_state(Anchored::Yes).is_some()); /// /// // One of the patterns has a look-around assertion in its prefix, /// // so this means there is no longer a universal start state. /// let dfa = DFA::new_many(&["[0-9]+", "^[a-z]+$", "[A-Z]+"])?; /// assert!(!dfa.universal_start_state(Anchored::No).is_some()); /// assert!(!dfa.universal_start_state(Anchored::Yes).is_some()); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] fn universal_start_state(&self, _mode: Anchored) -> Option<StateID> { None } /// Returns true if and only if the given identifier corresponds to a /// "special" state. A special state is one or more of the following: /// a dead state, a quit state, a match state, a start state or an /// accelerated state. /// /// A correct implementation _may_ always return false for states that /// are either start states or accelerated states, since that information /// is only intended to be used for optimization purposes. Correct /// implementations must return true if the state is a dead, quit or match /// state. This is because search routines using this trait must be able /// to rely on `is_special_state` as an indicator that a state may need /// special treatment. (For example, when a search routine sees a dead /// state, it must terminate.) /// /// This routine permits search implementations to use a single branch to /// check whether a state needs special attention before executing the next /// transition. The example below shows how to do this. /// /// # Example /// /// This example shows how `is_special_state` can be used to implement a /// correct search routine with minimal branching. In particular, this /// search routine implements "leftmost" matching, which means that it /// doesn't immediately stop once a match is found. Instead, it continues /// until it reaches a dead state. /// /// ``` /// use regex_automata::{ /// dfa::{Automaton, dense}, /// HalfMatch, MatchError, Input, /// }; /// /// fn find<A: Automaton>( /// dfa: &A, /// haystack: &[u8], /// ) -> Result<Option<HalfMatch>, MatchError> { /// // The start state is determined by inspecting the position and the /// // initial bytes of the haystack. Note that start states can never /// // be match states (since DFAs in this crate delay matches by 1 /// // byte), so we don't need to check if the start state is a match. /// let mut state = dfa.start_state_forward(&Input::new(haystack))?; /// let mut last_match = None; /// // Walk all the bytes in the haystack. We can quit early if we see /// // a dead or a quit state. The former means the automaton will /// // never transition to any other state. The latter means that the /// // automaton entered a condition in which its search failed. 
/// for (i, &b) in haystack.iter().enumerate() { /// state = dfa.next_state(state, b); /// if dfa.is_special_state(state) { /// if dfa.is_match_state(state) { /// last_match = Some(HalfMatch::new( /// dfa.match_pattern(state, 0), /// i, /// )); /// } else if dfa.is_dead_state(state) { /// return Ok(last_match); /// } else if dfa.is_quit_state(state) { /// // It is possible to enter into a quit state after /// // observing a match has occurred. In that case, we /// // should return the match instead of an error. /// if last_match.is_some() { /// return Ok(last_match); /// } /// return Err(MatchError::quit(b, i)); /// } /// // Implementors may also want to check for start or accel /// // states and handle them differently for performance /// // reasons. But it is not necessary for correctness. /// } /// } /// // Matches are always delayed by 1 byte, so we must explicitly walk /// // the special "EOI" transition at the end of the search. /// state = dfa.next_eoi_state(state); /// if dfa.is_match_state(state) { /// last_match = Some(HalfMatch::new( /// dfa.match_pattern(state, 0), /// haystack.len(), /// )); /// } /// Ok(last_match) /// } /// /// // We use a greedy '+' operator to show how the search doesn't just /// // stop once a match is detected. It continues extending the match. /// // Using '[a-z]+?' would also work as expected and stop the search /// // early. Greediness is built into the automaton. /// let dfa = dense::DFA::new(r"[a-z]+")?; /// let haystack = "123 foobar 4567".as_bytes(); /// let mat = find(&dfa, haystack)?.unwrap(); /// assert_eq!(mat.pattern().as_usize(), 0); /// assert_eq!(mat.offset(), 10); /// /// // Here's another example that tests our handling of the special EOI /// // transition. This will fail to find a match if we don't call /// // 'next_eoi_state' at the end of the search since the match isn't /// // found until the final byte in the haystack. /// let dfa = dense::DFA::new(r"[0-9]{4}")?; /// let haystack = "123 foobar 4567".as_bytes(); /// let mat = find(&dfa, haystack)?.unwrap(); /// assert_eq!(mat.pattern().as_usize(), 0); /// assert_eq!(mat.offset(), 15); /// /// // And note that our search implementation above automatically works /// // with multi-DFAs. Namely, `dfa.match_pattern(match_state, 0)` selects /// // the appropriate pattern ID for us. /// let dfa = dense::DFA::new_many(&[r"[a-z]+", r"[0-9]+"])?; /// let haystack = "123 foobar 4567".as_bytes(); /// let mat = find(&dfa, haystack)?.unwrap(); /// assert_eq!(mat.pattern().as_usize(), 1); /// assert_eq!(mat.offset(), 3); /// let mat = find(&dfa, &haystack[3..])?.unwrap(); /// assert_eq!(mat.pattern().as_usize(), 0); /// assert_eq!(mat.offset(), 7); /// let mat = find(&dfa, &haystack[10..])?.unwrap(); /// assert_eq!(mat.pattern().as_usize(), 1); /// assert_eq!(mat.offset(), 5); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` fn is_special_state(&self, id: StateID) -> bool; /// Returns true if and only if the given identifier corresponds to a dead /// state. When a DFA enters a dead state, it is impossible to leave. That /// is, every transition on a dead state by definition leads back to the /// same dead state. /// /// In practice, the dead state always corresponds to the identifier `0`. /// Moreover, in practice, there is only one dead state. /// /// The existence of a dead state is not strictly required in the classical /// model of finite state machines, where one generally only cares about /// the question of whether an input sequence matches or not. 
Dead states /// are not needed to answer that question, since one can immediately quit /// as soon as one enters a final or "match" state. However, we don't just /// care about matches but also care about the location of matches, and /// more specifically, care about semantics like "greedy" matching. /// /// For example, given the pattern `a+` and the input `aaaz`, the dead /// state won't be entered until the state machine reaches `z` in the /// input, at which point, the search routine can quit. But without the /// dead state, the search routine wouldn't know when to quit. In a /// classical representation, the search routine would stop after seeing /// the first `a` (which is when the search would enter a match state). But /// this wouldn't implement "greedy" matching where `a+` matches as many /// `a`'s as possible. /// /// # Example /// /// See the example for [`Automaton::is_special_state`] for how to use this /// method correctly. fn is_dead_state(&self, id: StateID) -> bool; /// Returns true if and only if the given identifier corresponds to a quit /// state. A quit state is like a dead state (it has no transitions other /// than to itself), except it indicates that the DFA failed to complete /// the search. When this occurs, callers can neither accept or reject that /// a match occurred. /// /// In practice, the quit state always corresponds to the state immediately /// following the dead state. (Which is not usually represented by `1`, /// since state identifiers are pre-multiplied by the state machine's /// alphabet stride, and the alphabet stride varies between DFAs.) /// /// The typical way in which a quit state can occur is when heuristic /// support for Unicode word boundaries is enabled via the /// [`dense::Config::unicode_word_boundary`](crate::dfa::dense::Config::unicode_word_boundary) /// option. But other options, like the lower level /// [`dense::Config::quit`](crate::dfa::dense::Config::quit) /// configuration, can also result in a quit state being entered. The /// purpose of the quit state is to provide a way to execute a fast DFA /// in common cases while delegating to slower routines when the DFA quits. /// /// The default search implementations provided by this crate will return a /// [`MatchError::quit`] error when a quit state is entered. /// /// # Example /// /// See the example for [`Automaton::is_special_state`] for how to use this /// method correctly. fn is_quit_state(&self, id: StateID) -> bool; /// Returns true if and only if the given identifier corresponds to a /// match state. A match state is also referred to as a "final" state and /// indicates that a match has been found. /// /// If all you care about is whether a particular pattern matches in the /// input sequence, then a search routine can quit early as soon as the /// machine enters a match state. However, if you're looking for the /// standard "leftmost-first" match location, then search _must_ continue /// until either the end of the input or until the machine enters a dead /// state. (Since either condition implies that no other useful work can /// be done.) Namely, when looking for the location of a match, then /// search implementations should record the most recent location in /// which a match state was entered, but otherwise continue executing the /// search as normal. (The search may even leave the match state.) Once /// the termination condition is reached, the most recently recorded match /// location should be returned. 
/// /// Finally, one additional power given to match states in this crate /// is that they are always associated with a specific pattern in order /// to support multi-DFAs. See [`Automaton::match_pattern`] for more /// details and an example for how to query the pattern associated with a /// particular match state. /// /// # Example /// /// See the example for [`Automaton::is_special_state`] for how to use this /// method correctly. fn is_match_state(&self, id: StateID) -> bool; /// Returns true only if the given identifier corresponds to a start /// state /// /// A start state is a state in which a DFA begins a search. /// All searches begin in a start state. Moreover, since all matches are /// delayed by one byte, a start state can never be a match state. /// /// The main role of a start state is, as mentioned, to be a starting /// point for a DFA. This starting point is determined via one of /// [`Automaton::start_state_forward`] or /// [`Automaton::start_state_reverse`], depending on whether one is doing /// a forward or a reverse search, respectively. /// /// A secondary use of start states is for prefix acceleration. Namely, /// while executing a search, if one detects that you're in a start state, /// then it may be faster to look for the next match of a prefix of the /// pattern, if one exists. If a prefix exists and since all matches must /// begin with that prefix, then skipping ahead to occurrences of that /// prefix may be much faster than executing the DFA. /// /// As mentioned in the documentation for /// [`is_special_state`](Automaton::is_special_state) implementations /// _may_ always return false, even if the given identifier is a start /// state. This is because knowing whether a state is a start state or not /// is not necessary for correctness and is only treated as a potential /// performance optimization. (For example, the implementations of this /// trait in this crate will only return true when the given identifier /// corresponds to a start state and when [specialization of start /// states](crate::dfa::dense::Config::specialize_start_states) was enabled /// during DFA construction. If start state specialization is disabled /// (which is the default), then this method will always return false.) /// /// # Example /// /// This example shows how to implement your own search routine that does /// a prefix search whenever the search enters a start state. /// /// Note that you do not need to implement your own search routine /// to make use of prefilters like this. The search routines /// provided by this crate already implement prefilter support via /// the [`Prefilter`](crate::util::prefilter::Prefilter) trait. /// A prefilter can be added to your search configuration with /// [`dense::Config::prefilter`](crate::dfa::dense::Config::prefilter) for /// dense and sparse DFAs in this crate. /// /// This example is meant to show how you might deal with prefilters in a /// simplified case if you are implementing your own search routine. /// /// ``` /// use regex_automata::{ /// dfa::{Automaton, dense}, /// HalfMatch, MatchError, Input, /// }; /// /// fn find_byte(slice: &[u8], at: usize, byte: u8) -> Option<usize> { /// // Would be faster to use the memchr crate, but this is still /// // faster than running through the DFA. 
/// slice[at..].iter().position(|&b| b == byte).map(|i| at + i) /// } /// /// fn find<A: Automaton>( /// dfa: &A, /// haystack: &[u8], /// prefix_byte: Option<u8>, /// ) -> Result<Option<HalfMatch>, MatchError> { /// // See the Automaton::is_special_state example for similar code /// // with more comments. /// /// let mut state = dfa.start_state_forward(&Input::new(haystack))?; /// let mut last_match = None; /// let mut pos = 0; /// while pos < haystack.len() { /// let b = haystack[pos]; /// state = dfa.next_state(state, b); /// pos += 1; /// if dfa.is_special_state(state) { /// if dfa.is_match_state(state) { /// last_match = Some(HalfMatch::new( /// dfa.match_pattern(state, 0), /// pos - 1, /// )); /// } else if dfa.is_dead_state(state) { /// return Ok(last_match); /// } else if dfa.is_quit_state(state) { /// // It is possible to enter into a quit state after /// // observing a match has occurred. In that case, we /// // should return the match instead of an error. /// if last_match.is_some() { /// return Ok(last_match); /// } /// return Err(MatchError::quit(b, pos - 1)); /// } else if dfa.is_start_state(state) { /// // If we're in a start state and know all matches begin /// // with a particular byte, then we can quickly skip to /// // candidate matches without running the DFA through /// // every byte inbetween. /// if let Some(prefix_byte) = prefix_byte { /// pos = match find_byte(haystack, pos, prefix_byte) { /// Some(pos) => pos, /// None => break, /// }; /// } /// } /// } /// } /// // Matches are always delayed by 1 byte, so we must explicitly walk /// // the special "EOI" transition at the end of the search. /// state = dfa.next_eoi_state(state); /// if dfa.is_match_state(state) { /// last_match = Some(HalfMatch::new( /// dfa.match_pattern(state, 0), /// haystack.len(), /// )); /// } /// Ok(last_match) /// } /// /// // In this example, it's obvious that all occurrences of our pattern /// // begin with 'Z', so we pass in 'Z'. Note also that we need to /// // enable start state specialization, or else it won't be possible to /// // detect start states during a search. ('is_start_state' would always /// // return false.) /// let dfa = dense::DFA::builder() /// .configure(dense::DFA::config().specialize_start_states(true)) /// .build(r"Z[a-z]+")?; /// let haystack = "123 foobar Zbaz quux".as_bytes(); /// let mat = find(&dfa, haystack, Some(b'Z'))?.unwrap(); /// assert_eq!(mat.pattern().as_usize(), 0); /// assert_eq!(mat.offset(), 15); /// /// // But note that we don't need to pass in a prefix byte. If we don't, /// // then the search routine does no acceleration. /// let mat = find(&dfa, haystack, None)?.unwrap(); /// assert_eq!(mat.pattern().as_usize(), 0); /// assert_eq!(mat.offset(), 15); /// /// // However, if we pass an incorrect byte, then the prefix search will /// // result in incorrect results. /// assert_eq!(find(&dfa, haystack, Some(b'X'))?, None); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` fn is_start_state(&self, id: StateID) -> bool; /// Returns true if and only if the given identifier corresponds to an /// accelerated state. /// /// An accelerated state is a special optimization /// trick implemented by this crate. Namely, if /// [`dense::Config::accelerate`](crate::dfa::dense::Config::accelerate) is /// enabled (and it is by default), then DFAs generated by this crate will /// tag states meeting certain characteristics as accelerated. States meet /// this criteria whenever most of their transitions are self-transitions. 
/// That is, transitions that loop back to the same state. When a small /// number of transitions aren't self-transitions, then it follows that /// there are only a small number of bytes that can cause the DFA to leave /// that state. Thus, there is an opportunity to look for those bytes /// using more optimized routines rather than continuing to run through /// the DFA. This trick is similar to the prefilter idea described in /// the documentation of [`Automaton::is_start_state`] with two main /// differences: /// /// 1. It is more limited since acceleration only applies to single bytes. /// This means states are rarely accelerated when Unicode mode is enabled /// (which is enabled by default). /// 2. It can occur anywhere in the DFA, which increases optimization /// opportunities. /// /// Like the prefilter idea, the main downside (and a possible reason to /// disable it) is that it can lead to worse performance in some cases. /// Namely, if a state is accelerated for very common bytes, then the /// overhead of checking for acceleration and using the more optimized /// routines to look for those bytes can cause overall performance to be /// worse than if acceleration wasn't enabled at all. /// /// A simple example of a regex that has an accelerated state is /// `(?-u)[^a]+a`. Namely, the `[^a]+` sub-expression gets compiled down /// into a single state where all transitions except for `a` loop back to /// itself, and where `a` is the only transition (other than the special /// EOI transition) that goes to some other state. Thus, this state can /// be accelerated and implemented more efficiently by calling an /// optimized routine like `memchr` with `a` as the needle. Notice that /// the `(?-u)` to disable Unicode is necessary here, as without it, /// `[^a]` will match any UTF-8 encoding of any Unicode scalar value other /// than `a`. This more complicated expression compiles down to many DFA /// states and the simple acceleration optimization is no longer available. /// /// Typically, this routine is used to guard calls to /// [`Automaton::accelerator`], which returns the accelerated bytes for /// the specified state. fn is_accel_state(&self, id: StateID) -> bool; /// Returns the total number of patterns compiled into this DFA. /// /// In the case of a DFA that contains no patterns, this must return `0`. /// /// # Example /// /// This example shows the pattern length for a DFA that never matches: /// /// ``` /// use regex_automata::dfa::{Automaton, dense::DFA}; /// /// let dfa: DFA<Vec<u32>> = DFA::never_match()?; /// assert_eq!(dfa.pattern_len(), 0); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// And another example for a DFA that matches at every position: /// /// ``` /// use regex_automata::dfa::{Automaton, dense::DFA}; /// /// let dfa: DFA<Vec<u32>> = DFA::always_match()?; /// assert_eq!(dfa.pattern_len(), 1); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// And finally, a DFA that was constructed from multiple patterns: /// /// ``` /// use regex_automata::dfa::{Automaton, dense::DFA}; /// /// let dfa = DFA::new_many(&["[0-9]+", "[a-z]+", "[A-Z]+"])?; /// assert_eq!(dfa.pattern_len(), 3); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` fn pattern_len(&self) -> usize; /// Returns the total number of patterns that match in this state. /// /// If the given state is not a match state, then implementations may /// panic. /// /// If the DFA was compiled with one pattern, then this must necessarily /// always return `1` for all match states. 
/// /// Implementations must guarantee that [`Automaton::match_pattern`] can be /// called with indices up to (but not including) the length returned by /// this routine without panicking. /// /// # Panics /// /// Implementations are permitted to panic if the provided state ID does /// not correspond to a match state. /// /// # Example /// /// This example shows a simple instance of implementing overlapping /// matches. In particular, it shows not only how to determine how many /// patterns have matched in a particular state, but also how to access /// which specific patterns have matched. /// /// Notice that we must use /// [`MatchKind::All`](crate::MatchKind::All) /// when building the DFA. If we used /// [`MatchKind::LeftmostFirst`](crate::MatchKind::LeftmostFirst) /// instead, then the DFA would not be constructed in a way that /// supports overlapping matches. (It would only report a single pattern /// that matches at any particular point in time.) /// /// Another thing to take note of is the patterns used and the order in /// which the pattern IDs are reported. In the example below, pattern `3` /// is yielded first. Why? Because it corresponds to the match that /// appears first. Namely, the `@` symbol is part of `\S+` but not part /// of any of the other patterns. Since the `\S+` pattern has a match that /// starts to the left of any other pattern, its ID is returned before any /// other. /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{dfa::{Automaton, dense}, Input, MatchKind}; /// /// let dfa = dense::Builder::new() /// .configure(dense::Config::new().match_kind(MatchKind::All)) /// .build_many(&[ /// r"[[:word:]]+", r"[a-z]+", r"[A-Z]+", r"[[:^space:]]+", /// ])?; /// let haystack = "@bar".as_bytes(); /// /// // The start state is determined by inspecting the position and the /// // initial bytes of the haystack. /// let mut state = dfa.start_state_forward(&Input::new(haystack))?; /// // Walk all the bytes in the haystack. /// for &b in haystack { /// state = dfa.next_state(state, b); /// } /// state = dfa.next_eoi_state(state); /// /// assert!(dfa.is_match_state(state)); /// assert_eq!(dfa.match_len(state), 3); /// // The following calls are guaranteed to not panic since `match_len` /// // returned `3` above. /// assert_eq!(dfa.match_pattern(state, 0).as_usize(), 3); /// assert_eq!(dfa.match_pattern(state, 1).as_usize(), 0); /// assert_eq!(dfa.match_pattern(state, 2).as_usize(), 1); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` fn match_len(&self, id: StateID) -> usize; /// Returns the pattern ID corresponding to the given match index in the /// given state. /// /// See [`Automaton::match_len`] for an example of how to use this /// method correctly. Note that if you know your DFA is compiled with a /// single pattern, then this routine is never necessary since it will /// always return a pattern ID of `0` for an index of `0` when `id` /// corresponds to a match state. /// /// Typically, this routine is used when implementing an overlapping /// search, as the example for `Automaton::match_len` does. /// /// # Panics /// /// If the state ID is not a match state or if the match index is out /// of bounds for the given state, then this routine may either panic /// or produce an incorrect result. If the state ID is correct and the /// match index is correct, then this routine must always produce a valid /// `PatternID`. 
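    ///
    /// # Example
    ///
    /// A minimal sketch of the single-pattern case, using the pattern
    /// `[a-z]+` and the haystack `abc` purely for illustration: in a match
    /// state of a single-pattern DFA, index `0` always yields pattern `0`.
    ///
    /// ```
    /// use regex_automata::{dfa::{Automaton, dense}, Input};
    ///
    /// let dfa = dense::DFA::new(r"[a-z]+")?;
    /// let haystack = "abc".as_bytes();
    ///
    /// // Walk the DFA into a match state. Matches are delayed by one byte,
    /// // so the final EOI transition is required.
    /// let mut state = dfa.start_state_forward(&Input::new(haystack))?;
    /// for &b in haystack {
    ///     state = dfa.next_state(state, b);
    /// }
    /// state = dfa.next_eoi_state(state);
    /// assert!(dfa.is_match_state(state));
    ///
    /// // With a single pattern, the only valid index is 0 and it maps to
    /// // PatternID 0.
    /// assert_eq!(dfa.match_pattern(state, 0).as_usize(), 0);
    ///
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```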
fn match_pattern(&self, id: StateID, index: usize) -> PatternID; /// Returns true if and only if this automaton can match the empty string. /// When it returns false, all possible matches are guaranteed to have a /// non-zero length. /// /// This is useful as cheap way to know whether code needs to handle the /// case of a zero length match. This is particularly important when UTF-8 /// modes are enabled, as when UTF-8 mode is enabled, empty matches that /// split a codepoint must never be reported. This extra handling can /// sometimes be costly, and since regexes matching an empty string are /// somewhat rare, it can be beneficial to treat such regexes specially. /// /// # Example /// /// This example shows a few different DFAs and whether they match the /// empty string or not. Notice the empty string isn't merely a matter /// of a string of length literally `0`, but rather, whether a match can /// occur between specific pairs of bytes. /// /// ``` /// use regex_automata::{dfa::{dense::DFA, Automaton}, util::syntax}; /// /// // The empty regex matches the empty string. /// let dfa = DFA::new("")?; /// assert!(dfa.has_empty(), "empty matches empty"); /// // The '+' repetition operator requires at least one match, and so /// // does not match the empty string. /// let dfa = DFA::new("a+")?; /// assert!(!dfa.has_empty(), "+ does not match empty"); /// // But the '*' repetition operator does. /// let dfa = DFA::new("a*")?; /// assert!(dfa.has_empty(), "* does match empty"); /// // And wrapping '+' in an operator that can match an empty string also /// // causes it to match the empty string too. /// let dfa = DFA::new("(a+)*")?; /// assert!(dfa.has_empty(), "+ inside of * matches empty"); /// /// // If a regex is just made of a look-around assertion, even if the /// // assertion requires some kind of non-empty string around it (such as /// // \b), then it is still treated as if it matches the empty string. /// // Namely, if a match occurs of just a look-around assertion, then the /// // match returned is empty. /// let dfa = DFA::builder() /// .configure(DFA::config().unicode_word_boundary(true)) /// .syntax(syntax::Config::new().utf8(false)) /// .build(r"^$\A\z\b\B(?-u:\b\B)")?; /// assert!(dfa.has_empty(), "assertions match empty"); /// // Even when an assertion is wrapped in a '+', it still matches the /// // empty string. /// let dfa = DFA::new(r"^+")?; /// assert!(dfa.has_empty(), "+ of an assertion matches empty"); /// /// // An alternation with even one branch that can match the empty string /// // is also said to match the empty string overall. /// let dfa = DFA::new("foo|(bar)?|quux")?; /// assert!(dfa.has_empty(), "alternations can match empty"); /// /// // An NFA that matches nothing does not match the empty string. /// let dfa = DFA::new("[a&&b]")?; /// assert!(!dfa.has_empty(), "never matching means not matching empty"); /// // But if it's wrapped in something that doesn't require a match at /// // all, then it can match the empty string! /// let dfa = DFA::new("[a&&b]*")?; /// assert!(dfa.has_empty(), "* on never-match still matches empty"); /// // Since a '+' requires a match, using it on something that can never /// // match will itself produce a regex that can never match anything, /// // and thus does not match the empty string. 
/// let dfa = DFA::new("[a&&b]+")?; /// assert!(!dfa.has_empty(), "+ on never-match still matches nothing"); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` fn has_empty(&self) -> bool; /// Whether UTF-8 mode is enabled for this DFA or not. /// /// When UTF-8 mode is enabled, all matches reported by a DFA are /// guaranteed to correspond to spans of valid UTF-8. This includes /// zero-width matches. For example, the DFA must guarantee that the empty /// regex will not match at the positions between code units in the UTF-8 /// encoding of a single codepoint. /// /// See [`thompson::Config::utf8`](crate::nfa::thompson::Config::utf8) for /// more information. /// /// # Example /// /// This example shows how UTF-8 mode can impact the match spans that may /// be reported in certain cases. /// /// ``` /// use regex_automata::{ /// dfa::{dense::DFA, Automaton}, /// nfa::thompson, /// HalfMatch, Input, /// }; /// /// // UTF-8 mode is enabled by default. /// let re = DFA::new("")?; /// assert!(re.is_utf8()); /// let mut input = Input::new("☃"); /// let got = re.try_search_fwd(&input)?; /// assert_eq!(Some(HalfMatch::must(0, 0)), got); /// /// // Even though an empty regex matches at 1..1, our next match is /// // 3..3 because 1..1 and 2..2 split the snowman codepoint (which is /// // three bytes long). /// input.set_start(1); /// let got = re.try_search_fwd(&input)?; /// assert_eq!(Some(HalfMatch::must(0, 3)), got); /// /// // But if we disable UTF-8, then we'll get matches at 1..1 and 2..2: /// let re = DFA::builder() /// .thompson(thompson::Config::new().utf8(false)) /// .build("")?; /// assert!(!re.is_utf8()); /// let got = re.try_search_fwd(&input)?; /// assert_eq!(Some(HalfMatch::must(0, 1)), got); /// /// input.set_start(2); /// let got = re.try_search_fwd(&input)?; /// assert_eq!(Some(HalfMatch::must(0, 2)), got); /// /// input.set_start(3); /// let got = re.try_search_fwd(&input)?; /// assert_eq!(Some(HalfMatch::must(0, 3)), got); /// /// input.set_start(4); /// let got = re.try_search_fwd(&input)?; /// assert_eq!(None, got); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` fn is_utf8(&self) -> bool; /// Returns true if and only if this DFA is limited to returning matches /// whose start position is `0`. /// /// Note that if you're using DFAs provided by /// this crate, then this is _orthogonal_ to /// [`Config::start_kind`](crate::dfa::dense::Config::start_kind). /// /// This is useful in some cases because if a DFA is limited to producing /// matches that start at offset `0`, then a reverse search is never /// required for finding the start of a match. /// /// # Example /// /// ``` /// use regex_automata::dfa::{dense::DFA, Automaton}; /// /// // The empty regex matches anywhere /// let dfa = DFA::new("")?; /// assert!(!dfa.is_always_start_anchored(), "empty matches anywhere"); /// // 'a' matches anywhere. /// let dfa = DFA::new("a")?; /// assert!(!dfa.is_always_start_anchored(), "'a' matches anywhere"); /// // '^' only matches at offset 0! /// let dfa = DFA::new("^a")?; /// assert!(dfa.is_always_start_anchored(), "'^a' matches only at 0"); /// // But '(?m:^)' matches at 0 but at other offsets too. /// let dfa = DFA::new("(?m:^)a")?; /// assert!(!dfa.is_always_start_anchored(), "'(?m:^)a' matches anywhere"); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` fn is_always_start_anchored(&self) -> bool; /// Return a slice of bytes to accelerate for the given state, if possible. 
/// /// If the given state has no accelerator, then an empty slice must be /// returned. If `Automaton::is_accel_state` returns true for the given ID, /// then this routine _must_ return a non-empty slice. But note that it is /// not required for an implementation of this trait to ever return `true` /// for `is_accel_state`, even if the state _could_ be accelerated. That /// is, acceleration is an optional optimization. But the return values of /// `is_accel_state` and `accelerator` must be in sync. /// /// If the given ID is not a valid state ID for this automaton, then /// implementations may panic or produce incorrect results. /// /// See [`Automaton::is_accel_state`] for more details on state /// acceleration. /// /// By default, this method will always return an empty slice. /// /// # Example /// /// This example shows a contrived case in which we build a regex that we /// know is accelerated and extract the accelerator from a state. /// /// ``` /// use regex_automata::{ /// dfa::{Automaton, dense}, /// util::{primitives::StateID, syntax}, /// }; /// /// let dfa = dense::Builder::new() /// // We disable Unicode everywhere and permit the regex to match /// // invalid UTF-8. e.g., [^abc] matches \xFF, which is not valid /// // UTF-8. If we left Unicode enabled, [^abc] would match any UTF-8 /// // encoding of any Unicode scalar value except for 'a', 'b' or 'c'. /// // That translates to a much more complicated DFA, and also /// // inhibits the 'accelerator' optimization that we are trying to /// // demonstrate in this example. /// .syntax(syntax::Config::new().unicode(false).utf8(false)) /// .build("[^abc]+a")?; /// /// // Here we just pluck out the state that we know is accelerated. /// // While the stride calculations are something that can be relied /// // on by callers, the specific position of the accelerated state is /// // implementation defined. /// // /// // N.B. We get '3' by inspecting the state machine using 'regex-cli'. /// // e.g., try `regex-cli debug dfa dense '[^abc]+a' -BbUC`. /// let id = StateID::new(3 * dfa.stride()).unwrap(); /// let accelerator = dfa.accelerator(id); /// // The `[^abc]+` sub-expression permits [a, b, c] to be accelerated. /// assert_eq!(accelerator, &[b'a', b'b', b'c']); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] fn accelerator(&self, _id: StateID) -> &[u8] { &[] } /// Returns the prefilter associated with a DFA, if one exists. /// /// The default implementation of this trait always returns `None`. And /// indeed, it is always correct to return `None`. /// /// For DFAs in this crate, a prefilter can be attached to a DFA via /// [`dense::Config::prefilter`](crate::dfa::dense::Config::prefilter). /// /// Do note that prefilters are not serialized by DFAs in this crate. /// So if you deserialize a DFA that had a prefilter attached to it /// at serialization time, then it will not have a prefilter after /// deserialization. #[inline] fn get_prefilter(&self) -> Option<&Prefilter> { None } /// Executes a forward search and returns the end position of the leftmost /// match that is found. If no match exists, then `None` is returned. /// /// In particular, this method continues searching even after it enters /// a match state. The search only terminates once it has reached the /// end of the input or when it has entered a dead or quit state. Upon /// termination, the position of the last byte seen while still in a match /// state is returned. /// /// # Errors /// /// This routine errors if the search could not complete. 
This can occur /// in a number of circumstances: /// /// * The configuration of the DFA may permit it to "quit" the search. /// For example, setting quit bytes or enabling heuristic support for /// Unicode word boundaries. The default configuration does not enable any /// option that could result in the DFA quitting. /// * When the provided `Input` configuration is not supported. For /// example, by providing an unsupported anchor mode. /// /// When a search returns an error, callers cannot know whether a match /// exists or not. /// /// # Notes for implementors /// /// Implementors of this trait are not required to implement any particular /// match semantics (such as leftmost-first), which are instead manifest in /// the DFA's transitions. But this search routine should behave as a /// general "leftmost" search. /// /// In particular, this method must continue searching even after it enters /// a match state. The search should only terminate once it has reached /// the end of the input or when it has entered a dead or quit state. Upon /// termination, the position of the last byte seen while still in a match /// state is returned. /// /// Since this trait provides an implementation for this method by default, /// it's unlikely that one will need to implement this. /// /// # Example /// /// This example shows how to use this method with a /// [`dense::DFA`](crate::dfa::dense::DFA). /// /// ``` /// use regex_automata::{dfa::{Automaton, dense}, HalfMatch, Input}; /// /// let dfa = dense::DFA::new("foo[0-9]+")?; /// let expected = Some(HalfMatch::must(0, 8)); /// assert_eq!(expected, dfa.try_search_fwd(&Input::new(b"foo12345"))?); /// /// // Even though a match is found after reading the first byte (`a`), /// // the leftmost first match semantics demand that we find the earliest /// // match that prefers earlier parts of the pattern over latter parts. /// let dfa = dense::DFA::new("abc|a")?; /// let expected = Some(HalfMatch::must(0, 3)); /// assert_eq!(expected, dfa.try_search_fwd(&Input::new(b"abc"))?); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// # Example: specific pattern search /// /// This example shows how to build a multi-DFA that permits searching for /// specific patterns. /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{ /// dfa::{Automaton, dense}, /// Anchored, HalfMatch, PatternID, Input, /// }; /// /// let dfa = dense::Builder::new() /// .configure(dense::Config::new().starts_for_each_pattern(true)) /// .build_many(&["[a-z0-9]{6}", "[a-z][a-z0-9]{5}"])?; /// let haystack = "foo123".as_bytes(); /// /// // Since we are using the default leftmost-first match and both /// // patterns match at the same starting position, only the first pattern /// // will be returned in this case when doing a search for any of the /// // patterns. /// let expected = Some(HalfMatch::must(0, 6)); /// let got = dfa.try_search_fwd(&Input::new(haystack))?; /// assert_eq!(expected, got); /// /// // But if we want to check whether some other pattern matches, then we /// // can provide its pattern ID. 
/// let input = Input::new(haystack) /// .anchored(Anchored::Pattern(PatternID::must(1))); /// let expected = Some(HalfMatch::must(1, 6)); /// let got = dfa.try_search_fwd(&input)?; /// assert_eq!(expected, got); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// # Example: specifying the bounds of a search /// /// This example shows how providing the bounds of a search can produce /// different results than simply sub-slicing the haystack. /// /// ``` /// use regex_automata::{dfa::{Automaton, dense}, HalfMatch, Input}; /// /// // N.B. We disable Unicode here so that we use a simple ASCII word /// // boundary. Alternatively, we could enable heuristic support for /// // Unicode word boundaries. /// let dfa = dense::DFA::new(r"(?-u)\b[0-9]{3}\b")?; /// let haystack = "foo123bar".as_bytes(); /// /// // Since we sub-slice the haystack, the search doesn't know about the /// // larger context and assumes that `123` is surrounded by word /// // boundaries. And of course, the match position is reported relative /// // to the sub-slice as well, which means we get `3` instead of `6`. /// let input = Input::new(&haystack[3..6]); /// let expected = Some(HalfMatch::must(0, 3)); /// let got = dfa.try_search_fwd(&input)?; /// assert_eq!(expected, got); /// /// // But if we provide the bounds of the search within the context of the /// // entire haystack, then the search can take the surrounding context /// // into account. (And if we did find a match, it would be reported /// // as a valid offset into `haystack` instead of its sub-slice.) /// let input = Input::new(haystack).range(3..6); /// let expected = None; /// let got = dfa.try_search_fwd(&input)?; /// assert_eq!(expected, got); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] fn try_search_fwd( &self, input: &Input<'_>, ) -> Result<Option<HalfMatch>, MatchError> { let utf8empty = self.has_empty() && self.is_utf8(); let hm = match search::find_fwd(&self, input)? { None => return Ok(None), Some(hm) if !utf8empty => return Ok(Some(hm)), Some(hm) => hm, }; // We get to this point when we know our DFA can match the empty string // AND when UTF-8 mode is enabled. In this case, we skip any matches // whose offset splits a codepoint. Such a match is necessarily a // zero-width match, because UTF-8 mode requires the underlying NFA // to be built such that all non-empty matches span valid UTF-8. // Therefore, any match that ends in the middle of a codepoint cannot // be part of a span of valid UTF-8 and thus must be an empty match. // In such cases, we skip it, so as not to report matches that split a // codepoint. // // Note that this is not a checked assumption. Callers *can* provide an // NFA with UTF-8 mode enabled but produces non-empty matches that span // invalid UTF-8. But doing so is documented to result in unspecified // behavior. empty::skip_splits_fwd(input, hm, hm.offset(), |input| { let got = search::find_fwd(&self, input)?; Ok(got.map(|hm| (hm, hm.offset()))) }) } /// Executes a reverse search and returns the start of the position of the /// leftmost match that is found. If no match exists, then `None` is /// returned. /// /// # Errors /// /// This routine errors if the search could not complete. This can occur /// in a number of circumstances: /// /// * The configuration of the DFA may permit it to "quit" the search. /// For example, setting quit bytes or enabling heuristic support for /// Unicode word boundaries. 
The default configuration does not enable any /// option that could result in the DFA quitting. /// * When the provided `Input` configuration is not supported. For /// example, by providing an unsupported anchor mode. /// /// When a search returns an error, callers cannot know whether a match /// exists or not. /// /// # Example /// /// This example shows how to use this method with a /// [`dense::DFA`](crate::dfa::dense::DFA). In particular, this /// routine is principally useful when used in conjunction with the /// [`nfa::thompson::Config::reverse`](crate::nfa::thompson::Config::reverse) /// configuration. In general, it's unlikely to be correct to use /// both `try_search_fwd` and `try_search_rev` with the same DFA since /// any particular DFA will only support searching in one direction with /// respect to the pattern. /// /// ``` /// use regex_automata::{ /// nfa::thompson, /// dfa::{Automaton, dense}, /// HalfMatch, Input, /// }; /// /// let dfa = dense::Builder::new() /// .thompson(thompson::Config::new().reverse(true)) /// .build("foo[0-9]+")?; /// let expected = Some(HalfMatch::must(0, 0)); /// assert_eq!(expected, dfa.try_search_rev(&Input::new(b"foo12345"))?); /// /// // Even though a match is found after reading the last byte (`c`), /// // the leftmost first match semantics demand that we find the earliest /// // match that prefers earlier parts of the pattern over latter parts. /// let dfa = dense::Builder::new() /// .thompson(thompson::Config::new().reverse(true)) /// .build("abc|c")?; /// let expected = Some(HalfMatch::must(0, 0)); /// assert_eq!(expected, dfa.try_search_rev(&Input::new(b"abc"))?); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// # Example: UTF-8 mode /// /// This examples demonstrates that UTF-8 mode applies to reverse /// DFAs. When UTF-8 mode is enabled in the underlying NFA, then all /// matches reported must correspond to valid UTF-8 spans. This includes /// prohibiting zero-width matches that split a codepoint. /// /// UTF-8 mode is enabled by default. Notice below how the only zero-width /// matches reported are those at UTF-8 boundaries: /// /// ``` /// use regex_automata::{ /// dfa::{dense::DFA, Automaton}, /// nfa::thompson, /// HalfMatch, Input, MatchKind, /// }; /// /// let dfa = DFA::builder() /// .thompson(thompson::Config::new().reverse(true)) /// .build(r"")?; /// /// // Run the reverse DFA to collect all matches. /// let mut input = Input::new("☃"); /// let mut matches = vec![]; /// loop { /// match dfa.try_search_rev(&input)? { /// None => break, /// Some(hm) => { /// matches.push(hm); /// if hm.offset() == 0 || input.end() == 0 { /// break; /// } else if hm.offset() < input.end() { /// input.set_end(hm.offset()); /// } else { /// // This is only necessary to handle zero-width /// // matches, which of course occur in this example. /// // Without this, the search would never advance /// // backwards beyond the initial match. /// input.set_end(input.end() - 1); /// } /// } /// } /// } /// /// // No matches split a codepoint. 
/// let expected = vec![ /// HalfMatch::must(0, 3), /// HalfMatch::must(0, 0), /// ]; /// assert_eq!(expected, matches); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// Now let's look at the same example, but with UTF-8 mode on the /// original NFA disabled (which results in disabling UTF-8 mode on the /// DFA): /// /// ``` /// use regex_automata::{ /// dfa::{dense::DFA, Automaton}, /// nfa::thompson, /// HalfMatch, Input, MatchKind, /// }; /// /// let dfa = DFA::builder() /// .thompson(thompson::Config::new().reverse(true).utf8(false)) /// .build(r"")?; /// /// // Run the reverse DFA to collect all matches. /// let mut input = Input::new("☃"); /// let mut matches = vec![]; /// loop { /// match dfa.try_search_rev(&input)? { /// None => break, /// Some(hm) => { /// matches.push(hm); /// if hm.offset() == 0 || input.end() == 0 { /// break; /// } else if hm.offset() < input.end() { /// input.set_end(hm.offset()); /// } else { /// // This is only necessary to handle zero-width /// // matches, which of course occur in this example. /// // Without this, the search would never advance /// // backwards beyond the initial match. /// input.set_end(input.end() - 1); /// } /// } /// } /// } /// /// // No matches split a codepoint. /// let expected = vec![ /// HalfMatch::must(0, 3), /// HalfMatch::must(0, 2), /// HalfMatch::must(0, 1), /// HalfMatch::must(0, 0), /// ]; /// assert_eq!(expected, matches); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] fn try_search_rev( &self, input: &Input<'_>, ) -> Result<Option<HalfMatch>, MatchError> { let utf8empty = self.has_empty() && self.is_utf8(); let hm = match search::find_rev(self, input)? { None => return Ok(None), Some(hm) if !utf8empty => return Ok(Some(hm)), Some(hm) => hm, }; empty::skip_splits_rev(input, hm, hm.offset(), |input| { let got = search::find_rev(self, input)?; Ok(got.map(|hm| (hm, hm.offset()))) }) } /// Executes an overlapping forward search. Matches, if one exists, can be /// obtained via the [`OverlappingState::get_match`] method. /// /// This routine is principally only useful when searching for multiple /// patterns on inputs where multiple patterns may match the same regions /// of text. In particular, callers must preserve the automaton's search /// state from prior calls so that the implementation knows where the last /// match occurred. /// /// When using this routine to implement an iterator of overlapping /// matches, the `start` of the search should always be set to the end /// of the last match. If more patterns match at the previous location, /// then they will be immediately returned. (This is tracked by the given /// overlapping state.) Otherwise, the search continues at the starting /// position given. /// /// If for some reason you want the search to forget about its previous /// state and restart the search at a particular position, then setting the /// state to [`OverlappingState::start`] will accomplish that. /// /// # Errors /// /// This routine errors if the search could not complete. This can occur /// in a number of circumstances: /// /// * The configuration of the DFA may permit it to "quit" the search. /// For example, setting quit bytes or enabling heuristic support for /// Unicode word boundaries. The default configuration does not enable any /// option that could result in the DFA quitting. /// * When the provided `Input` configuration is not supported. For /// example, by providing an unsupported anchor mode. 
/// /// When a search returns an error, callers cannot know whether a match /// exists or not. /// /// # Example /// /// This example shows how to run a basic overlapping search with a /// [`dense::DFA`](crate::dfa::dense::DFA). Notice that we build the /// automaton with a `MatchKind::All` configuration. Overlapping searches /// are unlikely to work as one would expect when using the default /// `MatchKind::LeftmostFirst` match semantics, since leftmost-first /// matching is fundamentally incompatible with overlapping searches. /// Namely, overlapping searches need to report matches as they are seen, /// where as leftmost-first searches will continue searching even after a /// match has been observed in order to find the conventional end position /// of the match. More concretely, leftmost-first searches use dead states /// to terminate a search after a specific match can no longer be extended. /// Overlapping searches instead do the opposite by continuing the search /// to find totally new matches (potentially of other patterns). /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{ /// dfa::{Automaton, OverlappingState, dense}, /// HalfMatch, Input, MatchKind, /// }; /// /// let dfa = dense::Builder::new() /// .configure(dense::Config::new().match_kind(MatchKind::All)) /// .build_many(&[r"[[:word:]]+$", r"[[:^space:]]+$"])?; /// let haystack = "@foo"; /// let mut state = OverlappingState::start(); /// /// let expected = Some(HalfMatch::must(1, 4)); /// dfa.try_search_overlapping_fwd(&Input::new(haystack), &mut state)?; /// assert_eq!(expected, state.get_match()); /// /// // The first pattern also matches at the same position, so re-running /// // the search will yield another match. Notice also that the first /// // pattern is returned after the second. This is because the second /// // pattern begins its match before the first, is therefore an earlier /// // match and is thus reported first. /// let expected = Some(HalfMatch::must(0, 4)); /// dfa.try_search_overlapping_fwd(&Input::new(haystack), &mut state)?; /// assert_eq!(expected, state.get_match()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] fn try_search_overlapping_fwd( &self, input: &Input<'_>, state: &mut OverlappingState, ) -> Result<(), MatchError> { let utf8empty = self.has_empty() && self.is_utf8(); search::find_overlapping_fwd(self, input, state)?; match state.get_match() { None => Ok(()), Some(_) if !utf8empty => Ok(()), Some(_) => skip_empty_utf8_splits_overlapping( input, state, |input, state| { search::find_overlapping_fwd(self, input, state) }, ), } } /// Executes a reverse overlapping forward search. Matches, if one exists, /// can be obtained via the [`OverlappingState::get_match`] method. /// /// When using this routine to implement an iterator of overlapping /// matches, the `start` of the search should remain invariant throughout /// iteration. The `OverlappingState` given to the search will keep track /// of the current position of the search. (This is because multiple /// matches may be reported at the same position, so only the search /// implementation itself knows when to advance the position.) /// /// If for some reason you want the search to forget about its previous /// state and restart the search at a particular position, then setting the /// state to [`OverlappingState::start`] will accomplish that. /// /// # Errors /// /// This routine errors if the search could not complete. 
This can occur /// in a number of circumstances: /// /// * The configuration of the DFA may permit it to "quit" the search. /// For example, setting quit bytes or enabling heuristic support for /// Unicode word boundaries. The default configuration does not enable any /// option that could result in the DFA quitting. /// * When the provided `Input` configuration is not supported. For /// example, by providing an unsupported anchor mode. /// /// When a search returns an error, callers cannot know whether a match /// exists or not. /// /// # Example: UTF-8 mode /// /// This examples demonstrates that UTF-8 mode applies to reverse /// DFAs. When UTF-8 mode is enabled in the underlying NFA, then all /// matches reported must correspond to valid UTF-8 spans. This includes /// prohibiting zero-width matches that split a codepoint. /// /// UTF-8 mode is enabled by default. Notice below how the only zero-width /// matches reported are those at UTF-8 boundaries: /// /// ``` /// use regex_automata::{ /// dfa::{dense::DFA, Automaton, OverlappingState}, /// nfa::thompson, /// HalfMatch, Input, MatchKind, /// }; /// /// let dfa = DFA::builder() /// .configure(DFA::config().match_kind(MatchKind::All)) /// .thompson(thompson::Config::new().reverse(true)) /// .build_many(&[r"", r"☃"])?; /// /// // Run the reverse DFA to collect all matches. /// let input = Input::new("☃"); /// let mut state = OverlappingState::start(); /// let mut matches = vec![]; /// loop { /// dfa.try_search_overlapping_rev(&input, &mut state)?; /// match state.get_match() { /// None => break, /// Some(hm) => matches.push(hm), /// } /// } /// /// // No matches split a codepoint. /// let expected = vec![ /// HalfMatch::must(0, 3), /// HalfMatch::must(1, 0), /// HalfMatch::must(0, 0), /// ]; /// assert_eq!(expected, matches); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// Now let's look at the same example, but with UTF-8 mode on the /// original NFA disabled (which results in disabling UTF-8 mode on the /// DFA): /// /// ``` /// use regex_automata::{ /// dfa::{dense::DFA, Automaton, OverlappingState}, /// nfa::thompson, /// HalfMatch, Input, MatchKind, /// }; /// /// let dfa = DFA::builder() /// .configure(DFA::config().match_kind(MatchKind::All)) /// .thompson(thompson::Config::new().reverse(true).utf8(false)) /// .build_many(&[r"", r"☃"])?; /// /// // Run the reverse DFA to collect all matches. /// let input = Input::new("☃"); /// let mut state = OverlappingState::start(); /// let mut matches = vec![]; /// loop { /// dfa.try_search_overlapping_rev(&input, &mut state)?; /// match state.get_match() { /// None => break, /// Some(hm) => matches.push(hm), /// } /// } /// /// // Now *all* positions match, even within a codepoint, /// // because we lifted the requirement that matches /// // correspond to valid UTF-8 spans. 
/// let expected = vec![ /// HalfMatch::must(0, 3), /// HalfMatch::must(0, 2), /// HalfMatch::must(0, 1), /// HalfMatch::must(1, 0), /// HalfMatch::must(0, 0), /// ]; /// assert_eq!(expected, matches); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] fn try_search_overlapping_rev( &self, input: &Input<'_>, state: &mut OverlappingState, ) -> Result<(), MatchError> { let utf8empty = self.has_empty() && self.is_utf8(); search::find_overlapping_rev(self, input, state)?; match state.get_match() { None => Ok(()), Some(_) if !utf8empty => Ok(()), Some(_) => skip_empty_utf8_splits_overlapping( input, state, |input, state| { search::find_overlapping_rev(self, input, state) }, ), } } /// Writes the set of patterns that match anywhere in the given search /// configuration to `patset`. If multiple patterns match at the same /// position and the underlying DFA supports overlapping matches, then all /// matching patterns are written to the given set. /// /// Unless all of the patterns in this DFA are anchored, then generally /// speaking, this will visit every byte in the haystack. /// /// This search routine *does not* clear the pattern set. This gives some /// flexibility to the caller (e.g., running multiple searches with the /// same pattern set), but does make the API bug-prone if you're reusing /// the same pattern set for multiple searches but intended them to be /// independent. /// /// If a pattern ID matched but the given `PatternSet` does not have /// sufficient capacity to store it, then it is not inserted and silently /// dropped. /// /// # Errors /// /// This routine errors if the search could not complete. This can occur /// in a number of circumstances: /// /// * The configuration of the DFA may permit it to "quit" the search. /// For example, setting quit bytes or enabling heuristic support for /// Unicode word boundaries. The default configuration does not enable any /// option that could result in the DFA quitting. /// * When the provided `Input` configuration is not supported. For /// example, by providing an unsupported anchor mode. /// /// When a search returns an error, callers cannot know whether a match /// exists or not. /// /// # Example /// /// This example shows how to find all matching patterns in a haystack, /// even when some patterns match at the same position as other patterns. /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{ /// dfa::{Automaton, dense::DFA}, /// Input, MatchKind, PatternSet, /// }; /// /// let patterns = &[ /// r"[[:word:]]+", /// r"[0-9]+", /// r"[[:alpha:]]+", /// r"foo", /// r"bar", /// r"barfoo", /// r"foobar", /// ]; /// let dfa = DFA::builder() /// .configure(DFA::config().match_kind(MatchKind::All)) /// .build_many(patterns)?; /// /// let input = Input::new("foobar"); /// let mut patset = PatternSet::new(dfa.pattern_len()); /// dfa.try_which_overlapping_matches(&input, &mut patset)?; /// let expected = vec![0, 2, 3, 4, 6]; /// let got: Vec<usize> = patset.iter().map(|p| p.as_usize()).collect(); /// assert_eq!(expected, got); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[cfg(feature = "alloc")] #[inline] fn try_which_overlapping_matches( &self, input: &Input<'_>, patset: &mut PatternSet, ) -> Result<(), MatchError> { let mut state = OverlappingState::start(); while let Some(m) = { self.try_search_overlapping_fwd(input, &mut state)?; state.get_match() } { let _ = patset.insert(m.pattern()); // There's nothing left to find, so we can stop. 
Or the caller // asked us to. if patset.is_full() || input.get_earliest() { break; } } Ok(()) } } unsafe impl<'a, A: Automaton + ?Sized> Automaton for &'a A { #[inline] fn next_state(&self, current: StateID, input: u8) -> StateID { (**self).next_state(current, input) } #[inline] unsafe fn next_state_unchecked( &self, current: StateID, input: u8, ) -> StateID { (**self).next_state_unchecked(current, input) } #[inline] fn next_eoi_state(&self, current: StateID) -> StateID { (**self).next_eoi_state(current) } #[inline] fn start_state_forward( &self, input: &Input<'_>, ) -> Result<StateID, MatchError> { (**self).start_state_forward(input) } #[inline] fn start_state_reverse( &self, input: &Input<'_>, ) -> Result<StateID, MatchError> { (**self).start_state_reverse(input) } #[inline] fn universal_start_state(&self, mode: Anchored) -> Option<StateID> { (**self).universal_start_state(mode) } #[inline] fn is_special_state(&self, id: StateID) -> bool { (**self).is_special_state(id) } #[inline] fn is_dead_state(&self, id: StateID) -> bool { (**self).is_dead_state(id) } #[inline] fn is_quit_state(&self, id: StateID) -> bool { (**self).is_quit_state(id) } #[inline] fn is_match_state(&self, id: StateID) -> bool { (**self).is_match_state(id) } #[inline] fn is_start_state(&self, id: StateID) -> bool { (**self).is_start_state(id) } #[inline] fn is_accel_state(&self, id: StateID) -> bool { (**self).is_accel_state(id) } #[inline] fn pattern_len(&self) -> usize { (**self).pattern_len() } #[inline] fn match_len(&self, id: StateID) -> usize { (**self).match_len(id) } #[inline] fn match_pattern(&self, id: StateID, index: usize) -> PatternID { (**self).match_pattern(id, index) } #[inline] fn has_empty(&self) -> bool { (**self).has_empty() } #[inline] fn is_utf8(&self) -> bool { (**self).is_utf8() } #[inline] fn is_always_start_anchored(&self) -> bool { (**self).is_always_start_anchored() } #[inline] fn accelerator(&self, id: StateID) -> &[u8] { (**self).accelerator(id) } #[inline] fn get_prefilter(&self) -> Option<&Prefilter> { (**self).get_prefilter() } #[inline] fn try_search_fwd( &self, input: &Input<'_>, ) -> Result<Option<HalfMatch>, MatchError> { (**self).try_search_fwd(input) } #[inline] fn try_search_rev( &self, input: &Input<'_>, ) -> Result<Option<HalfMatch>, MatchError> { (**self).try_search_rev(input) } #[inline] fn try_search_overlapping_fwd( &self, input: &Input<'_>, state: &mut OverlappingState, ) -> Result<(), MatchError> { (**self).try_search_overlapping_fwd(input, state) } #[inline] fn try_search_overlapping_rev( &self, input: &Input<'_>, state: &mut OverlappingState, ) -> Result<(), MatchError> { (**self).try_search_overlapping_rev(input, state) } #[cfg(feature = "alloc")] #[inline] fn try_which_overlapping_matches( &self, input: &Input<'_>, patset: &mut PatternSet, ) -> Result<(), MatchError> { (**self).try_which_overlapping_matches(input, patset) } } /// Represents the current state of an overlapping search. /// /// This is used for overlapping searches since they need to know something /// about the previous search. For example, when multiple patterns match at the /// same position, this state tracks the last reported pattern so that the next /// search knows whether to report another matching pattern or continue with /// the search at the next position. Additionally, it also tracks which state /// the last search call terminated in. /// /// This type provides little introspection capabilities. 
The only thing a /// caller can do is construct it and pass it around to permit search routines /// to use it to track state, and also ask whether a match has been found. /// /// Callers should always provide a fresh state constructed via /// [`OverlappingState::start`] when starting a new search. Reusing state from /// a previous search may result in incorrect results. #[derive(Clone, Debug, Eq, PartialEq)] pub struct OverlappingState { /// The match reported by the most recent overlapping search to use this /// state. /// /// If a search does not find any matches, then it is expected to clear /// this value. pub(crate) mat: Option<HalfMatch>, /// The state ID of the state at which the search was in when the call /// terminated. When this is a match state, `last_match` must be set to a /// non-None value. /// /// A `None` value indicates the start state of the corresponding /// automaton. We cannot use the actual ID, since any one automaton may /// have many start states, and which one is in use depends on several /// search-time factors. pub(crate) id: Option<StateID>, /// The position of the search. /// /// When `id` is None (i.e., we are starting a search), this is set to /// the beginning of the search as given by the caller regardless of its /// current value. Subsequent calls to an overlapping search pick up at /// this offset. pub(crate) at: usize, /// The index into the matching patterns of the next match to report if the /// current state is a match state. Note that this may be 1 greater than /// the total number of matches to report for the current match state. (In /// which case, no more matches should be reported at the current position /// and the search should advance to the next position.) pub(crate) next_match_index: Option<usize>, /// This is set to true when a reverse overlapping search has entered its /// EOI transitions. /// /// This isn't used in a forward search because it knows to stop once the /// position exceeds the end of the search range. In a reverse search, /// since we use unsigned offsets, we don't "know" once we've gone past /// `0`. So the only way to detect it is with this extra flag. The reverse /// overlapping search knows to terminate specifically after it has /// reported all matches after following the EOI transition. pub(crate) rev_eoi: bool, } impl OverlappingState { /// Create a new overlapping state that begins at the start state of any /// automaton. pub fn start() -> OverlappingState { OverlappingState { mat: None, id: None, at: 0, next_match_index: None, rev_eoi: false, } } /// Return the match result of the most recent search to execute with this /// state. /// /// A searches will clear this result automatically, such that if no /// match is found, this will correctly report `None`. pub fn get_match(&self) -> Option<HalfMatch> { self.mat } } /// Runs the given overlapping `search` function (forwards or backwards) until /// a match is found whose offset does not split a codepoint. /// /// This is *not* always correct to call. It should only be called when the DFA /// has UTF-8 mode enabled *and* it can produce zero-width matches. Calling /// this when both of those things aren't true might result in legitimate /// matches getting skipped. 
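///
/// A minimal sketch of the guard used by the callers above (for example,
/// `try_search_overlapping_fwd`) before handing control to this helper:
///
/// ```ignore
/// // Only a DFA that can match the empty string *and* has UTF-8 mode
/// // enabled may report zero-width matches that split a codepoint, so the
/// // skip logic is only invoked in that case.
/// let utf8empty = self.has_empty() && self.is_utf8();
/// if utf8empty {
///     // ... re-run the search via this helper until the reported offset
///     // no longer splits a codepoint ...
/// }
/// ```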
#[cold] #[inline(never)] fn skip_empty_utf8_splits_overlapping<F>( input: &Input<'_>, state: &mut OverlappingState, mut search: F, ) -> Result<(), MatchError> where F: FnMut(&Input<'_>, &mut OverlappingState) -> Result<(), MatchError>, { // Note that this routine works for forwards and reverse searches // even though there's no code here to handle those cases. That's // because overlapping searches drive themselves to completion via // `OverlappingState`. So all we have to do is push it until no matches are // found. let mut hm = match state.get_match() { None => return Ok(()), Some(hm) => hm, }; if input.get_anchored().is_anchored() { if !input.is_char_boundary(hm.offset()) { state.mat = None; } return Ok(()); } while !input.is_char_boundary(hm.offset()) { search(input, state)?; hm = match state.get_match() { None => return Ok(()), Some(hm) => hm, }; } Ok(()) } /// Write a prefix "state" indicator for fmt::Debug impls. /// /// Specifically, this tries to succinctly distinguish the different types of /// states: dead states, quit states, accelerated states, start states and /// match states. It even accounts for the possible overlappings of different /// state types. pub(crate) fn fmt_state_indicator<A: Automaton>( f: &mut core::fmt::Formatter<'_>, dfa: A, id: StateID, ) -> core::fmt::Result { if dfa.is_dead_state(id) { write!(f, "D")?; if dfa.is_start_state(id) { write!(f, ">")?; } else { write!(f, " ")?; } } else if dfa.is_quit_state(id) { write!(f, "Q ")?; } else if dfa.is_start_state(id) { if dfa.is_accel_state(id) { write!(f, "A>")?; } else { write!(f, " >")?; } } else if dfa.is_match_state(id) { if dfa.is_accel_state(id) { write!(f, "A*")?; } else { write!(f, " *")?; } } else if dfa.is_accel_state(id) { write!(f, "A ")?; } else { write!(f, " ")?; } Ok(()) } #[cfg(all(test, feature = "syntax", feature = "dfa-build"))] mod tests { // A basic test ensuring that our Automaton trait is object safe. (This is // the main reason why we don't define the search routines as generic over // Into<Input>.) #[test] fn object_safe() { use crate::{ dfa::{dense, Automaton}, HalfMatch, Input, }; let dfa = dense::DFA::new("abc").unwrap(); let dfa: &dyn Automaton = &dfa; assert_eq!( Ok(Some(HalfMatch::must(0, 6))), dfa.try_search_fwd(&Input::new(b"xyzabcxyz")), ); } } <file_sep>/regex-automata/src/meta/error.rs use regex_syntax::{ast, hir}; use crate::{nfa, util::search::MatchError, PatternID}; /// An error that occurs when construction of a `Regex` fails. /// /// A build error is generally a result of one of two possible failure /// modes. First is a parse or syntax error in the concrete syntax of a /// pattern. Second is that the construction of the underlying regex matcher /// fails, usually because it gets too big with respect to limits like /// [`Config::nfa_size_limit`](crate::meta::Config::nfa_size_limit). /// /// This error provides very little introspection capabilities. You can: /// /// * Ask for the [`PatternID`] of the pattern that caused an error, if one /// is available. This is available for things like syntax errors, but not for /// cases where build limits are exceeded. /// * Ask for the underlying syntax error, but only if the error is a syntax /// error. /// * Ask for a human readable message corresponding to the underlying error. /// * The `BuildError::source` method (from the `std::error::Error` /// trait implementation) may be used to query for an underlying error if one /// exists. There are no API guarantees about which error is returned. 
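///
/// As an illustrative sketch of querying these methods (it mirrors the
/// [`BuildError::pattern`] example below; the specific patterns are
/// arbitrary and carry no API guarantee):
///
/// ```
/// use regex_automata::meta::Regex;
///
/// // '\p{Foo}' is not a valid Unicode property name, so building fails
/// // with a syntax error attributed to the offending pattern.
/// let err = Regex::new_many(&["a", "b", r"\p{Foo}", "c"]).unwrap_err();
/// assert!(err.syntax_error().is_some());
/// // A syntax error is not a size limit error.
/// assert!(err.size_limit().is_none());
/// ```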
/// /// When the `std` feature is enabled, this implements `std::error::Error`. #[derive(Clone, Debug)] pub struct BuildError { kind: BuildErrorKind, } #[derive(Clone, Debug)] enum BuildErrorKind { Syntax { pid: PatternID, err: regex_syntax::Error }, NFA(nfa::thompson::BuildError), } impl BuildError { /// If it is known which pattern ID caused this build error to occur, then /// this method returns it. /// /// Some errors are not associated with a particular pattern. However, any /// errors that occur as part of parsing a pattern are guaranteed to be /// associated with a pattern ID. /// /// # Example /// /// ``` /// use regex_automata::{meta::Regex, PatternID}; /// /// let err = Regex::new_many(&["a", "b", r"\p{Foo}", "c"]).unwrap_err(); /// assert_eq!(Some(PatternID::must(2)), err.pattern()); /// ``` pub fn pattern(&self) -> Option<PatternID> { match self.kind { BuildErrorKind::Syntax { pid, .. } => Some(pid), _ => None, } } /// If this error occurred because the regex exceeded the configured size /// limit before being built, then this returns the configured size limit. /// /// The limit returned is what was configured, and corresponds to the /// maximum amount of heap usage in bytes. pub fn size_limit(&self) -> Option<usize> { match self.kind { BuildErrorKind::NFA(ref err) => err.size_limit(), _ => None, } } /// If this error corresponds to a syntax error, then a reference to it is /// returned by this method. pub fn syntax_error(&self) -> Option<&regex_syntax::Error> { match self.kind { BuildErrorKind::Syntax { ref err, .. } => Some(err), _ => None, } } pub(crate) fn ast(pid: PatternID, err: ast::Error) -> BuildError { let err = regex_syntax::Error::from(err); BuildError { kind: BuildErrorKind::Syntax { pid, err } } } pub(crate) fn hir(pid: PatternID, err: hir::Error) -> BuildError { let err = regex_syntax::Error::from(err); BuildError { kind: BuildErrorKind::Syntax { pid, err } } } pub(crate) fn nfa(err: nfa::thompson::BuildError) -> BuildError { BuildError { kind: BuildErrorKind::NFA(err) } } } #[cfg(feature = "std")] impl std::error::Error for BuildError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match self.kind { BuildErrorKind::Syntax { ref err, .. } => Some(err), BuildErrorKind::NFA(ref err) => Some(err), } } } impl core::fmt::Display for BuildError { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { match self.kind { BuildErrorKind::Syntax { pid, .. } => { write!(f, "error parsing pattern {}", pid.as_usize()) } BuildErrorKind::NFA(_) => write!(f, "error building NFA"), } } } /// An error that occurs when a search should be retried. /// /// This retry error distinguishes between two different failure modes. /// /// The first is one where potential quadratic behavior has been detected. /// In this case, whatever optimization that led to this behavior should be /// stopped, and the next best strategy should be used. /// /// The second indicates that the underlying regex engine has failed for some /// reason. This usually occurs because either a lazy DFA's cache has become /// ineffective or because a non-ASCII byte has been seen *and* a Unicode word /// boundary was used in one of the patterns. In this failure case, a different /// regex engine that won't fail in these ways (PikeVM, backtracker or the /// one-pass DFA) should be used. /// /// This is an internal error only and should never bleed into the public /// API. 
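///
/// As a purely illustrative sketch of the expected reaction (the helper
/// names below are placeholders, not APIs in this crate):
///
/// ```ignore
/// match try_optimized_search(haystack) {
///     // Potential quadratic behavior detected: abandon the "reverse"
///     // optimization and fall back to a normal forward search.
///     Err(RetryError::Quadratic(_)) => forward_search(haystack),
///     // The engine gave up: retry with an engine that cannot fail here,
///     // such as the PikeVM, the backtracker or the one-pass DFA.
///     Err(RetryError::Fail(_)) => fallback_engine_search(haystack),
///     Ok(result) => result,
/// }
/// ```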
#[derive(Debug)] pub(crate) enum RetryError { Quadratic(RetryQuadraticError), Fail(RetryFailError), } #[cfg(feature = "std")] impl std::error::Error for RetryError {} impl core::fmt::Display for RetryError { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { match *self { RetryError::Quadratic(ref err) => err.fmt(f), RetryError::Fail(ref err) => err.fmt(f), } } } impl From<MatchError> for RetryError { fn from(merr: MatchError) -> RetryError { RetryError::Fail(RetryFailError::from(merr)) } } /// An error that occurs when potential quadratic behavior has been detected /// when applying either the "reverse suffix" or "reverse inner" optimizations. /// /// When this error occurs, callers should abandon the "reverse" optimization /// and use a normal forward search. #[derive(Debug)] pub(crate) struct RetryQuadraticError(()); impl RetryQuadraticError { pub(crate) fn new() -> RetryQuadraticError { RetryQuadraticError(()) } } #[cfg(feature = "std")] impl std::error::Error for RetryQuadraticError {} impl core::fmt::Display for RetryQuadraticError { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { write!(f, "regex engine gave up to avoid quadratic behavior") } } impl From<RetryQuadraticError> for RetryError { fn from(err: RetryQuadraticError) -> RetryError { RetryError::Quadratic(err) } } /// An error that occurs when a regex engine "gives up" for some reason before /// finishing a search. Usually this occurs because of heuristic Unicode word /// boundary support or because of ineffective cache usage in the lazy DFA. /// /// When this error occurs, callers should retry the regex search with a /// different regex engine. /// /// Note that this has convenient `From` impls that will automatically /// convert a `MatchError` into this error. This works because the meta /// regex engine internals guarantee that errors like `HaystackTooLong` and /// `UnsupportedAnchored` will never occur. The only errors left are `Quit` and /// `GaveUp`, which both correspond to this "failure" error. #[derive(Debug)] pub(crate) struct RetryFailError { offset: usize, } impl RetryFailError { pub(crate) fn from_offset(offset: usize) -> RetryFailError { RetryFailError { offset } } } #[cfg(feature = "std")] impl std::error::Error for RetryFailError {} impl core::fmt::Display for RetryFailError { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { write!(f, "regex engine failed at offset {:?}", self.offset) } } impl From<RetryFailError> for RetryError { fn from(err: RetryFailError) -> RetryError { RetryError::Fail(err) } } impl From<MatchError> for RetryFailError { fn from(merr: MatchError) -> RetryFailError { use crate::util::search::MatchErrorKind::*; match *merr.kind() { Quit { offset, .. } => RetryFailError::from_offset(offset), GaveUp { offset } => RetryFailError::from_offset(offset), // These can never occur because we avoid them by construction // or with higher level control flow logic. For example, the // backtracker's wrapper will never hand out a backtracker engine // when the haystack would be too long. HaystackTooLong { .. } | UnsupportedAnchored { .. 
} => { unreachable!("found impossible error in meta engine: {}", merr) } } } } <file_sep>/testdata/multiline.toml [[test]] name = "basic1" regex = '(?m)^[a-z]+$' haystack = "abc\ndef\nxyz" matches = [[0, 3], [4, 7], [8, 11]] [[test]] name = "basic1-crlf" regex = '(?Rm)^[a-z]+$' haystack = "abc\ndef\nxyz" matches = [[0, 3], [4, 7], [8, 11]] [[test]] name = "basic1-crlf-cr" regex = '(?Rm)^[a-z]+$' haystack = "abc\rdef\rxyz" matches = [[0, 3], [4, 7], [8, 11]] [[test]] name = "basic2" regex = '(?m)^$' haystack = "abc\ndef\nxyz" matches = [] [[test]] name = "basic2-crlf" regex = '(?Rm)^$' haystack = "abc\ndef\nxyz" matches = [] [[test]] name = "basic2-crlf-cr" regex = '(?Rm)^$' haystack = "abc\rdef\rxyz" matches = [] [[test]] name = "basic3" regex = '(?m)^' haystack = "abc\ndef\nxyz" matches = [[0, 0], [4, 4], [8, 8]] [[test]] name = "basic3-crlf" regex = '(?Rm)^' haystack = "abc\ndef\nxyz" matches = [[0, 0], [4, 4], [8, 8]] [[test]] name = "basic3-crlf-cr" regex = '(?Rm)^' haystack = "abc\rdef\rxyz" matches = [[0, 0], [4, 4], [8, 8]] [[test]] name = "basic4" regex = '(?m)$' haystack = "abc\ndef\nxyz" matches = [[3, 3], [7, 7], [11, 11]] [[test]] name = "basic4-crlf" regex = '(?Rm)$' haystack = "abc\ndef\nxyz" matches = [[3, 3], [7, 7], [11, 11]] [[test]] name = "basic4-crlf-cr" regex = '(?Rm)$' haystack = "abc\rdef\rxyz" matches = [[3, 3], [7, 7], [11, 11]] [[test]] name = "basic5" regex = '(?m)^[a-z]' haystack = "abc\ndef\nxyz" matches = [[0, 1], [4, 5], [8, 9]] [[test]] name = "basic5-crlf" regex = '(?Rm)^[a-z]' haystack = "abc\ndef\nxyz" matches = [[0, 1], [4, 5], [8, 9]] [[test]] name = "basic5-crlf-cr" regex = '(?Rm)^[a-z]' haystack = "abc\rdef\rxyz" matches = [[0, 1], [4, 5], [8, 9]] [[test]] name = "basic6" regex = '(?m)[a-z]^' haystack = "abc\ndef\nxyz" matches = [] [[test]] name = "basic6-crlf" regex = '(?Rm)[a-z]^' haystack = "abc\ndef\nxyz" matches = [] [[test]] name = "basic6-crlf-cr" regex = '(?Rm)[a-z]^' haystack = "abc\rdef\rxyz" matches = [] [[test]] name = "basic7" regex = '(?m)[a-z]$' haystack = "abc\ndef\nxyz" matches = [[2, 3], [6, 7], [10, 11]] [[test]] name = "basic7-crlf" regex = '(?Rm)[a-z]$' haystack = "abc\ndef\nxyz" matches = [[2, 3], [6, 7], [10, 11]] [[test]] name = "basic7-crlf-cr" regex = '(?Rm)[a-z]$' haystack = "abc\rdef\rxyz" matches = [[2, 3], [6, 7], [10, 11]] [[test]] name = "basic8" regex = '(?m)$[a-z]' haystack = "abc\ndef\nxyz" matches = [] [[test]] name = "basic8-crlf" regex = '(?Rm)$[a-z]' haystack = "abc\ndef\nxyz" matches = [] [[test]] name = "basic8-crlf-cr" regex = '(?Rm)$[a-z]' haystack = "abc\rdef\rxyz" matches = [] [[test]] name = "basic9" regex = '(?m)^$' haystack = "" matches = [[0, 0]] [[test]] name = "basic9-crlf" regex = '(?Rm)^$' haystack = "" matches = [[0, 0]] [[test]] name = "repeat1" regex = '(?m)(?:^$)*' haystack = "a\nb\nc" matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]] [[test]] name = "repeat1-crlf" regex = '(?Rm)(?:^$)*' haystack = "a\nb\nc" matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]] [[test]] name = "repeat1-crlf-cr" regex = '(?Rm)(?:^$)*' haystack = "a\rb\rc" matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]] [[test]] name = "repeat1-no-multi" regex = '(?:^$)*' haystack = "a\nb\nc" matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]] [[test]] name = "repeat1-no-multi-crlf" regex = '(?R)(?:^$)*' haystack = "a\nb\nc" matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]] [[test]] name = "repeat1-no-multi-crlf-cr" regex = '(?R)(?:^$)*' haystack = "a\rb\rc" matches = [[0, 0], [1, 1], [2, 
2], [3, 3], [4, 4], [5, 5]] [[test]] name = "repeat2" regex = '(?m)(?:^|a)+' haystack = "a\naaa\n" matches = [[0, 0], [2, 2], [3, 5], [6, 6]] [[test]] name = "repeat2-crlf" regex = '(?Rm)(?:^|a)+' haystack = "a\naaa\n" matches = [[0, 0], [2, 2], [3, 5], [6, 6]] [[test]] name = "repeat2-crlf-cr" regex = '(?Rm)(?:^|a)+' haystack = "a\raaa\r" matches = [[0, 0], [2, 2], [3, 5], [6, 6]] [[test]] name = "repeat2-no-multi" regex = '(?:^|a)+' haystack = "a\naaa\n" matches = [[0, 0], [2, 5]] [[test]] name = "repeat2-no-multi-crlf" regex = '(?R)(?:^|a)+' haystack = "a\naaa\n" matches = [[0, 0], [2, 5]] [[test]] name = "repeat2-no-multi-crlf-cr" regex = '(?R)(?:^|a)+' haystack = "a\raaa\r" matches = [[0, 0], [2, 5]] [[test]] name = "repeat3" regex = '(?m)(?:^|a)*' haystack = "a\naaa\n" matches = [[0, 0], [1, 1], [2, 2], [3, 5], [6, 6]] [[test]] name = "repeat3-crlf" regex = '(?Rm)(?:^|a)*' haystack = "a\naaa\n" matches = [[0, 0], [1, 1], [2, 2], [3, 5], [6, 6]] [[test]] name = "repeat3-crlf-cr" regex = '(?Rm)(?:^|a)*' haystack = "a\raaa\r" matches = [[0, 0], [1, 1], [2, 2], [3, 5], [6, 6]] [[test]] name = "repeat3-no-multi" regex = '(?:^|a)*' haystack = "a\naaa\n" matches = [[0, 0], [1, 1], [2, 5], [6, 6]] [[test]] name = "repeat3-no-multi-crlf" regex = '(?R)(?:^|a)*' haystack = "a\naaa\n" matches = [[0, 0], [1, 1], [2, 5], [6, 6]] [[test]] name = "repeat3-no-multi-crlf-cr" regex = '(?R)(?:^|a)*' haystack = "a\raaa\r" matches = [[0, 0], [1, 1], [2, 5], [6, 6]] [[test]] name = "repeat4" regex = '(?m)(?:^|a+)' haystack = "a\naaa\n" matches = [[0, 0], [2, 2], [3, 5], [6, 6]] [[test]] name = "repeat4-crlf" regex = '(?Rm)(?:^|a+)' haystack = "a\naaa\n" matches = [[0, 0], [2, 2], [3, 5], [6, 6]] [[test]] name = "repeat4-crlf-cr" regex = '(?Rm)(?:^|a+)' haystack = "a\raaa\r" matches = [[0, 0], [2, 2], [3, 5], [6, 6]] [[test]] name = "repeat4-no-multi" regex = '(?:^|a+)' haystack = "a\naaa\n" matches = [[0, 0], [2, 5]] [[test]] name = "repeat4-no-multi-crlf" regex = '(?R)(?:^|a+)' haystack = "a\naaa\n" matches = [[0, 0], [2, 5]] [[test]] name = "repeat4-no-multi-crlf-cr" regex = '(?R)(?:^|a+)' haystack = "a\raaa\r" matches = [[0, 0], [2, 5]] [[test]] name = "repeat5" regex = '(?m)(?:^|a*)' haystack = "a\naaa\n" matches = [[0, 0], [1, 1], [2, 2], [3, 5], [6, 6]] [[test]] name = "repeat5-crlf" regex = '(?Rm)(?:^|a*)' haystack = "a\naaa\n" matches = [[0, 0], [1, 1], [2, 2], [3, 5], [6, 6]] [[test]] name = "repeat5-crlf-cr" regex = '(?Rm)(?:^|a*)' haystack = "a\raaa\r" matches = [[0, 0], [1, 1], [2, 2], [3, 5], [6, 6]] [[test]] name = "repeat5-no-multi" regex = '(?:^|a*)' haystack = "a\naaa\n" matches = [[0, 0], [1, 1], [2, 5], [6, 6]] [[test]] name = "repeat5-no-multi-crlf" regex = '(?R)(?:^|a*)' haystack = "a\naaa\n" matches = [[0, 0], [1, 1], [2, 5], [6, 6]] [[test]] name = "repeat5-no-multi-crlf-cr" regex = '(?R)(?:^|a*)' haystack = "a\raaa\r" matches = [[0, 0], [1, 1], [2, 5], [6, 6]] [[test]] name = "repeat6" regex = '(?m)(?:^[a-z])+' haystack = "abc\ndef\nxyz" matches = [[0, 1], [4, 5], [8, 9]] [[test]] name = "repeat6-crlf" regex = '(?Rm)(?:^[a-z])+' haystack = "abc\ndef\nxyz" matches = [[0, 1], [4, 5], [8, 9]] [[test]] name = "repeat6-crlf-cr" regex = '(?Rm)(?:^[a-z])+' haystack = "abc\rdef\rxyz" matches = [[0, 1], [4, 5], [8, 9]] [[test]] name = "repeat6-no-multi" regex = '(?:^[a-z])+' haystack = "abc\ndef\nxyz" matches = [[0, 1]] [[test]] name = "repeat6-no-multi-crlf" regex = '(?R)(?:^[a-z])+' haystack = "abc\ndef\nxyz" matches = [[0, 1]] [[test]] name = "repeat6-no-multi-crlf-cr" regex = 
'(?R)(?:^[a-z])+' haystack = "abc\rdef\rxyz" matches = [[0, 1]] [[test]] name = "repeat7" regex = '(?m)(?:^[a-z]{3}\n?)+' haystack = "abc\ndef\nxyz" matches = [[0, 11]] [[test]] name = "repeat7-crlf" regex = '(?Rm)(?:^[a-z]{3}\n?)+' haystack = "abc\ndef\nxyz" matches = [[0, 11]] [[test]] name = "repeat7-crlf-cr" regex = '(?Rm)(?:^[a-z]{3}\r?)+' haystack = "abc\rdef\rxyz" matches = [[0, 11]] [[test]] name = "repeat7-no-multi" regex = '(?:^[a-z]{3}\n?)+' haystack = "abc\ndef\nxyz" matches = [[0, 4]] [[test]] name = "repeat7-no-multi-crlf" regex = '(?R)(?:^[a-z]{3}\n?)+' haystack = "abc\ndef\nxyz" matches = [[0, 4]] [[test]] name = "repeat7-no-multi-crlf-cr" regex = '(?R)(?:^[a-z]{3}\r?)+' haystack = "abc\rdef\rxyz" matches = [[0, 4]] [[test]] name = "repeat8" regex = '(?m)(?:^[a-z]{3}\n?)*' haystack = "abc\ndef\nxyz" matches = [[0, 11]] [[test]] name = "repeat8-crlf" regex = '(?Rm)(?:^[a-z]{3}\n?)*' haystack = "abc\ndef\nxyz" matches = [[0, 11]] [[test]] name = "repeat8-crlf-cr" regex = '(?Rm)(?:^[a-z]{3}\r?)*' haystack = "abc\rdef\rxyz" matches = [[0, 11]] [[test]] name = "repeat8-no-multi" regex = '(?:^[a-z]{3}\n?)*' haystack = "abc\ndef\nxyz" matches = [[0, 4], [5, 5], [6, 6], [7, 7], [8, 8], [9, 9], [10, 10], [11, 11]] [[test]] name = "repeat8-no-multi-crlf" regex = '(?R)(?:^[a-z]{3}\n?)*' haystack = "abc\ndef\nxyz" matches = [[0, 4], [5, 5], [6, 6], [7, 7], [8, 8], [9, 9], [10, 10], [11, 11]] [[test]] name = "repeat8-no-multi-crlf-cr" regex = '(?R)(?:^[a-z]{3}\r?)*' haystack = "abc\rdef\rxyz" matches = [[0, 4], [5, 5], [6, 6], [7, 7], [8, 8], [9, 9], [10, 10], [11, 11]] [[test]] name = "repeat9" regex = '(?m)(?:\n?[a-z]{3}$)+' haystack = "abc\ndef\nxyz" matches = [[0, 11]] [[test]] name = "repeat9-crlf" regex = '(?Rm)(?:\n?[a-z]{3}$)+' haystack = "abc\ndef\nxyz" matches = [[0, 11]] [[test]] name = "repeat9-crlf-cr" regex = '(?Rm)(?:\r?[a-z]{3}$)+' haystack = "abc\rdef\rxyz" matches = [[0, 11]] [[test]] name = "repeat9-no-multi" regex = '(?:\n?[a-z]{3}$)+' haystack = "abc\ndef\nxyz" matches = [[7, 11]] [[test]] name = "repeat9-no-multi-crlf" regex = '(?R)(?:\n?[a-z]{3}$)+' haystack = "abc\ndef\nxyz" matches = [[7, 11]] [[test]] name = "repeat9-no-multi-crlf-cr" regex = '(?R)(?:\r?[a-z]{3}$)+' haystack = "abc\rdef\rxyz" matches = [[7, 11]] [[test]] name = "repeat10" regex = '(?m)(?:\n?[a-z]{3}$)*' haystack = "abc\ndef\nxyz" matches = [[0, 11]] [[test]] name = "repeat10-crlf" regex = '(?Rm)(?:\n?[a-z]{3}$)*' haystack = "abc\ndef\nxyz" matches = [[0, 11]] [[test]] name = "repeat10-crlf-cr" regex = '(?Rm)(?:\r?[a-z]{3}$)*' haystack = "abc\rdef\rxyz" matches = [[0, 11]] [[test]] name = "repeat10-no-multi" regex = '(?:\n?[a-z]{3}$)*' haystack = "abc\ndef\nxyz" matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5], [6, 6], [7, 11]] [[test]] name = "repeat10-no-multi-crlf" regex = '(?R)(?:\n?[a-z]{3}$)*' haystack = "abc\ndef\nxyz" matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5], [6, 6], [7, 11]] [[test]] name = "repeat10-no-multi-crlf-cr" regex = '(?R)(?:\r?[a-z]{3}$)*' haystack = "abc\rdef\rxyz" matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5], [6, 6], [7, 11]] [[test]] name = "repeat11" regex = '(?m)^*' haystack = "\naa\n" matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] [[test]] name = "repeat11-crlf" regex = '(?Rm)^*' haystack = "\naa\n" matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] [[test]] name = "repeat11-crlf-cr" regex = '(?Rm)^*' haystack = "\raa\r" matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] [[test]] name = "repeat11-no-multi" regex = '^*' haystack 
= "\naa\n" matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] [[test]] name = "repeat11-no-multi-crlf" regex = '(?R)^*' haystack = "\naa\n" matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] [[test]] name = "repeat11-no-multi-crlf-cr" regex = '(?R)^*' haystack = "\raa\r" matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] [[test]] name = "repeat12" regex = '(?m)^+' haystack = "\naa\n" matches = [[0, 0], [1, 1], [4, 4]] [[test]] name = "repeat12-crlf" regex = '(?Rm)^+' haystack = "\naa\n" matches = [[0, 0], [1, 1], [4, 4]] [[test]] name = "repeat12-crlf-cr" regex = '(?Rm)^+' haystack = "\raa\r" matches = [[0, 0], [1, 1], [4, 4]] [[test]] name = "repeat12-no-multi" regex = '^+' haystack = "\naa\n" matches = [[0, 0]] [[test]] name = "repeat12-no-multi-crlf" regex = '(?R)^+' haystack = "\naa\n" matches = [[0, 0]] [[test]] name = "repeat12-no-multi-crlf-cr" regex = '(?R)^+' haystack = "\raa\r" matches = [[0, 0]] [[test]] name = "repeat13" regex = '(?m)$*' haystack = "\naa\n" matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] [[test]] name = "repeat13-crlf" regex = '(?Rm)$*' haystack = "\naa\n" matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] [[test]] name = "repeat13-crlf-cr" regex = '(?Rm)$*' haystack = "\raa\r" matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] [[test]] name = "repeat13-no-multi" regex = '$*' haystack = "\naa\n" matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] [[test]] name = "repeat13-no-multi-crlf" regex = '(?R)$*' haystack = "\naa\n" matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] [[test]] name = "repeat13-no-multi-crlf-cr" regex = '(?R)$*' haystack = "\raa\r" matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] [[test]] name = "repeat14" regex = '(?m)$+' haystack = "\naa\n" matches = [[0, 0], [3, 3], [4, 4]] [[test]] name = "repeat14-crlf" regex = '(?Rm)$+' haystack = "\naa\n" matches = [[0, 0], [3, 3], [4, 4]] [[test]] name = "repeat14-crlf-cr" regex = '(?Rm)$+' haystack = "\raa\r" matches = [[0, 0], [3, 3], [4, 4]] [[test]] name = "repeat14-no-multi" regex = '$+' haystack = "\naa\n" matches = [[4, 4]] [[test]] name = "repeat14-no-multi-crlf" regex = '(?R)$+' haystack = "\naa\n" matches = [[4, 4]] [[test]] name = "repeat14-no-multi-crlf-cr" regex = '(?R)$+' haystack = "\raa\r" matches = [[4, 4]] [[test]] name = "repeat15" regex = '(?m)(?:$\n)+' haystack = "\n\naaa\n\n" matches = [[0, 2], [5, 7]] [[test]] name = "repeat15-crlf" regex = '(?Rm)(?:$\n)+' haystack = "\n\naaa\n\n" matches = [[0, 2], [5, 7]] [[test]] name = "repeat15-crlf-cr" regex = '(?Rm)(?:$\r)+' haystack = "\r\raaa\r\r" matches = [[0, 2], [5, 7]] [[test]] name = "repeat15-no-multi" regex = '(?:$\n)+' haystack = "\n\naaa\n\n" matches = [] [[test]] name = "repeat15-no-multi-crlf" regex = '(?R)(?:$\n)+' haystack = "\n\naaa\n\n" matches = [] [[test]] name = "repeat15-no-multi-crlf-cr" regex = '(?R)(?:$\r)+' haystack = "\r\raaa\r\r" matches = [] [[test]] name = "repeat16" regex = '(?m)(?:$\n)*' haystack = "\n\naaa\n\n" matches = [[0, 2], [3, 3], [4, 4], [5, 7]] [[test]] name = "repeat16-crlf" regex = '(?Rm)(?:$\n)*' haystack = "\n\naaa\n\n" matches = [[0, 2], [3, 3], [4, 4], [5, 7]] [[test]] name = "repeat16-crlf-cr" regex = '(?Rm)(?:$\r)*' haystack = "\r\raaa\r\r" matches = [[0, 2], [3, 3], [4, 4], [5, 7]] [[test]] name = "repeat16-no-multi" regex = '(?:$\n)*' haystack = "\n\naaa\n\n" matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5], [6, 6], [7, 7]] [[test]] name = "repeat16-no-multi-crlf" regex = '(?R)(?:$\n)*' haystack = "\n\naaa\n\n" matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5], [6, 
6], [7, 7]] [[test]] name = "repeat16-no-multi-crlf-cr" regex = '(?R)(?:$\r)*' haystack = "\r\raaa\r\r" matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5], [6, 6], [7, 7]] [[test]] name = "repeat17" regex = '(?m)(?:$\n^)+' haystack = "\n\naaa\n\n" matches = [[0, 2], [5, 7]] [[test]] name = "repeat17-crlf" regex = '(?Rm)(?:$\n^)+' haystack = "\n\naaa\n\n" matches = [[0, 2], [5, 7]] [[test]] name = "repeat17-crlf-cr" regex = '(?Rm)(?:$\r^)+' haystack = "\r\raaa\r\r" matches = [[0, 2], [5, 7]] [[test]] name = "repeat17-no-multi" regex = '(?:$\n^)+' haystack = "\n\naaa\n\n" matches = [] [[test]] name = "repeat17-no-multi-crlf" regex = '(?R)(?:$\n^)+' haystack = "\n\naaa\n\n" matches = [] [[test]] name = "repeat17-no-multi-crlf-cr" regex = '(?R)(?:$\r^)+' haystack = "\r\raaa\r\r" matches = [] [[test]] name = "repeat18" regex = '(?m)(?:^|$)+' haystack = "\n\naaa\n\n" matches = [[0, 0], [1, 1], [2, 2], [5, 5], [6, 6], [7, 7]] [[test]] name = "repeat18-crlf" regex = '(?Rm)(?:^|$)+' haystack = "\n\naaa\n\n" matches = [[0, 0], [1, 1], [2, 2], [5, 5], [6, 6], [7, 7]] [[test]] name = "repeat18-crlf-cr" regex = '(?Rm)(?:^|$)+' haystack = "\r\raaa\r\r" matches = [[0, 0], [1, 1], [2, 2], [5, 5], [6, 6], [7, 7]] [[test]] name = "repeat18-no-multi" regex = '(?:^|$)+' haystack = "\n\naaa\n\n" matches = [[0, 0], [7, 7]] [[test]] name = "repeat18-no-multi-crlf" regex = '(?R)(?:^|$)+' haystack = "\n\naaa\n\n" matches = [[0, 0], [7, 7]] [[test]] name = "repeat18-no-multi-crlf-cr" regex = '(?R)(?:^|$)+' haystack = "\r\raaa\r\r" matches = [[0, 0], [7, 7]] [[test]] name = "match-line-100" regex = '(?m)^.+$' haystack = "aa\naaaaaaaaaaaaaaaaaaa\n" matches = [[0, 2], [3, 22]] [[test]] name = "match-line-100-crlf" regex = '(?Rm)^.+$' haystack = "aa\naaaaaaaaaaaaaaaaaaa\n" matches = [[0, 2], [3, 22]] [[test]] name = "match-line-100-crlf-cr" regex = '(?Rm)^.+$' haystack = "aa\raaaaaaaaaaaaaaaaaaa\r" matches = [[0, 2], [3, 22]] [[test]] name = "match-line-200" regex = '(?m)^.+$' haystack = "aa\naaaaaaaaaaaaaaaaaaa\n" matches = [[0, 2], [3, 22]] unicode = false utf8 = false [[test]] name = "match-line-200-crlf" regex = '(?Rm)^.+$' haystack = "aa\naaaaaaaaaaaaaaaaaaa\n" matches = [[0, 2], [3, 22]] unicode = false utf8 = false [[test]] name = "match-line-200-crlf-cr" regex = '(?Rm)^.+$' haystack = "aa\raaaaaaaaaaaaaaaaaaa\r" matches = [[0, 2], [3, 22]] unicode = false utf8 = false <file_sep>/regex-capi/ctest/test.c #include <assert.h> #include <stdio.h> #include <stdint.h> #include <stdlib.h> #include <string.h> #include "rure.h" #ifndef DEBUG #define DEBUG false #endif bool test_is_match() { bool passed = true; const char *haystack = "snowman: \xE2\x98\x83"; rure *re = rure_compile_must("\\p{So}$"); bool matched = rure_is_match(re, (const uint8_t *)haystack, strlen(haystack), 0); if (!matched) { if (DEBUG) { fprintf(stderr, "[test_is_match] expected match, but got no match\n"); } passed = false; } rure_free(re); return passed; } bool test_shortest_match() { bool passed = true; const char *haystack = "aaaaa"; rure *re = rure_compile_must("a+"); size_t end = 0; bool matched = rure_shortest_match(re, (const uint8_t *)haystack, strlen(haystack), 0, &end); if (!matched) { if (DEBUG) { fprintf(stderr, "[test_shortest_match] expected match, " "but got no match\n"); } passed = false; } size_t expect_end = 1; if (end != expect_end) { if (DEBUG) { fprintf(stderr, "[test_shortest_match] expected match end location %zu " "but got %zu\n", expect_end, end); } passed = false; } rure_free(re); return passed; } bool test_find() { 
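    /*
     * Checks rure_find: the pattern \p{So}$ should match the snowman
     * codepoint (U+2603, encoded as three UTF-8 bytes) at the end of the
     * haystack, i.e. the byte span (9, 12).
     */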
bool passed = true; const char *haystack = "snowman: \xE2\x98\x83"; rure *re = rure_compile_must("\\p{So}$"); rure_match match = {0}; bool matched = rure_find(re, (const uint8_t *)haystack, strlen(haystack), 0, &match); if (!matched) { if (DEBUG) { fprintf(stderr, "[test_find] expected match, but got no match\n"); } passed = false; } size_t expect_start = 9; size_t expect_end = 12; if (match.start != expect_start || match.end != expect_end) { if (DEBUG) { fprintf(stderr, "[test_find] expected match at (%zu, %zu), but " "got match at (%zu, %zu)\n", expect_start, expect_end, match.start, match.end); } passed = false; } rure_free(re); return passed; } bool test_captures() { bool passed = true; const char *haystack = "snowman: \xE2\x98\x83"; rure *re = rure_compile_must(".(.*(?P<snowman>\\p{So}))$"); rure_match match = {0}; rure_captures *caps = rure_captures_new(re); bool matched = rure_find_captures(re, (const uint8_t *)haystack, strlen(haystack), 0, caps); if (!matched) { if (DEBUG) { fprintf(stderr, "[test_captures] expected match, but got no match\n"); } passed = false; } size_t expect_captures_len = 3; size_t captures_len = rure_captures_len(caps); if (captures_len != expect_captures_len) { if (DEBUG) { fprintf(stderr, "[test_captures] " "expected capture group length to be %zd, but " "got %zd\n", expect_captures_len, captures_len); } passed = false; goto done; } int32_t expect_capture_index = 2; int32_t capture_index = rure_capture_name_index(re, "snowman"); if (capture_index != expect_capture_index) { if (DEBUG) { fprintf(stderr, "[test_captures] " "expected capture index %d for name 'snowman', but " "got %d\n", expect_capture_index, capture_index); } passed = false; goto done; } size_t expect_start = 9; size_t expect_end = 12; rure_captures_at(caps, 2, &match); if (match.start != expect_start || match.end != expect_end) { if (DEBUG) { fprintf(stderr, "[test_captures] " "expected capture 2 match at (%zu, %zu), " "but got match at (%zu, %zu)\n", expect_start, expect_end, match.start, match.end); } passed = false; } done: rure_captures_free(caps); rure_free(re); return passed; } bool test_iter() { bool passed = true; const uint8_t *haystack = (const uint8_t *)"abc xyz"; size_t haystack_len = strlen((const char *)haystack); rure *re = rure_compile_must("\\w+(\\w)"); rure_match match = {0}; rure_captures *caps = rure_captures_new(re); rure_iter *it = rure_iter_new(re); bool matched = rure_iter_next(it, haystack, haystack_len, &match); if (!matched) { if (DEBUG) { fprintf(stderr, "[test_iter] expected first match, but got no match\n"); } passed = false; goto done; } size_t expect_start = 0; size_t expect_end = 3; if (match.start != expect_start || match.end != expect_end) { if (DEBUG) { fprintf(stderr, "[test_iter] expected first match at (%zu, %zu), but " "got match at (%zu, %zu)\n", expect_start, expect_end, match.start, match.end); } passed = false; goto done; } matched = rure_iter_next_captures(it, haystack, haystack_len, caps); if (!matched) { if (DEBUG) { fprintf(stderr, "[test_iter] expected second match, but got no match\n"); } passed = false; goto done; } rure_captures_at(caps, 1, &match); expect_start = 6; expect_end = 7; if (match.start != expect_start || match.end != expect_end) { if (DEBUG) { fprintf(stderr, "[test_iter] expected second match at (%zu, %zu), but " "got match at (%zu, %zu)\n", expect_start, expect_end, match.start, match.end); } passed = false; goto done; } done: rure_iter_free(it); rure_captures_free(caps); rure_free(re); return passed; } bool 
test_iter_capture_name(char *expect, char *given) { bool passed = true; if (strcmp(expect, given)) { if (DEBUG) { fprintf(stderr, "[test_iter_capture_name] expected first capture " "name '%s' got '%s'\n", expect, given); } passed = false; } return passed; } bool test_iter_capture_names() { bool passed = true; char *name; rure *re = rure_compile_must( "(?P<year>\\d{4})-(?P<month>\\d{2})-(?P<day>\\d{2})"); rure_iter_capture_names *it = rure_iter_capture_names_new(re); bool result = rure_iter_capture_names_next(it, &name); if (!result) { if (DEBUG) { fprintf(stderr, "[test_iter_capture_names] expected a second name, " "but got none\n"); } passed = false; goto done; } result = rure_iter_capture_names_next(it, &name); passed = test_iter_capture_name("year", name); if (!passed) { goto done; } result = rure_iter_capture_names_next(it, &name); passed = test_iter_capture_name("month", name); if (!passed) { goto done; } result = rure_iter_capture_names_next(it, &name); passed = test_iter_capture_name("day", name); if (!passed) { goto done; } done: rure_iter_capture_names_free(it); rure_free(re); return passed; } /* * This tests whether we can set the flags correctly. In this case, we disable * all flags, which includes disabling Unicode mode. When we disable Unicode * mode, we can match arbitrary possibly invalid UTF-8 bytes, such as \xFF. * (When Unicode mode is enabled, \xFF won't match .) */ bool test_flags() { bool passed = true; const char *pattern = "."; const char *haystack = "\xFF"; rure *re = rure_compile((const uint8_t *)pattern, strlen(pattern), 0, NULL, NULL); bool matched = rure_is_match(re, (const uint8_t *)haystack, strlen(haystack), 0); if (!matched) { if (DEBUG) { fprintf(stderr, "[test_flags] expected match, but got no match\n"); } passed = false; } rure_free(re); return passed; } bool test_compile_error() { bool passed = true; rure_error *err = rure_error_new(); rure *re = rure_compile((const uint8_t *)"(", 1, 0, NULL, err); if (re != NULL) { if (DEBUG) { fprintf(stderr, "[test_compile_error] " "expected NULL regex pointer, but got non-NULL pointer\n"); } passed = false; rure_free(re); } const char *msg = rure_error_message(err); if (NULL == strstr(msg, "unclosed group")) { if (DEBUG) { fprintf(stderr, "[test_compile_error] " "expected an 'unclosed parenthesis' error message, but " "got this instead: '%s'\n", msg); } passed = false; } rure_error_free(err); return passed; } bool test_compile_error_size_limit() { bool passed = true; rure_options *opts = rure_options_new(); rure_options_size_limit(opts, 0); rure_error *err = rure_error_new(); rure *re = rure_compile((const uint8_t *)"\\w{100}", 8, 0, opts, err); if (re != NULL) { if (DEBUG) { fprintf(stderr, "[test_compile_error_size_limit] " "expected NULL regex pointer, but got non-NULL pointer\n"); } passed = false; rure_free(re); } const char *msg = rure_error_message(err); if (NULL == strstr(msg, "exceeds size")) { if (DEBUG) { fprintf(stderr, "[test_compile_error] " "expected an 'exceeds size' error message, but " "got this instead: '%s'\n", msg); } passed = false; } rure_options_free(opts); rure_error_free(err); return passed; } bool test_regex_set_matches() { #define PAT_COUNT 6 bool passed = true; const char *patterns[] = { "foo", "barfoo", "\\w+", "\\d+", "foobar", "bar" }; const size_t patterns_lengths[] = { 3, 6, 3, 3, 6, 3 }; rure_error *err = rure_error_new(); rure_set *re = rure_compile_set((const uint8_t **) patterns, patterns_lengths, PAT_COUNT, 0, NULL, err); if (re == NULL) { passed = false; goto done2; } if 
(rure_set_len(re) != PAT_COUNT) { passed = false; goto done1; } if (!rure_set_is_match(re, (const uint8_t *) "foobar", 6, 0)) { passed = false; goto done1; } if (rure_set_is_match(re, (const uint8_t *) "", 0, 0)) { passed = false; goto done1; } bool matches[PAT_COUNT]; if (!rure_set_matches(re, (const uint8_t *) "foobar", 6, 0, matches)) { passed = false; goto done1; } const bool match_target[] = { true, false, true, false, true, true }; int i; for (i = 0; i < PAT_COUNT; ++i) { if (matches[i] != match_target[i]) { passed = false; goto done1; } } done1: rure_set_free(re); done2: rure_error_free(err); return passed; #undef PAT_COUNT } bool test_regex_set_match_start() { #define PAT_COUNT 3 bool passed = true; const char *patterns[] = { "foo", "bar", "fooo" }; const size_t patterns_lengths[] = { 3, 3, 4 }; rure_error *err = rure_error_new(); rure_set *re = rure_compile_set((const uint8_t **) patterns, patterns_lengths, PAT_COUNT, 0, NULL, err); if (re == NULL) { passed = false; goto done2; } if (rure_set_len(re) != PAT_COUNT) { passed = false; goto done1; } if (rure_set_is_match(re, (const uint8_t *)"foobiasdr", 7, 2)) { passed = false; goto done1; } { bool matches[PAT_COUNT]; if (!rure_set_matches(re, (const uint8_t *)"fooobar", 8, 0, matches)) { passed = false; goto done1; } const bool match_target[] = { true, true, true }; int i; for (i = 0; i < PAT_COUNT; ++i) { if (matches[i] != match_target[i]) { passed = false; goto done1; } } } { bool matches[PAT_COUNT]; if (!rure_set_matches(re, (const uint8_t *)"fooobar", 7, 1, matches)) { passed = false; goto done1; } const bool match_target[] = { false, true, false }; int i; for (i = 0; i < PAT_COUNT; ++i) { if (matches[i] != match_target[i]) { passed = false; goto done1; } } } done1: rure_set_free(re); done2: rure_error_free(err); return passed; #undef PAT_COUNT } bool test_regex_set_options() { bool passed = true; rure_options *opts = rure_options_new(); rure_options_size_limit(opts, 0); rure_error *err = rure_error_new(); const char *patterns[] = { "\\w{100}" }; const size_t patterns_lengths[] = { 8 }; rure_set *re = rure_compile_set( (const uint8_t **) patterns, patterns_lengths, 1, 0, opts, err); if (re != NULL) { if (DEBUG) { fprintf(stderr, "[test_compile_error_size_limit] " "expected NULL regex pointer, but got non-NULL pointer\n"); } passed = false; rure_set_free(re); } const char *msg = rure_error_message(err); if (NULL == strstr(msg, "exceeds size")) { if (DEBUG) { fprintf(stderr, "[test_compile_error] " "expected an 'exceeds size' error message, but " "got this instead: '%s'\n", msg); } passed = false; } rure_options_free(opts); rure_error_free(err); return passed; } bool test_escape() { bool passed = true; const char *pattern = "^[a-z]+.*$"; const char *expected_escaped = "\\^\\[a\\-z\\]\\+\\.\\*\\$"; const char *escaped = rure_escape_must(pattern); if (!escaped) { if (DEBUG) { fprintf(stderr, "[test_captures] expected escaped, but got no escaped\n"); } passed = false; } else if (strcmp(escaped, expected_escaped) != 0) { if (DEBUG) { fprintf(stderr, "[test_captures] expected \"%s\", but got \"%s\"\n", expected_escaped, escaped); } passed = false; } rure_cstring_free((char *) escaped); return passed; } void run_test(bool (test)(), const char *name, bool *passed) { if (!test()) { *passed = false; fprintf(stderr, "FAILED: %s\n", name); } else { fprintf(stderr, "PASSED: %s\n", name); } } int main() { bool passed = true; run_test(test_is_match, "test_is_match", &passed); run_test(test_shortest_match, "test_shortest_match", &passed); 
run_test(test_find, "test_find", &passed); run_test(test_captures, "test_captures", &passed); run_test(test_iter, "test_iter", &passed); run_test(test_iter_capture_names, "test_iter_capture_names", &passed); run_test(test_flags, "test_flags", &passed); run_test(test_compile_error, "test_compile_error", &passed); run_test(test_compile_error_size_limit, "test_compile_error_size_limit", &passed); run_test(test_regex_set_matches, "test_regex_set_match", &passed); run_test(test_regex_set_options, "test_regex_set_options", &passed); run_test(test_regex_set_match_start, "test_regex_set_match_start", &passed); run_test(test_escape, "test_escape", &passed); if (!passed) { exit(1); } return 0; } <file_sep>/regex-automata/src/hybrid/regex.rs /*! A lazy DFA backed `Regex`. This module provides a [`Regex`] backed by a lazy DFA. A `Regex` implements convenience routines you might have come to expect, such as finding a match and iterating over all non-overlapping matches. This `Regex` type is limited in its capabilities to what a lazy DFA can provide. Therefore, APIs involving capturing groups, for example, are not provided. Internally, a `Regex` is composed of two DFAs. One is a "forward" DFA that finds the end offset of a match, where as the other is a "reverse" DFA that find the start offset of a match. See the [parent module](crate::hybrid) for examples. */ use crate::{ hybrid::{ dfa::{self, DFA}, error::BuildError, }, nfa::thompson, util::{ iter, search::{Anchored, Input, Match, MatchError, MatchKind}, }, }; /// A regular expression that uses hybrid NFA/DFAs (also called "lazy DFAs") /// for searching. /// /// A regular expression is comprised of two lazy DFAs, a "forward" DFA and a /// "reverse" DFA. The forward DFA is responsible for detecting the end of /// a match while the reverse DFA is responsible for detecting the start /// of a match. Thus, in order to find the bounds of any given match, a /// forward search must first be run followed by a reverse search. A match /// found by the forward DFA guarantees that the reverse DFA will also find /// a match. /// /// # Fallibility /// /// Most of the search routines defined on this type will _panic_ when the /// underlying search fails. This might be because the DFA gave up because it /// saw a quit byte, whether configured explicitly or via heuristic Unicode /// word boundary support, although neither are enabled by default. It might /// also fail if the underlying DFA determines it isn't making effective use of /// the cache (which also never happens by default). Or it might fail because /// an invalid `Input` configuration is given, for example, with an unsupported /// [`Anchored`] mode. /// /// If you need to handle these error cases instead of allowing them to trigger /// a panic, then the lower level [`Regex::try_search`] provides a fallible API /// that never panics. /// /// # Example /// /// This example shows how to cause a search to terminate if it sees a /// `\n` byte, and handle the error returned. This could be useful if, for /// example, you wanted to prevent a user supplied pattern from matching /// across a line boundary. /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{hybrid::{dfa, regex::Regex}, Input, MatchError}; /// /// let re = Regex::builder() /// .dfa(dfa::Config::new().quit(b'\n', true)) /// .build(r"foo\p{any}+bar")?; /// let mut cache = re.create_cache(); /// /// let input = Input::new("foo\nbar"); /// // Normally this would produce a match, since \p{any} contains '\n'. 
/// // But since we instructed the automaton to enter a quit state if a /// // '\n' is observed, this produces a match error instead. /// let expected = MatchError::quit(b'\n', 3); /// let got = re.try_search(&mut cache, &input).unwrap_err(); /// assert_eq!(expected, got); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[derive(Debug)] pub struct Regex { /// The forward lazy DFA. This can only find the end of a match. forward: DFA, /// The reverse lazy DFA. This can only find the start of a match. /// /// This is built with 'all' match semantics (instead of leftmost-first) /// so that it always finds the longest possible match (which corresponds /// to the leftmost starting position). It is also compiled as an anchored /// matcher and has 'starts_for_each_pattern' enabled. Including starting /// states for each pattern is necessary to ensure that we only look for /// matches of a pattern that matched in the forward direction. Otherwise, /// we might wind up finding the "leftmost" starting position of a totally /// different pattern! reverse: DFA, } /// Convenience routines for regex and cache construction. impl Regex { /// Parse the given regular expression using the default configuration and /// return the corresponding regex. /// /// If you want a non-default configuration, then use the [`Builder`] to /// set your own configuration. /// /// # Example /// /// ``` /// use regex_automata::{hybrid::regex::Regex, Match}; /// /// let re = Regex::new("foo[0-9]+bar")?; /// let mut cache = re.create_cache(); /// assert_eq!( /// Some(Match::must(0, 3..14)), /// re.find(&mut cache, "zzzfoo12345barzzz"), /// ); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[cfg(feature = "syntax")] pub fn new(pattern: &str) -> Result<Regex, BuildError> { Regex::builder().build(pattern) } /// Like `new`, but parses multiple patterns into a single "multi regex." /// This similarly uses the default regex configuration. /// /// # Example /// /// ``` /// use regex_automata::{hybrid::regex::Regex, Match}; /// /// let re = Regex::new_many(&["[a-z]+", "[0-9]+"])?; /// let mut cache = re.create_cache(); /// /// let mut it = re.find_iter(&mut cache, "abc 1 foo 4567 0 quux"); /// assert_eq!(Some(Match::must(0, 0..3)), it.next()); /// assert_eq!(Some(Match::must(1, 4..5)), it.next()); /// assert_eq!(Some(Match::must(0, 6..9)), it.next()); /// assert_eq!(Some(Match::must(1, 10..14)), it.next()); /// assert_eq!(Some(Match::must(1, 15..16)), it.next()); /// assert_eq!(Some(Match::must(0, 17..21)), it.next()); /// assert_eq!(None, it.next()); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[cfg(feature = "syntax")] pub fn new_many<P: AsRef<str>>( patterns: &[P], ) -> Result<Regex, BuildError> { Regex::builder().build_many(patterns) } /// Return a builder for configuring the construction of a `Regex`. /// /// This is a convenience routine to avoid needing to import the /// [`Builder`] type in common cases. /// /// # Example /// /// This example shows how to use the builder to disable UTF-8 mode /// everywhere. 
/// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{ /// hybrid::regex::Regex, nfa::thompson, util::syntax, Match, /// }; /// /// let re = Regex::builder() /// .syntax(syntax::Config::new().utf8(false)) /// .thompson(thompson::Config::new().utf8(false)) /// .build(r"foo(?-u:[^b])ar.*")?; /// let mut cache = re.create_cache(); /// /// let haystack = b"\xFEfoo\xFFarzz\xE2\x98\xFF\n"; /// let expected = Some(Match::must(0, 1..9)); /// let got = re.find(&mut cache, haystack); /// assert_eq!(expected, got); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn builder() -> Builder { Builder::new() } /// Create a new cache for this `Regex`. /// /// The cache returned should only be used for searches for this /// `Regex`. If you want to reuse the cache for another `Regex`, then /// you must call [`Cache::reset`] with that `Regex` (or, equivalently, /// [`Regex::reset_cache`]). pub fn create_cache(&self) -> Cache { Cache::new(self) } /// Reset the given cache such that it can be used for searching with the /// this `Regex` (and only this `Regex`). /// /// A cache reset permits reusing memory already allocated in this cache /// with a different `Regex`. /// /// Resetting a cache sets its "clear count" to 0. This is relevant if the /// `Regex` has been configured to "give up" after it has cleared the cache /// a certain number of times. /// /// # Example /// /// This shows how to re-purpose a cache for use with a different `Regex`. /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{hybrid::regex::Regex, Match}; /// /// let re1 = Regex::new(r"\w")?; /// let re2 = Regex::new(r"\W")?; /// /// let mut cache = re1.create_cache(); /// assert_eq!( /// Some(Match::must(0, 0..2)), /// re1.find(&mut cache, "Δ"), /// ); /// /// // Using 'cache' with re2 is not allowed. It may result in panics or /// // incorrect results. In order to re-purpose the cache, we must reset /// // it with the Regex we'd like to use it with. /// // /// // Similarly, after this reset, using the cache with 're1' is also not /// // allowed. /// re2.reset_cache(&mut cache); /// assert_eq!( /// Some(Match::must(0, 0..3)), /// re2.find(&mut cache, "☃"), /// ); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn reset_cache(&self, cache: &mut Cache) { self.forward().reset_cache(&mut cache.forward); self.reverse().reset_cache(&mut cache.reverse); } } /// Standard infallible search routines for finding and iterating over matches. impl Regex { /// Returns true if and only if this regex matches the given haystack. /// /// This routine may short circuit if it knows that scanning future input /// will never lead to a different result. In particular, if the underlying /// DFA enters a match state or a dead state, then this routine will return /// `true` or `false`, respectively, without inspecting any future input. /// /// # Panics /// /// This routine panics if the search could not complete. This can occur /// in a number of circumstances: /// /// * The configuration of the lazy DFA may permit it to "quit" the search. /// For example, setting quit bytes or enabling heuristic support for /// Unicode word boundaries. The default configuration does not enable any /// option that could result in the lazy DFA quitting. /// * The configuration of the lazy DFA may also permit it to "give up" /// on a search if it makes ineffective use of its transition table /// cache. 
The default configuration does not enable this by default, /// although it is typically a good idea to. /// * When the provided `Input` configuration is not supported. For /// example, by providing an unsupported anchor mode. /// /// When a search panics, callers cannot know whether a match exists or /// not. /// /// Use [`Regex::try_search`] if you want to handle these error conditions. /// /// # Example /// /// ``` /// use regex_automata::hybrid::regex::Regex; /// /// let re = Regex::new("foo[0-9]+bar")?; /// let mut cache = re.create_cache(); /// /// assert!(re.is_match(&mut cache, "foo12345bar")); /// assert!(!re.is_match(&mut cache, "foobar")); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn is_match<'h, I: Into<Input<'h>>>( &self, cache: &mut Cache, input: I, ) -> bool { // Not only can we do an "earliest" search, but we can avoid doing a // reverse scan too. self.forward() .try_search_fwd(&mut cache.forward, &input.into().earliest(true)) .unwrap() .is_some() } /// Returns the start and end offset of the leftmost match. If no match /// exists, then `None` is returned. /// /// # Panics /// /// This routine panics if the search could not complete. This can occur /// in a number of circumstances: /// /// * The configuration of the lazy DFA may permit it to "quit" the search. /// For example, setting quit bytes or enabling heuristic support for /// Unicode word boundaries. The default configuration does not enable any /// option that could result in the lazy DFA quitting. /// * The configuration of the lazy DFA may also permit it to "give up" /// on a search if it makes ineffective use of its transition table /// cache. The default configuration does not enable this by default, /// although it is typically a good idea to. /// * When the provided `Input` configuration is not supported. For /// example, by providing an unsupported anchor mode. /// /// When a search panics, callers cannot know whether a match exists or /// not. /// /// Use [`Regex::try_search`] if you want to handle these error conditions. /// /// # Example /// /// ``` /// use regex_automata::{Match, hybrid::regex::Regex}; /// /// let re = Regex::new("foo[0-9]+")?; /// let mut cache = re.create_cache(); /// assert_eq!( /// Some(Match::must(0, 3..11)), /// re.find(&mut cache, "zzzfoo12345zzz"), /// ); /// /// // Even though a match is found after reading the first byte (`a`), /// // the default leftmost-first match semantics demand that we find the /// // earliest match that prefers earlier parts of the pattern over latter /// // parts. /// let re = Regex::new("abc|a")?; /// let mut cache = re.create_cache(); /// assert_eq!(Some(Match::must(0, 0..3)), re.find(&mut cache, "abc")); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn find<'h, I: Into<Input<'h>>>( &self, cache: &mut Cache, input: I, ) -> Option<Match> { self.try_search(cache, &input.into()).unwrap() } /// Returns an iterator over all non-overlapping leftmost matches in the /// given bytes. If no match exists, then the iterator yields no elements. /// /// # Panics /// /// This routine panics if the search could not complete. This can occur /// in a number of circumstances: /// /// * The configuration of the lazy DFA may permit it to "quit" the search. /// For example, setting quit bytes or enabling heuristic support for /// Unicode word boundaries. The default configuration does not enable any /// option that could result in the lazy DFA quitting. 
/// * The configuration of the lazy DFA may also permit it to "give up" /// on a search if it makes ineffective use of its transition table /// cache. The default configuration does not enable this by default, /// although it is typically a good idea to. /// * When the provided `Input` configuration is not supported. For /// example, by providing an unsupported anchor mode. /// /// When a search panics, callers cannot know whether a match exists or /// not. /// /// The above conditions also apply to the iterator returned as well. For /// example, if the lazy DFA gives up or quits during a search using this /// method, then a panic will occur during iteration. /// /// Use [`Regex::try_search`] with [`util::iter::Searcher`](iter::Searcher) /// if you want to handle these error conditions. /// /// # Example /// /// ``` /// use regex_automata::{hybrid::regex::Regex, Match}; /// /// let re = Regex::new("foo[0-9]+")?; /// let mut cache = re.create_cache(); /// /// let text = "foo1 foo12 foo123"; /// let matches: Vec<Match> = re.find_iter(&mut cache, text).collect(); /// assert_eq!(matches, vec![ /// Match::must(0, 0..4), /// Match::must(0, 5..10), /// Match::must(0, 11..17), /// ]); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn find_iter<'r, 'c, 'h, I: Into<Input<'h>>>( &'r self, cache: &'c mut Cache, input: I, ) -> FindMatches<'r, 'c, 'h> { let it = iter::Searcher::new(input.into()); FindMatches { re: self, cache, it } } } /// Lower level "search" primitives that accept a `&Input` for cheap reuse /// and return an error if one occurs instead of panicking. impl Regex { /// Returns the start and end offset of the leftmost match. If no match /// exists, then `None` is returned. /// /// This is like [`Regex::find`] but with two differences: /// /// 1. It is not generic over `Into<Input>` and instead accepts a /// `&Input`. This permits reusing the same `Input` for multiple searches /// without needing to create a new one. This _may_ help with latency. /// 2. It returns an error if the search could not complete where as /// [`Regex::find`] will panic. /// /// # Errors /// /// This routine errors if the search could not complete. This can occur /// in a number of circumstances: /// /// * The configuration of the lazy DFA may permit it to "quit" the search. /// For example, setting quit bytes or enabling heuristic support for /// Unicode word boundaries. The default configuration does not enable any /// option that could result in the lazy DFA quitting. /// * The configuration of the lazy DFA may also permit it to "give up" /// on a search if it makes ineffective use of its transition table /// cache. The default configuration does not enable this by default, /// although it is typically a good idea to. /// * When the provided `Input` configuration is not supported. For /// example, by providing an unsupported anchor mode. /// /// When a search returns an error, callers cannot know whether a match /// exists or not. #[inline] pub fn try_search( &self, cache: &mut Cache, input: &Input<'_>, ) -> Result<Option<Match>, MatchError> { let (fcache, rcache) = (&mut cache.forward, &mut cache.reverse); let end = match self.forward().try_search_fwd(fcache, input)? { None => return Ok(None), Some(end) => end, }; // This special cases an empty match at the beginning of the search. If // our end matches our start, then since a reverse DFA can't match past // the start, it must follow that our starting position is also our end // position. So short circuit and skip the reverse search. 
if input.start() == end.offset() { return Ok(Some(Match::new( end.pattern(), end.offset()..end.offset(), ))); } // We can also skip the reverse search if we know our search was // anchored. This occurs either when the input config is anchored or // when we know the regex itself is anchored. In this case, we know the // start of the match, if one is found, must be the start of the // search. if self.is_anchored(input) { return Ok(Some(Match::new( end.pattern(), input.start()..end.offset(), ))); } // N.B. I have tentatively convinced myself that it isn't necessary // to specify the specific pattern for the reverse search since the // reverse search will always find the same pattern to match as the // forward search. But I lack a rigorous proof. Why not just provide // the pattern anyway? Well, if it is needed, then leaving it out // gives us a chance to find a witness. (Also, if we don't need to // specify the pattern, then we don't need to build the reverse DFA // with 'starts_for_each_pattern' enabled. It doesn't matter too much // for the lazy DFA, but does make the overall DFA bigger.) // // We also need to be careful to disable 'earliest' for the reverse // search, since it could be enabled for the forward search. In the // reverse case, to satisfy "leftmost" criteria, we need to match as // much as we can. We also need to be careful to make the search // anchored. We don't want the reverse search to report any matches // other than the one beginning at the end of our forward search. let revsearch = input .clone() .span(input.start()..end.offset()) .anchored(Anchored::Yes) .earliest(false); let start = self .reverse() .try_search_rev(rcache, &revsearch)? .expect("reverse search must match if forward search does"); debug_assert_eq!( start.pattern(), end.pattern(), "forward and reverse search must match same pattern", ); debug_assert!(start.offset() <= end.offset()); Ok(Some(Match::new(end.pattern(), start.offset()..end.offset()))) } /// Returns true if either the given input specifies an anchored search /// or if the underlying NFA is always anchored. fn is_anchored(&self, input: &Input<'_>) -> bool { match input.get_anchored() { Anchored::No => { self.forward().get_nfa().is_always_start_anchored() } Anchored::Yes | Anchored::Pattern(_) => true, } } } /// Non-search APIs for querying information about the regex and setting a /// prefilter. impl Regex { /// Return the underlying lazy DFA responsible for forward matching. /// /// This is useful for accessing the underlying lazy DFA and using it /// directly if the situation calls for it. pub fn forward(&self) -> &DFA { &self.forward } /// Return the underlying lazy DFA responsible for reverse matching. /// /// This is useful for accessing the underlying lazy DFA and using it /// directly if the situation calls for it. pub fn reverse(&self) -> &DFA { &self.reverse } /// Returns the total number of patterns matched by this regex. /// /// # Example /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::hybrid::regex::Regex; /// /// let re = Regex::new_many(&[r"[a-z]+", r"[0-9]+", r"\w+"])?; /// assert_eq!(3, re.pattern_len()); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn pattern_len(&self) -> usize { assert_eq!(self.forward().pattern_len(), self.reverse().pattern_len()); self.forward().pattern_len() } } /// An iterator over all non-overlapping matches for an infallible search. /// /// The iterator yields a [`Match`] value until no more matches could be found. 
/// If the underlying regex engine returns an error, then a panic occurs. /// /// The lifetime parameters are as follows: /// /// * `'r` represents the lifetime of the regex object. /// * `'h` represents the lifetime of the haystack being searched. /// * `'c` represents the lifetime of the regex cache. /// /// This iterator can be created with the [`Regex::find_iter`] method. #[derive(Debug)] pub struct FindMatches<'r, 'c, 'h> { re: &'r Regex, cache: &'c mut Cache, it: iter::Searcher<'h>, } impl<'r, 'c, 'h> Iterator for FindMatches<'r, 'c, 'h> { type Item = Match; #[inline] fn next(&mut self) -> Option<Match> { let FindMatches { re, ref mut cache, ref mut it } = *self; it.advance(|input| re.try_search(cache, input)) } } /// A cache represents a partially computed forward and reverse DFA. /// /// A cache is the key component that differentiates a classical DFA and a /// hybrid NFA/DFA (also called a "lazy DFA"). Where a classical DFA builds a /// complete transition table that can handle all possible inputs, a hybrid /// NFA/DFA starts with an empty transition table and builds only the parts /// required during search. The parts that are built are stored in a cache. For /// this reason, a cache is a required parameter for nearly every operation on /// a [`Regex`]. /// /// Caches can be created from their corresponding `Regex` via /// [`Regex::create_cache`]. A cache can only be used with either the `Regex` /// that created it, or the `Regex` that was most recently used to reset it /// with [`Cache::reset`]. Using a cache with any other `Regex` may result in /// panics or incorrect results. #[derive(Debug, Clone)] pub struct Cache { forward: dfa::Cache, reverse: dfa::Cache, } impl Cache { /// Create a new cache for the given `Regex`. /// /// The cache returned should only be used for searches for the given /// `Regex`. If you want to reuse the cache for another `Regex`, then you /// must call [`Cache::reset`] with that `Regex`. pub fn new(re: &Regex) -> Cache { let forward = dfa::Cache::new(re.forward()); let reverse = dfa::Cache::new(re.reverse()); Cache { forward, reverse } } /// Reset this cache such that it can be used for searching with the given /// `Regex` (and only that `Regex`). /// /// A cache reset permits reusing memory already allocated in this cache /// with a different `Regex`. /// /// Resetting a cache sets its "clear count" to 0. This is relevant if the /// `Regex` has been configured to "give up" after it has cleared the cache /// a certain number of times. /// /// # Example /// /// This shows how to re-purpose a cache for use with a different `Regex`. /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{hybrid::regex::Regex, Match}; /// /// let re1 = Regex::new(r"\w")?; /// let re2 = Regex::new(r"\W")?; /// /// let mut cache = re1.create_cache(); /// assert_eq!( /// Some(Match::must(0, 0..2)), /// re1.find(&mut cache, "Δ"), /// ); /// /// // Using 'cache' with re2 is not allowed. It may result in panics or /// // incorrect results. In order to re-purpose the cache, we must reset /// // it with the Regex we'd like to use it with. /// // /// // Similarly, after this reset, using the cache with 're1' is also not /// // allowed. 
/// cache.reset(&re2); /// assert_eq!( /// Some(Match::must(0, 0..3)), /// re2.find(&mut cache, "☃"), /// ); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn reset(&mut self, re: &Regex) { self.forward.reset(re.forward()); self.reverse.reset(re.reverse()); } /// Return a reference to the forward cache. pub fn forward(&mut self) -> &dfa::Cache { &self.forward } /// Return a reference to the reverse cache. pub fn reverse(&mut self) -> &dfa::Cache { &self.reverse } /// Return a mutable reference to the forward cache. /// /// If you need mutable references to both the forward and reverse caches, /// then use [`Cache::as_parts_mut`]. pub fn forward_mut(&mut self) -> &mut dfa::Cache { &mut self.forward } /// Return a mutable reference to the reverse cache. /// /// If you need mutable references to both the forward and reverse caches, /// then use [`Cache::as_parts_mut`]. pub fn reverse_mut(&mut self) -> &mut dfa::Cache { &mut self.reverse } /// Return references to the forward and reverse caches, respectively. pub fn as_parts(&self) -> (&dfa::Cache, &dfa::Cache) { (&self.forward, &self.reverse) } /// Return mutable references to the forward and reverse caches, /// respectively. pub fn as_parts_mut(&mut self) -> (&mut dfa::Cache, &mut dfa::Cache) { (&mut self.forward, &mut self.reverse) } /// Returns the heap memory usage, in bytes, as a sum of the forward and /// reverse lazy DFA caches. /// /// This does **not** include the stack size used up by this cache. To /// compute that, use `std::mem::size_of::<Cache>()`. pub fn memory_usage(&self) -> usize { self.forward.memory_usage() + self.reverse.memory_usage() } } /// A builder for a regex based on a hybrid NFA/DFA. /// /// This builder permits configuring options for the syntax of a pattern, the /// NFA construction, the lazy DFA construction and finally the regex searching /// itself. This builder is different from a general purpose regex builder /// in that it permits fine grain configuration of the construction process. /// The trade off for this is complexity, and the possibility of setting a /// configuration that might not make sense. For example, there are two /// different UTF-8 modes: /// /// * [`syntax::Config::utf8`](crate::util::syntax::Config::utf8) controls /// whether the pattern itself can contain sub-expressions that match invalid /// UTF-8. /// * [`thompson::Config::utf8`] controls how the regex iterators themselves /// advance the starting position of the next search when a match with zero /// length is found. /// /// Generally speaking, callers will want to either enable all of these or /// disable all of these. /// /// Internally, building a regex requires building two hybrid NFA/DFAs, /// where one is responsible for finding the end of a match and the other is /// responsible for finding the start of a match. If you only need to detect /// whether something matched, or only the end of a match, then you should use /// a [`dfa::Builder`] to construct a single hybrid NFA/DFA, which is cheaper /// than building two of them. /// /// # Example /// /// This example shows how to disable UTF-8 mode in the syntax and the regex /// itself. This is generally what you want for matching on arbitrary bytes. 
/// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{ /// hybrid::regex::Regex, nfa::thompson, util::syntax, Match, /// }; /// /// let re = Regex::builder() /// .syntax(syntax::Config::new().utf8(false)) /// .thompson(thompson::Config::new().utf8(false)) /// .build(r"foo(?-u:[^b])ar.*")?; /// let mut cache = re.create_cache(); /// /// let haystack = b"\xFEfoo\xFFarzz\xE2\x98\xFF\n"; /// let expected = Some(Match::must(0, 1..9)); /// let got = re.find(&mut cache, haystack); /// assert_eq!(expected, got); /// // Notice that `(?-u:[^b])` matches invalid UTF-8, /// // but the subsequent `.*` does not! Disabling UTF-8 /// // on the syntax permits this. /// assert_eq!(b"foo\xFFarzz", &haystack[got.unwrap().range()]); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[derive(Clone, Debug)] pub struct Builder { dfa: dfa::Builder, } impl Builder { /// Create a new regex builder with the default configuration. pub fn new() -> Builder { Builder { dfa: DFA::builder() } } /// Build a regex from the given pattern. /// /// If there was a problem parsing or compiling the pattern, then an error /// is returned. #[cfg(feature = "syntax")] pub fn build(&self, pattern: &str) -> Result<Regex, BuildError> { self.build_many(&[pattern]) } /// Build a regex from the given patterns. #[cfg(feature = "syntax")] pub fn build_many<P: AsRef<str>>( &self, patterns: &[P], ) -> Result<Regex, BuildError> { let forward = self.dfa.build_many(patterns)?; let reverse = self .dfa .clone() .configure( DFA::config() .prefilter(None) .specialize_start_states(false) .match_kind(MatchKind::All), ) .thompson(thompson::Config::new().reverse(true)) .build_many(patterns)?; Ok(self.build_from_dfas(forward, reverse)) } /// Build a regex from its component forward and reverse hybrid NFA/DFAs. /// /// This is useful when you've built a forward and reverse lazy DFA /// separately, and want to combine them into a single regex. Once build, /// the individual DFAs given can still be accessed via [`Regex::forward`] /// and [`Regex::reverse`]. /// /// It is important that the reverse lazy DFA be compiled under the /// following conditions: /// /// * It should use [`MatchKind::All`] semantics. /// * It should match in reverse. /// * Otherwise, its configuration should match the forward DFA. /// /// If these conditions aren't satisfied, then the behavior of searches is /// unspecified. /// /// Note that when using this constructor, no configuration is applied. /// Since this routine provides the DFAs to the builder, there is no /// opportunity to apply other configuration options. /// /// # Example /// /// This shows how to build individual lazy forward and reverse DFAs, and /// then combine them into a single `Regex`. /// /// ``` /// use regex_automata::{ /// hybrid::{dfa::DFA, regex::Regex}, /// nfa::thompson, /// MatchKind, /// }; /// /// let fwd = DFA::new(r"foo[0-9]+")?; /// let rev = DFA::builder() /// .configure(DFA::config().match_kind(MatchKind::All)) /// .thompson(thompson::Config::new().reverse(true)) /// .build(r"foo[0-9]+")?; /// /// let re = Regex::builder().build_from_dfas(fwd, rev); /// let mut cache = re.create_cache(); /// assert_eq!(true, re.is_match(&mut cache, "foo123")); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn build_from_dfas(&self, forward: DFA, reverse: DFA) -> Regex { Regex { forward, reverse } } /// Set the syntax configuration for this builder using /// [`syntax::Config`](crate::util::syntax::Config). 
/// /// This permits setting things like case insensitivity, Unicode and multi /// line mode. #[cfg(feature = "syntax")] pub fn syntax( &mut self, config: crate::util::syntax::Config, ) -> &mut Builder { self.dfa.syntax(config); self } /// Set the Thompson NFA configuration for this builder using /// [`nfa::thompson::Config`](thompson::Config). /// /// This permits setting things like whether additional time should be /// spent shrinking the size of the NFA. #[cfg(feature = "syntax")] pub fn thompson(&mut self, config: thompson::Config) -> &mut Builder { self.dfa.thompson(config); self } /// Set the lazy DFA compilation configuration for this builder using /// [`dfa::Config`](dfa::Config). /// /// This permits setting things like whether Unicode word boundaries should /// be heuristically supported or settings how the behavior of the cache. pub fn dfa(&mut self, config: dfa::Config) -> &mut Builder { self.dfa.configure(config); self } } impl Default for Builder { fn default() -> Builder { Builder::new() } } <file_sep>/README.md regex ===== This crate provides routines for searching strings for matches of a [regular expression] (aka "regex"). The regex syntax supported by this crate is similar to other regex engines, but it lacks several features that are not known how to implement efficiently. This includes, but is not limited to, look-around and backreferences. In exchange, all regex searches in this crate have worst case `O(m * n)` time complexity, where `m` is proportional to the size of the regex and `n` is proportional to the size of the string being searched. [regular expression]: https://en.wikipedia.org/wiki/Regular_expression [![Build status](https://github.com/rust-lang/regex/workflows/ci/badge.svg)](https://github.com/rust-lang/regex/actions) [![Crates.io](https://img.shields.io/crates/v/regex.svg)](https://crates.io/crates/regex) ### Documentation [Module documentation with examples](https://docs.rs/regex). The module documentation also includes a comprehensive description of the syntax supported. Documentation with examples for the various matching functions and iterators can be found on the [`Regex` type](https://docs.rs/regex/*/regex/struct.Regex.html). ### Usage To bring this crate into your repository, either add `regex` to your `Cargo.toml`, or run `cargo add regex`. Here's a simple example that matches a date in YYYY-MM-DD format and prints the year, month and day: ```rust use regex::Regex; fn main() { let re = Regex::new(r"(?x) (?P<year>\d{4}) # the year - (?P<month>\d{2}) # the month - (?P<day>\d{2}) # the day ").unwrap(); let caps = re.captures("2010-03-14").unwrap(); assert_eq!("2010", &caps["year"]); assert_eq!("03", &caps["month"]); assert_eq!("14", &caps["day"]); } ``` If you have lots of dates in text that you'd like to iterate over, then it's easy to adapt the above example with an iterator: ```rust use regex::Regex; fn main() { let re = Regex::new(r"(\d{4})-(\d{2})-(\d{2})").unwrap(); let hay = "On 2010-03-14, foo happened. On 2014-10-14, bar happened."; let mut dates = vec![]; for (_, [year, month, day]) in re.captures_iter(hay).map(|c| c.extract()) { dates.push((year, month, day)); } assert_eq!(dates, vec![ ("2010", "03", "14"), ("2014", "10", "14"), ]); } ``` ### Usage: Avoid compiling the same regex in a loop It is an anti-pattern to compile the same regular expression in a loop since compilation is typically expensive. (It takes anywhere from a few microseconds to a few **milliseconds** depending on the size of the regex.) 
Not only is compilation itself expensive, but this also prevents optimizations that reuse allocations internally to the matching engines. In Rust, it can sometimes be a pain to pass regular expressions around if they're used from inside a helper function. Instead, we recommend using the [`once_cell`](https://crates.io/crates/once_cell) crate to ensure that regular expressions are compiled exactly once. For example: ```rust use { once_cell::sync::Lazy, regex::Regex, }; fn some_helper_function(haystack: &str) -> bool { static RE: Lazy<Regex> = Lazy::new(|| Regex::new(r"...").unwrap()); RE.is_match(haystack) } fn main() { assert!(some_helper_function("abc")); assert!(!some_helper_function("ac")); } ``` Specifically, in this example, the regex will be compiled when it is used for the first time. On subsequent uses, it will reuse the previous compilation. ### Usage: match regular expressions on `&[u8]` The main API of this crate (`regex::Regex`) requires the caller to pass a `&str` for searching. In Rust, an `&str` is required to be valid UTF-8, which means the main API can't be used for searching arbitrary bytes. To match on arbitrary bytes, use the `regex::bytes::Regex` API. The API is identical to the main API, except that it takes an `&[u8]` to search on instead of an `&str`. The `&[u8]` APIs also permit disabling Unicode mode in the regex even when the pattern would match invalid UTF-8. For example, `(?-u:.)` is not allowed in `regex::Regex` but is allowed in `regex::bytes::Regex` since `(?-u:.)` matches any byte except for `\n`. Conversely, `.` will match the UTF-8 encoding of any Unicode scalar value except for `\n`. This example shows how to find all null-terminated strings in a slice of bytes: ```rust use regex::bytes::Regex; let re = Regex::new(r"(?-u)(?<cstr>[^\x00]+)\x00").unwrap(); let text = b"foo\xFFbar\x00baz\x00"; // Extract all of the strings without the null terminator from each match. // The unwrap is OK here since a match requires the `cstr` capture to match. let cstrs: Vec<&[u8]> = re.captures_iter(text) .map(|c| c.name("cstr").unwrap().as_bytes()) .collect(); assert_eq!(vec![&b"foo\xFFbar"[..], &b"baz"[..]], cstrs); ``` Notice here that the `[^\x00]+` will match any *byte* except for `NUL`, including bytes like `\xFF` which are not valid UTF-8. When using the main API, `[^\x00]+` would instead match any valid UTF-8 sequence except for `NUL`. ### Usage: match multiple regular expressions simultaneously This demonstrates how to use a `RegexSet` to match multiple (possibly overlapping) regular expressions in a single scan of the search text: ```rust use regex::RegexSet; let set = RegexSet::new(&[ r"\w+", r"\d+", r"\pL+", r"foo", r"bar", r"barfoo", r"foobar", ]).unwrap(); // Iterate over and collect all of the matches. let matches: Vec<_> = set.matches("foobar").into_iter().collect(); assert_eq!(matches, vec![0, 2, 3, 4, 6]); // You can also test whether a particular regex matched: let matches = set.matches("foobar"); assert!(!matches.matched(5)); assert!(matches.matched(6)); ``` ### Usage: regex internals as a library The [`regex-automata` directory](./regex-automata/) contains a crate that exposes all of the internal matching engines used by the `regex` crate. The idea is that the `regex` crate exposes a simple API for 99% of use cases, but `regex-automata` exposes oodles of customizable behaviors. 
[Documentation for `regex-automata`.](https://docs.rs/regex-automata) ### Usage: a regular expression parser This repository contains a crate that provides a well tested regular expression parser, abstract syntax and a high-level intermediate representation for convenient analysis. It provides no facilities for compilation or execution. This may be useful if you're implementing your own regex engine or otherwise need to do analysis on the syntax of a regular expression. It is otherwise not recommended for general use. [Documentation for `regex-syntax`.](https://docs.rs/regex-syntax) ### Crate features This crate comes with several features that permit tweaking the trade off between binary size, compilation time and runtime performance. Users of this crate can selectively disable Unicode tables, or choose from a variety of optimizations performed by this crate to disable. When all of these features are disabled, runtime match performance may be much worse, but if you're matching on short strings, or if high performance isn't necessary, then such a configuration is perfectly serviceable. To disable all such features, use the following `Cargo.toml` dependency configuration: ```toml [dependencies.regex] version = "1.3" default-features = false # Unless you have a specific reason not to, it's good sense to enable standard # library support. It enables several optimizations and avoids spin locks. It # also shouldn't meaningfully impact compile times or binary size. features = ["std"] ``` This will reduce the dependency tree of `regex` down to two crates: `regex-syntax` and `regex-automata`. The full set of features one can disable is [in the "Crate features" section of the documentation](https://docs.rs/regex/1.*/#crate-features). ### Performance One of the goals of this crate is for the regex engine to be "fast." While that is a somewhat nebulous goal, it is usually interpreted in one of two ways. First, it means that all searches take worst case `O(m * n)` time, where `m` is proportional to `len(regex)` and `n` is proportional to `len(haystack)`. Second, it means that even aside from the time complexity constraint, regex searches are "fast" in practice. While the first interpretation is pretty unambiguous, the second one remains nebulous. While nebulous, it guides this crate's architecture and the sorts of trade offs it makes. For example, here are some general architectural statements that follow as a result of the goal to be "fast": * When given the choice between faster regex searches and faster Rust compile times, this crate will generally choose faster regex searches. * When given the choice between faster regex searches and faster regex compile times, this crate will generally choose faster regex searches. That is, it is generally acceptable for `Regex::new` to get a little slower if it means that searches get faster. (This is a somewhat delicate balance to strike, because the speed of `Regex::new` needs to remain somewhat reasonable. But this is why one should avoid re-compiling the same regex over and over again.) * When given the choice between faster regex searches and simpler API design, this crate will generally choose faster regex searches. For example, if one didn't care about performance, we could likely get rid of both of the `Regex::is_match` and `Regex::find` APIs and instead just rely on `Regex::captures`. There are perhaps more ways that being "fast" influences things.
While this repository used to provide its own benchmark suite, it has since been moved to [rebar](https://github.com/BurntSushi/rebar). The benchmarks are quite extensive, and there are many more than what is shown in rebar's README (which is just limited to a "curated" set meant to compare performance between regex engines). To run all of this crate's benchmarks, first start by cloning and installing `rebar`: ```text $ git clone https://github.com/BurntSushi/rebar $ cd rebar $ cargo install --path ./ ``` Then build the benchmark harness for just this crate: ```text $ rebar build -e '^rust/regex$' ``` Run all benchmarks for this crate as tests (each benchmark is executed once to ensure it works): ```text $ rebar measure -e '^rust/regex$' -t ``` Record measurements for all benchmarks and save them to a CSV file: ```text $ rebar measure -e '^rust/regex$' | tee results.csv ``` Explore benchmark timings: ```text $ rebar cmp results.csv ``` See the `rebar` documentation for more details on how it works and how to compare results with other regex engines. ### Minimum Rust version policy This crate's minimum supported `rustc` version is `1.60.0`. The policy is that the minimum Rust version required to use this crate can be increased in minor version updates. For example, if regex 1.0 requires Rust 1.20.0, then regex 1.0.z for all values of `z` will also require Rust 1.20.0 or newer. However, regex 1.y for `y > 0` may require a newer minimum version of Rust. ### License This project is licensed under either of * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or https://www.apache.org/licenses/LICENSE-2.0) * MIT license ([LICENSE-MIT](LICENSE-MIT) or https://opensource.org/licenses/MIT) at your option. The data in `regex-syntax/src/unicode_tables/` is licensed under the Unicode License Agreement ([LICENSE-UNICODE](https://www.unicode.org/copyright.html#License)). <file_sep>/regex-automata/src/util/memchr.rs /*! This module defines simple wrapper routines for the memchr functions from the `memchr` crate. Basically, when the `memchr` crate is available, we use it, otherwise we use a naive implementation which is still pretty fast. 
*/ pub(crate) use self::inner::*; #[cfg(feature = "perf-literal-substring")] pub(super) mod inner { #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn memchr(n1: u8, haystack: &[u8]) -> Option<usize> { memchr::memchr(n1, haystack) } #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn memchr2(n1: u8, n2: u8, haystack: &[u8]) -> Option<usize> { memchr::memchr2(n1, n2, haystack) } #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn memchr3( n1: u8, n2: u8, n3: u8, haystack: &[u8], ) -> Option<usize> { memchr::memchr3(n1, n2, n3, haystack) } #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn memrchr(n1: u8, haystack: &[u8]) -> Option<usize> { memchr::memrchr(n1, haystack) } #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn memrchr2(n1: u8, n2: u8, haystack: &[u8]) -> Option<usize> { memchr::memrchr2(n1, n2, haystack) } #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn memrchr3( n1: u8, n2: u8, n3: u8, haystack: &[u8], ) -> Option<usize> { memchr::memrchr3(n1, n2, n3, haystack) } } #[cfg(not(feature = "perf-literal-substring"))] pub(super) mod inner { #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn memchr(n1: u8, haystack: &[u8]) -> Option<usize> { haystack.iter().position(|&b| b == n1) } #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn memchr2(n1: u8, n2: u8, haystack: &[u8]) -> Option<usize> { haystack.iter().position(|&b| b == n1 || b == n2) } #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn memchr3( n1: u8, n2: u8, n3: u8, haystack: &[u8], ) -> Option<usize> { haystack.iter().position(|&b| b == n1 || b == n2 || b == n3) } #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn memrchr(n1: u8, haystack: &[u8]) -> Option<usize> { haystack.iter().rposition(|&b| b == n1) } #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn memrchr2(n1: u8, n2: u8, haystack: &[u8]) -> Option<usize> { haystack.iter().rposition(|&b| b == n1 || b == n2) } #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn memrchr3( n1: u8, n2: u8, n3: u8, haystack: &[u8], ) -> Option<usize> { haystack.iter().rposition(|&b| b == n1 || b == n2 || b == n3) } } <file_sep>/regex-automata/src/util/pool.rs // This module provides a relatively simple thread-safe pool of reusable // objects. For the most part, it's implemented by a stack represented by a // Mutex<Vec<T>>. It has one small trick: because unlocking a mutex is somewhat // costly, in the case where a pool is accessed by the first thread that tried // to get a value, we bypass the mutex. Here are some benchmarks showing the // difference. // // 2022-10-15: These benchmarks are from the old regex crate and they aren't // easy to reproduce because some rely on older implementations of Pool that // are no longer around. I've left the results here for posterity, but any // enterprising individual should feel encouraged to re-litigate the way Pool // works. I am not at all certain it is the best approach. // // 1) misc::anchored_literal_long_non_match 21 (18571 MB/s) // 2) misc::anchored_literal_long_non_match 107 (3644 MB/s) // 3) misc::anchored_literal_long_non_match 45 (8666 MB/s) // 4) misc::anchored_literal_long_non_match 19 (20526 MB/s) // // (1) represents our baseline: the master branch at the time of writing when // using the 'thread_local' crate to implement the pool below. // // (2) represents a naive pool implemented completely via Mutex<Vec<T>>. 
There // is no special trick for bypassing the mutex. // // (3) is the same as (2), except it uses Mutex<Vec<Box<T>>>. It is twice as // fast because a Box<T> is much smaller than the T we use with a Pool in this // crate. So pushing and popping a Box<T> from a Vec is quite a bit faster // than for T. // // (4) is the same as (3), but with the trick for bypassing the mutex in the // case of the first-to-get thread. // // Why move off of thread_local? Even though (4) is a hair faster than (1) // above, this was not the main goal. The main goal was to move off of // thread_local and find a way to *simply* re-capture some of its speed for // regex's specific case. So again, why move off of it? The *primary* reason is // because of memory leaks. See https://github.com/rust-lang/regex/issues/362 // for example. (Why do I want it to be simple? Well, I suppose what I mean is, // "use as much safe code as possible to minimize risk and be as sure as I can // be that it is correct.") // // My guess is that the thread_local design is probably not appropriate for // regex since its memory usage scales to the number of active threads that // have used a regex, where as the pool below scales to the number of threads // that simultaneously use a regex. While neither case permits contraction, // since we own the pool data structure below, we can add contraction if a // clear use case pops up in the wild. More pressingly though, it seems that // there are at least some use case patterns where one might have many threads // sitting around that might have used a regex at one point. While thread_local // does try to reuse space previously used by a thread that has since stopped, // its maximal memory usage still scales with the total number of active // threads. In contrast, the pool below scales with the total number of threads // *simultaneously* using the pool. The hope is that this uses less memory // overall. And if it doesn't, we can hopefully tune it somehow. // // It seems that these sort of conditions happen frequently // in FFI inside of other more "managed" languages. This was // mentioned in the issue linked above, and also mentioned here: // https://github.com/BurntSushi/rure-go/issues/3. And in particular, users // confirm that disabling the use of thread_local resolves the leak. // // There were other weaker reasons for moving off of thread_local as well. // Namely, at the time, I was looking to reduce dependencies. And for something // like regex, maintenance can be simpler when we own the full dependency tree. // // Note that I am not entirely happy with this pool. It has some subtle // implementation details and is overall still observable (even with the // thread owner optimization) in benchmarks. If someone wants to take a crack // at building something better, please file an issue. Even if it means a // different API. The API exposed by this pool is not the minimal thing that // something like a 'Regex' actually needs. It could adapt to, for example, // an API more like what is found in the 'thread_local' crate. However, we do // really need to support the no-std alloc-only context, or else the regex // crate wouldn't be able to support no-std alloc-only. However, I'm generally // okay with making the alloc-only context slower (as it is here), although I // do find it unfortunate. /*! A thread safe memory pool. The principal type in this module is a [`Pool`]. 
Its main use case is for holding a thread safe collection of mutable scratch
spaces (usually called `Cache` in this crate) that regex engines need to
execute a search. This then permits sharing the same read-only regex object
across multiple threads while having a quick way of reusing scratch space in
a thread safe way. This avoids needing to re-create the scratch space for
every search, which could wind up being quite expensive.
*/

/// A thread safe pool that works in an `alloc`-only context.
///
/// Getting a value out comes with a guard. When that guard is dropped, the
/// value is automatically put back in the pool. The guard provides both a
/// `Deref` and a `DerefMut` implementation for easy access to an underlying
/// `T`.
///
/// A `Pool` impls `Sync` when `T` is `Send` (even if `T` is not `Sync`). This
/// is possible because a pool is guaranteed to provide a value to exactly one
/// thread at any time.
///
/// Currently, a pool never contracts in size. Its size is proportional to the
/// maximum number of simultaneous uses. This may change in the future.
///
/// A `Pool` is a particularly useful data structure for this crate because
/// many of the regex engines require a mutable "cache" in order to execute
/// a search. Since regexes themselves tend to be global, the problem is then:
/// how do you get a mutable cache to execute a search? You could:
///
/// 1. Use a `thread_local!`, which requires the standard library and requires
/// that the regex pattern be statically known.
/// 2. Use a `Pool`.
/// 3. Make the cache an explicit dependency in your code and pass it around.
/// 4. Put the cache state in a `Mutex`, but this means only one search can
/// execute at a time.
/// 5. Create a new cache for every search.
///
/// A `thread_local!` is perhaps the best choice if it works for your use case.
/// Putting the cache in a mutex or creating a new cache for every search are
/// perhaps the worst choices. Of the remaining two choices, whether you use
/// this `Pool` or thread through a cache explicitly in your code is a matter
/// of taste and depends on your code architecture.
///
/// # Warning: may use a spin lock
///
/// When this crate is compiled _without_ the `std` feature, then this type
/// may use a spin lock internally. This can have subtle effects that may
/// be undesirable. See [Spinlocks Considered Harmful][spinharm] for a more
/// thorough treatment of this topic.
///
/// [spinharm]: https://matklad.github.io/2020/01/02/spinlocks-considered-harmful.html
///
/// # Example
///
/// This example shows how to share a single hybrid regex among multiple
/// threads, while also safely getting exclusive access to a hybrid's
/// [`Cache`](crate::hybrid::regex::Cache) without preventing other searches
/// from running while your thread uses the `Cache`.
///
/// ```
/// use regex_automata::{
///     hybrid::regex::{Cache, Regex},
///     util::{lazy::Lazy, pool::Pool},
///     Match,
/// };
///
/// static RE: Lazy<Regex> =
///     Lazy::new(|| Regex::new("foo[0-9]+bar").unwrap());
/// static CACHE: Lazy<Pool<Cache>> =
///     Lazy::new(|| Pool::new(|| RE.create_cache()));
///
/// let expected = Some(Match::must(0, 3..14));
/// assert_eq!(expected, RE.find(&mut CACHE.get(), b"zzzfoo12345barzzz"));
/// ```
pub struct Pool<T, F = fn() -> T>(alloc::boxed::Box<inner::Pool<T, F>>);

impl<T, F> Pool<T, F> {
    /// Create a new pool. The given closure is used to create values in
    /// the pool when necessary.
pub fn new(create: F) -> Pool<T, F> { Pool(alloc::boxed::Box::new(inner::Pool::new(create))) } } impl<T: Send, F: Fn() -> T> Pool<T, F> { /// Get a value from the pool. The caller is guaranteed to have /// exclusive access to the given value. Namely, it is guaranteed that /// this will never return a value that was returned by another call to /// `get` but was not put back into the pool. /// /// When the guard goes out of scope and its destructor is called, then /// it will automatically be put back into the pool. Alternatively, /// [`PoolGuard::put`] may be used to explicitly put it back in the pool /// without relying on its destructor. /// /// Note that there is no guarantee provided about which value in the /// pool is returned. That is, calling get, dropping the guard (causing /// the value to go back into the pool) and then calling get again is /// *not* guaranteed to return the same value received in the first `get` /// call. pub fn get(&self) -> PoolGuard<'_, T, F> { PoolGuard(self.0.get()) } } impl<T: core::fmt::Debug, F> core::fmt::Debug for Pool<T, F> { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { f.debug_tuple("Pool").field(&self.0).finish() } } /// A guard that is returned when a caller requests a value from the pool. /// /// The purpose of the guard is to use RAII to automatically put the value /// back in the pool once it's dropped. pub struct PoolGuard<'a, T: Send, F: Fn() -> T>(inner::PoolGuard<'a, T, F>); impl<'a, T: Send, F: Fn() -> T> PoolGuard<'a, T, F> { /// Consumes this guard and puts it back into the pool. /// /// This circumvents the guard's `Drop` implementation. This can be useful /// in circumstances where the automatic `Drop` results in poorer codegen, /// such as calling non-inlined functions. pub fn put(this: PoolGuard<'_, T, F>) { inner::PoolGuard::put(this.0); } } impl<'a, T: Send, F: Fn() -> T> core::ops::Deref for PoolGuard<'a, T, F> { type Target = T; fn deref(&self) -> &T { self.0.value() } } impl<'a, T: Send, F: Fn() -> T> core::ops::DerefMut for PoolGuard<'a, T, F> { fn deref_mut(&mut self) -> &mut T { self.0.value_mut() } } impl<'a, T: Send + core::fmt::Debug, F: Fn() -> T> core::fmt::Debug for PoolGuard<'a, T, F> { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { f.debug_tuple("PoolGuard").field(&self.0).finish() } } #[cfg(feature = "std")] mod inner { use core::{ cell::UnsafeCell, panic::{RefUnwindSafe, UnwindSafe}, sync::atomic::{AtomicUsize, Ordering}, }; use alloc::{boxed::Box, vec, vec::Vec}; use std::{sync::Mutex, thread_local}; /// An atomic counter used to allocate thread IDs. /// /// We specifically start our counter at 3 so that we can use the values /// less than it as sentinels. static COUNTER: AtomicUsize = AtomicUsize::new(3); /// A thread ID indicating that there is no owner. This is the initial /// state of a pool. Once a pool has an owner, there is no way to change /// it. static THREAD_ID_UNOWNED: usize = 0; /// A thread ID indicating that the special owner value is in use and not /// available. This state is useful for avoiding a case where the owner /// of a pool calls `get` before putting the result of a previous `get` /// call back into the pool. static THREAD_ID_INUSE: usize = 1; /// This sentinel is used to indicate that a guard has already been dropped /// and should not be re-dropped. 
We use this because our drop code can be /// called outside of Drop and thus there could be a bug in the internal /// implementation that results in trying to put the same guard back into /// the same pool multiple times, and *that* could result in UB if we /// didn't mark the guard as already having been put back in the pool. /// /// So this isn't strictly necessary, but this let's us define some /// routines as safe (like PoolGuard::put_imp) that we couldn't otherwise /// do. static THREAD_ID_DROPPED: usize = 2; /// The number of stacks we use inside of the pool. These are only used for /// non-owners. That is, these represent the "slow" path. /// /// In the original implementation of this pool, we only used a single /// stack. While this might be okay for a couple threads, the prevalence of /// 32, 64 and even 128 core CPUs has made it untenable. The contention /// such an environment introduces when threads are doing a lot of searches /// on short haystacks (a not uncommon use case) is palpable and leads to /// huge slowdowns. /// /// This constant reflects a change from using one stack to the number of /// stacks that this constant is set to. The stack for a particular thread /// is simply chosen by `thread_id % MAX_POOL_STACKS`. The idea behind /// this setup is that there should be a good chance that accesses to the /// pool will be distributed over several stacks instead of all of them /// converging to one. /// /// This is not a particularly smart or dynamic strategy. Fixing this to a /// specific number has at least two downsides. First is that it will help, /// say, an 8 core CPU more than it will a 128 core CPU. (But, crucially, /// it will still help the 128 core case.) Second is that this may wind /// up being a little wasteful with respect to memory usage. Namely, if a /// regex is used on one thread and then moved to another thread, then it /// could result in creating a new copy of the data in the pool even though /// only one is actually needed. /// /// And that memory usage bit is why this is set to 8 and not, say, 64. /// Keeping it at 8 limits, to an extent, how much unnecessary memory can /// be allocated. /// /// In an ideal world, we'd be able to have something like this: /// /// * Grow the number of stacks as the number of concurrent callers /// increases. I spent a little time trying this, but even just adding an /// atomic addition/subtraction for each pop/push for tracking concurrent /// callers led to a big perf hit. Since even more work would seemingly be /// required than just an addition/subtraction, I abandoned this approach. /// * The maximum amount of memory used should scale with respect to the /// number of concurrent callers and *not* the total number of existing /// threads. This is primarily why the `thread_local` crate isn't used, as /// as some environments spin up a lot of threads. This led to multiple /// reports of extremely high memory usage (often described as memory /// leaks). /// * Even more ideally, the pool should contract in size. That is, it /// should grow with bursts and then shrink. But this is a pretty thorny /// issue to tackle and it might be better to just not. /// * It would be nice to explore the use of, say, a lock-free stack /// instead of using a mutex to guard a `Vec` that is ultimately just /// treated as a stack. The main thing preventing me from exploring this /// is the ABA problem. 
The `crossbeam` crate has tools for dealing with /// this sort of problem (via its epoch based memory reclamation strategy), /// but I can't justify bringing in all of `crossbeam` as a dependency of /// `regex` for this. /// /// See this issue for more context and discussion: /// https://github.com/rust-lang/regex/issues/934 const MAX_POOL_STACKS: usize = 8; thread_local!( /// A thread local used to assign an ID to a thread. static THREAD_ID: usize = { let next = COUNTER.fetch_add(1, Ordering::Relaxed); // SAFETY: We cannot permit the reuse of thread IDs since reusing a // thread ID might result in more than one thread "owning" a pool, // and thus, permit accessing a mutable value from multiple threads // simultaneously without synchronization. The intent of this panic // is to be a sanity check. It is not expected that the thread ID // space will actually be exhausted in practice. Even on a 32-bit // system, it would require spawning 2^32 threads (although they // wouldn't all need to run simultaneously, so it is in theory // possible). // // This checks that the counter never wraps around, since atomic // addition wraps around on overflow. if next == 0 { panic!("regex: thread ID allocation space exhausted"); } next }; ); /// This puts each stack in the pool below into its own cache line. This is /// an absolutely critical optimization that tends to have the most impact /// in high contention workloads. Without forcing each mutex protected /// into its own cache line, high contention exacerbates the performance /// problem by causing "false sharing." By putting each mutex in its own /// cache-line, we avoid the false sharing problem and the affects of /// contention are greatly reduced. #[derive(Debug)] #[repr(C, align(64))] struct CacheLine<T>(T); /// A thread safe pool utilizing std-only features. /// /// The main difference between this and the simplistic alloc-only pool is /// the use of std::sync::Mutex and an "owner thread" optimization that /// makes accesses by the owner of a pool faster than all other threads. /// This makes the common case of running a regex within a single thread /// faster by avoiding mutex unlocking. pub(super) struct Pool<T, F> { /// A function to create more T values when stack is empty and a caller /// has requested a T. create: F, /// Multiple stacks of T values to hand out. These are used when a Pool /// is accessed by a thread that didn't create it. /// /// Conceptually this is `Mutex<Vec<Box<T>>>`, but sharded out to make /// it scale better under high contention work-loads. We index into /// this sequence via `thread_id % stacks.len()`. stacks: Vec<CacheLine<Mutex<Vec<Box<T>>>>>, /// The ID of the thread that owns this pool. The owner is the thread /// that makes the first call to 'get'. When the owner calls 'get', it /// gets 'owner_val' directly instead of returning a T from 'stack'. /// See comments elsewhere for details, but this is intended to be an /// optimization for the common case that makes getting a T faster. /// /// It is initialized to a value of zero (an impossible thread ID) as a /// sentinel to indicate that it is unowned. owner: AtomicUsize, /// A value to return when the caller is in the same thread that /// first called `Pool::get`. /// /// This is set to None when a Pool is first created, and set to Some /// once the first thread calls Pool::get. owner_val: UnsafeCell<Option<T>>, } // SAFETY: Since we want to use a Pool from multiple threads simultaneously // behind an Arc, we need for it to be Sync. 
In cases where T is sync, // Pool<T> would be Sync. However, since we use a Pool to store mutable // scratch space, we wind up using a T that has interior mutability and is // thus itself not Sync. So what we *really* want is for our Pool<T> to by // Sync even when T is not Sync (but is at least Send). // // The only non-sync aspect of a Pool is its 'owner_val' field, which is // used to implement faster access to a pool value in the common case of // a pool being accessed in the same thread in which it was created. The // 'stack' field is also shared, but a Mutex<T> where T: Send is already // Sync. So we only need to worry about 'owner_val'. // // The key is to guarantee that 'owner_val' can only ever be accessed from // one thread. In our implementation below, we guarantee this by only // returning the 'owner_val' when the ID of the current thread matches the // ID of the thread that first called 'Pool::get'. Since this can only ever // be one thread, it follows that only one thread can access 'owner_val' at // any point in time. Thus, it is safe to declare that Pool<T> is Sync when // T is Send. // // If there is a way to achieve our performance goals using safe code, then // I would very much welcome a patch. As it stands, the implementation // below tries to balance safety with performance. The case where a Regex // is used from multiple threads simultaneously will suffer a bit since // getting a value out of the pool will require unlocking a mutex. // // We require `F: Send + Sync` because we call `F` at any point on demand, // potentially from multiple threads simultaneously. unsafe impl<T: Send, F: Send + Sync> Sync for Pool<T, F> {} // If T is UnwindSafe, then since we provide exclusive access to any // particular value in the pool, the pool should therefore also be // considered UnwindSafe. // // We require `F: UnwindSafe + RefUnwindSafe` because we call `F` at any // point on demand, so it needs to be unwind safe on both dimensions for // the entire Pool to be unwind safe. impl<T: UnwindSafe, F: UnwindSafe + RefUnwindSafe> UnwindSafe for Pool<T, F> {} // If T is UnwindSafe, then since we provide exclusive access to any // particular value in the pool, the pool should therefore also be // considered RefUnwindSafe. // // We require `F: UnwindSafe + RefUnwindSafe` because we call `F` at any // point on demand, so it needs to be unwind safe on both dimensions for // the entire Pool to be unwind safe. impl<T: UnwindSafe, F: UnwindSafe + RefUnwindSafe> RefUnwindSafe for Pool<T, F> { } impl<T, F> Pool<T, F> { /// Create a new pool. The given closure is used to create values in /// the pool when necessary. pub(super) fn new(create: F) -> Pool<T, F> { // MSRV(1.63): Mark this function as 'const'. I've arranged the // code such that it should "just work." Then mark the public // 'Pool::new' method as 'const' too. (The alloc-only Pool::new // is already 'const', so that should "just work" too.) The only // thing we're waiting for is Mutex::new to be const. let mut stacks = Vec::with_capacity(MAX_POOL_STACKS); for _ in 0..stacks.capacity() { stacks.push(CacheLine(Mutex::new(vec![]))); } let owner = AtomicUsize::new(THREAD_ID_UNOWNED); let owner_val = UnsafeCell::new(None); // init'd on first access Pool { create, stacks, owner, owner_val } } } impl<T: Send, F: Fn() -> T> Pool<T, F> { /// Get a value from the pool. This may block if another thread is also /// attempting to retrieve a value from the pool. 
pub(super) fn get(&self) -> PoolGuard<'_, T, F> { // Our fast path checks if the caller is the thread that "owns" // this pool. Or stated differently, whether it is the first thread // that tried to extract a value from the pool. If it is, then we // can return a T to the caller without going through a mutex. // // SAFETY: We must guarantee that only one thread gets access // to this value. Since a thread is uniquely identified by the // THREAD_ID thread local, it follows that if the caller's thread // ID is equal to the owner, then only one thread may receive this // value. This is also why we can get away with what looks like a // racy load and a store. We know that if 'owner == caller', then // only one thread can be here, so we don't need to worry about any // other thread setting the owner to something else. let caller = THREAD_ID.with(|id| *id); let owner = self.owner.load(Ordering::Acquire); if caller == owner { // N.B. We could also do a CAS here instead of a load/store, // but ad hoc benchmarking suggests it is slower. And a lot // slower in the case where `get_slow` is common. self.owner.store(THREAD_ID_INUSE, Ordering::Release); return self.guard_owned(caller); } self.get_slow(caller, owner) } /// This is the "slow" version that goes through a mutex to pop an /// allocated value off a stack to return to the caller. (Or, if the /// stack is empty, a new value is created.) /// /// If the pool has no owner, then this will set the owner. #[cold] fn get_slow( &self, caller: usize, owner: usize, ) -> PoolGuard<'_, T, F> { if owner == THREAD_ID_UNOWNED { // This sentinel means this pool is not yet owned. We try to // atomically set the owner. If we do, then this thread becomes // the owner and we can return a guard that represents the // special T for the owner. // // Note that we set the owner to a different sentinel that // indicates that the owned value is in use. The owner ID will // get updated to the actual ID of this thread once the guard // returned by this function is put back into the pool. let res = self.owner.compare_exchange( THREAD_ID_UNOWNED, THREAD_ID_INUSE, Ordering::AcqRel, Ordering::Acquire, ); if res.is_ok() { // SAFETY: A successful CAS above implies this thread is // the owner and that this is the only such thread that // can reach here. Thus, there is no data race. unsafe { *self.owner_val.get() = Some((self.create)()); } return self.guard_owned(caller); } } let stack_id = caller % self.stacks.len(); // We try to acquire exclusive access to this thread's stack, and // if so, grab a value from it if we can. We put this in a loop so // that it's easy to tweak and experiment with a different number // of tries. In the end, I couldn't see anything obviously better // than one attempt in ad hoc testing. for _ in 0..1 { let mut stack = match self.stacks[stack_id].0.try_lock() { Err(_) => continue, Ok(stack) => stack, }; if let Some(value) = stack.pop() { return self.guard_stack(value); } // Unlock the mutex guarding the stack before creating a fresh // value since we no longer need the stack. drop(stack); let value = Box::new((self.create)()); return self.guard_stack(value); } // We're only here if we could get access to our stack, so just // create a new value. This seems like it could be wasteful, but // waiting for exclusive access to a stack when there's high // contention is brutal for perf. self.guard_stack_transient(Box::new((self.create)())) } /// Puts a value back into the pool. Callers don't need to call this. 
/// Once the guard that's returned by 'get' is dropped, it is put back /// into the pool automatically. fn put_value(&self, value: Box<T>) { let caller = THREAD_ID.with(|id| *id); let stack_id = caller % self.stacks.len(); // As with trying to pop a value from this thread's stack, we // merely attempt to get access to push this value back on the // stack. If there's too much contention, we just give up and throw // the value away. // // Interestingly, in ad hoc benchmarking, it is beneficial to // attempt to push the value back more than once, unlike when // popping the value. I don't have a good theory for why this is. // I guess if we drop too many values then that winds up forcing // the pop operation to create new fresh values and thus leads to // less reuse. There's definitely a balancing act here. for _ in 0..10 { let mut stack = match self.stacks[stack_id].0.try_lock() { Err(_) => continue, Ok(stack) => stack, }; stack.push(value); return; } } /// Create a guard that represents the special owned T. fn guard_owned(&self, caller: usize) -> PoolGuard<'_, T, F> { PoolGuard { pool: self, value: Err(caller), discard: false } } /// Create a guard that contains a value from the pool's stack. fn guard_stack(&self, value: Box<T>) -> PoolGuard<'_, T, F> { PoolGuard { pool: self, value: Ok(value), discard: false } } /// Create a guard that contains a value from the pool's stack with an /// instruction to throw away the value instead of putting it back /// into the pool. fn guard_stack_transient(&self, value: Box<T>) -> PoolGuard<'_, T, F> { PoolGuard { pool: self, value: Ok(value), discard: true } } } impl<T: core::fmt::Debug, F> core::fmt::Debug for Pool<T, F> { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_struct("Pool") .field("stacks", &self.stacks) .field("owner", &self.owner) .field("owner_val", &self.owner_val) .finish() } } /// A guard that is returned when a caller requests a value from the pool. pub(super) struct PoolGuard<'a, T: Send, F: Fn() -> T> { /// The pool that this guard is attached to. pool: &'a Pool<T, F>, /// This is Err when the guard represents the special "owned" value. /// In which case, the value is retrieved from 'pool.owner_val'. And /// in the special case of `Err(THREAD_ID_DROPPED)`, it means the /// guard has been put back into the pool and should no longer be used. value: Result<Box<T>, usize>, /// When true, the value should be discarded instead of being pushed /// back into the pool. We tend to use this under high contention, and /// this allows us to avoid inflating the size of the pool. (Because /// under contention, we tend to create more values instead of waiting /// for access to a stack of existing values.) discard: bool, } impl<'a, T: Send, F: Fn() -> T> PoolGuard<'a, T, F> { /// Return the underlying value. pub(super) fn value(&self) -> &T { match self.value { Ok(ref v) => &**v, // SAFETY: This is safe because the only way a PoolGuard gets // created for self.value=Err is when the current thread // corresponds to the owning thread, of which there can only // be one. Thus, we are guaranteed to be providing exclusive // access here which makes this safe. // // Also, since 'owner_val' is guaranteed to be initialized // before an owned PoolGuard is created, the unchecked unwrap // is safe. Err(id) => unsafe { // This assert is *not* necessary for safety, since we // should never be here if the guard had been put back into // the pool. This is a sanity check to make sure we didn't // break an internal invariant. 
debug_assert_ne!(THREAD_ID_DROPPED, id); (*self.pool.owner_val.get()).as_ref().unwrap_unchecked() }, } } /// Return the underlying value as a mutable borrow. pub(super) fn value_mut(&mut self) -> &mut T { match self.value { Ok(ref mut v) => &mut **v, // SAFETY: This is safe because the only way a PoolGuard gets // created for self.value=None is when the current thread // corresponds to the owning thread, of which there can only // be one. Thus, we are guaranteed to be providing exclusive // access here which makes this safe. // // Also, since 'owner_val' is guaranteed to be initialized // before an owned PoolGuard is created, the unwrap_unchecked // is safe. Err(id) => unsafe { // This assert is *not* necessary for safety, since we // should never be here if the guard had been put back into // the pool. This is a sanity check to make sure we didn't // break an internal invariant. debug_assert_ne!(THREAD_ID_DROPPED, id); (*self.pool.owner_val.get()).as_mut().unwrap_unchecked() }, } } /// Consumes this guard and puts it back into the pool. pub(super) fn put(this: PoolGuard<'_, T, F>) { // Since this is effectively consuming the guard and putting the // value back into the pool, there's no reason to run its Drop // impl after doing this. I don't believe there is a correctness // problem with doing so, but there's definitely a perf problem // by redoing this work. So we avoid it. let mut this = core::mem::ManuallyDrop::new(this); this.put_imp(); } /// Puts this guard back into the pool by only borrowing the guard as /// mutable. This should be called at most once. #[inline(always)] fn put_imp(&mut self) { match core::mem::replace(&mut self.value, Err(THREAD_ID_DROPPED)) { Ok(value) => { // If we were told to discard this value then don't bother // trying to put it back into the pool. This occurs when // the pop operation failed to acquire a lock and we // decided to create a new value in lieu of contending for // the lock. if self.discard { return; } self.pool.put_value(value); } // If this guard has a value "owned" by the thread, then // the Pool guarantees that this is the ONLY such guard. // Therefore, in order to place it back into the pool and make // it available, we need to change the owner back to the owning // thread's ID. But note that we use the ID that was stored in // the guard, since a guard can be moved to another thread and // dropped. (A previous iteration of this code read from the // THREAD_ID thread local, which uses the ID of the current // thread which may not be the ID of the owning thread! This // also avoids the TLS access, which is likely a hair faster.) Err(owner) => { // If we hit this point, it implies 'put_imp' has been // called multiple times for the same guard which in turn // corresponds to a bug in this implementation. assert_ne!(THREAD_ID_DROPPED, owner); self.pool.owner.store(owner, Ordering::Release); } } } } impl<'a, T: Send, F: Fn() -> T> Drop for PoolGuard<'a, T, F> { fn drop(&mut self) { self.put_imp(); } } impl<'a, T: Send + core::fmt::Debug, F: Fn() -> T> core::fmt::Debug for PoolGuard<'a, T, F> { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { f.debug_struct("PoolGuard") .field("pool", &self.pool) .field("value", &self.value) .finish() } } } // FUTURE: We should consider using Mara Bos's nearly-lock-free version of this // here: https://gist.github.com/m-ou-se/5fdcbdf7dcf4585199ce2de697f367a4. 
// // One reason why I did things with a "mutex" below is that it isolates the // safety concerns to just the Mutex, where as the safety of Mara's pool is a // bit more sprawling. I also expect this code to not be used that much, and // so is unlikely to get as much real world usage with which to test it. That // means the "obviously correct" lever is an important one. // // The specific reason to use Mara's pool is that it is likely faster and also // less likely to hit problems with spin-locks, although it is not completely // impervious to them. // // The best solution to this problem, probably, is a truly lock free pool. That // could be done with a lock free linked list. The issue is the ABA problem. It // is difficult to avoid, and doing so is complex. BUT, the upshot of that is // that if we had a truly lock free pool, then we could also use it above in // the 'std' pool instead of a Mutex because it should be completely free the // problems that come from spin-locks. #[cfg(not(feature = "std"))] mod inner { use core::{ cell::UnsafeCell, panic::{RefUnwindSafe, UnwindSafe}, sync::atomic::{AtomicBool, Ordering}, }; use alloc::{boxed::Box, vec, vec::Vec}; /// A thread safe pool utilizing alloc-only features. /// /// Unlike the std version, it doesn't seem possible(?) to implement the /// "thread owner" optimization because alloc-only doesn't have any concept /// of threads. So the best we can do is just a normal stack. This will /// increase latency in alloc-only environments. pub(super) struct Pool<T, F> { /// A stack of T values to hand out. These are used when a Pool is /// accessed by a thread that didn't create it. stack: Mutex<Vec<Box<T>>>, /// A function to create more T values when stack is empty and a caller /// has requested a T. create: F, } // If T is UnwindSafe, then since we provide exclusive access to any // particular value in the pool, it should therefore also be considered // RefUnwindSafe. impl<T: UnwindSafe, F: UnwindSafe> RefUnwindSafe for Pool<T, F> {} impl<T, F> Pool<T, F> { /// Create a new pool. The given closure is used to create values in /// the pool when necessary. pub(super) const fn new(create: F) -> Pool<T, F> { Pool { stack: Mutex::new(vec![]), create } } } impl<T: Send, F: Fn() -> T> Pool<T, F> { /// Get a value from the pool. This may block if another thread is also /// attempting to retrieve a value from the pool. pub(super) fn get(&self) -> PoolGuard<'_, T, F> { let mut stack = self.stack.lock(); let value = match stack.pop() { None => Box::new((self.create)()), Some(value) => value, }; PoolGuard { pool: self, value: Some(value) } } fn put(&self, guard: PoolGuard<'_, T, F>) { let mut guard = core::mem::ManuallyDrop::new(guard); if let Some(value) = guard.value.take() { self.put_value(value); } } /// Puts a value back into the pool. Callers don't need to call this. /// Once the guard that's returned by 'get' is dropped, it is put back /// into the pool automatically. fn put_value(&self, value: Box<T>) { let mut stack = self.stack.lock(); stack.push(value); } } impl<T: core::fmt::Debug, F> core::fmt::Debug for Pool<T, F> { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_struct("Pool").field("stack", &self.stack).finish() } } /// A guard that is returned when a caller requests a value from the pool. pub(super) struct PoolGuard<'a, T: Send, F: Fn() -> T> { /// The pool that this guard is attached to. pool: &'a Pool<T, F>, /// This is None after the guard has been put back into the pool. 
value: Option<Box<T>>, } impl<'a, T: Send, F: Fn() -> T> PoolGuard<'a, T, F> { /// Return the underlying value. pub(super) fn value(&self) -> &T { self.value.as_deref().unwrap() } /// Return the underlying value as a mutable borrow. pub(super) fn value_mut(&mut self) -> &mut T { self.value.as_deref_mut().unwrap() } /// Consumes this guard and puts it back into the pool. pub(super) fn put(this: PoolGuard<'_, T, F>) { // Since this is effectively consuming the guard and putting the // value back into the pool, there's no reason to run its Drop // impl after doing this. I don't believe there is a correctness // problem with doing so, but there's definitely a perf problem // by redoing this work. So we avoid it. let mut this = core::mem::ManuallyDrop::new(this); this.put_imp(); } /// Puts this guard back into the pool by only borrowing the guard as /// mutable. This should be called at most once. #[inline(always)] fn put_imp(&mut self) { if let Some(value) = self.value.take() { self.pool.put_value(value); } } } impl<'a, T: Send, F: Fn() -> T> Drop for PoolGuard<'a, T, F> { fn drop(&mut self) { self.put_imp(); } } impl<'a, T: Send + core::fmt::Debug, F: Fn() -> T> core::fmt::Debug for PoolGuard<'a, T, F> { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { f.debug_struct("PoolGuard") .field("pool", &self.pool) .field("value", &self.value) .finish() } } /// A spin-lock based mutex. Yes, I have read spinlocks cosnidered /// harmful[1], and if there's a reasonable alternative choice, I'll /// happily take it. /// /// I suspect the most likely alternative here is a Treiber stack, but /// implementing one correctly in a way that avoids the ABA problem looks /// subtle enough that I'm not sure I want to attempt that. But otherwise, /// we only need a mutex in order to implement our pool, so if there's /// something simpler we can use that works for our `Pool` use case, then /// that would be great. /// /// Note that this mutex does not do poisoning. /// /// [1]: https://matklad.github.io/2020/01/02/spinlocks-considered-harmful.html #[derive(Debug)] struct Mutex<T> { locked: AtomicBool, data: UnsafeCell<T>, } // SAFETY: Since a Mutex guarantees exclusive access, as long as we can // send it across threads, it must also be Sync. unsafe impl<T: Send> Sync for Mutex<T> {} impl<T> Mutex<T> { /// Create a new mutex for protecting access to the given value across /// multiple threads simultaneously. const fn new(value: T) -> Mutex<T> { Mutex { locked: AtomicBool::new(false), data: UnsafeCell::new(value), } } /// Lock this mutex and return a guard providing exclusive access to /// `T`. This blocks if some other thread has already locked this /// mutex. fn lock(&self) -> MutexGuard<'_, T> { while self .locked .compare_exchange( false, true, Ordering::AcqRel, Ordering::Acquire, ) .is_err() { core::hint::spin_loop(); } // SAFETY: The only way we're here is if we successfully set // 'locked' to true, which implies we must be the only thread here // and thus have exclusive access to 'data'. let data = unsafe { &mut *self.data.get() }; MutexGuard { locked: &self.locked, data } } } /// A guard that derefs to &T and &mut T. When it's dropped, the lock is /// released. 
#[derive(Debug)] struct MutexGuard<'a, T> { locked: &'a AtomicBool, data: &'a mut T, } impl<'a, T> core::ops::Deref for MutexGuard<'a, T> { type Target = T; fn deref(&self) -> &T { self.data } } impl<'a, T> core::ops::DerefMut for MutexGuard<'a, T> { fn deref_mut(&mut self) -> &mut T { self.data } } impl<'a, T> Drop for MutexGuard<'a, T> { fn drop(&mut self) { // Drop means 'data' is no longer accessible, so we can unlock // the mutex. self.locked.store(false, Ordering::Release); } } } #[cfg(test)] mod tests { use core::panic::{RefUnwindSafe, UnwindSafe}; use alloc::{boxed::Box, vec, vec::Vec}; use super::*; #[test] fn oibits() { fn assert_oitbits<T: Send + Sync + UnwindSafe + RefUnwindSafe>() {} assert_oitbits::<Pool<Vec<u32>>>(); assert_oitbits::<Pool<core::cell::RefCell<Vec<u32>>>>(); assert_oitbits::< Pool< Vec<u32>, Box< dyn Fn() -> Vec<u32> + Send + Sync + UnwindSafe + RefUnwindSafe, >, >, >(); } // Tests that Pool implements the "single owner" optimization. That is, the // thread that first accesses the pool gets its own copy, while all other // threads get distinct copies. #[cfg(feature = "std")] #[test] fn thread_owner_optimization() { use std::{cell::RefCell, sync::Arc, vec}; let pool: Arc<Pool<RefCell<Vec<char>>>> = Arc::new(Pool::new(|| RefCell::new(vec!['a']))); pool.get().borrow_mut().push('x'); let pool1 = pool.clone(); let t1 = std::thread::spawn(move || { let guard = pool1.get(); guard.borrow_mut().push('y'); }); let pool2 = pool.clone(); let t2 = std::thread::spawn(move || { let guard = pool2.get(); guard.borrow_mut().push('z'); }); t1.join().unwrap(); t2.join().unwrap(); // If we didn't implement the single owner optimization, then one of // the threads above is likely to have mutated the [a, x] vec that // we stuffed in the pool before spawning the threads. But since // neither thread was first to access the pool, and because of the // optimization, we should be guaranteed that neither thread mutates // the special owned pool value. // // (Technically this is an implementation detail and not a contract of // Pool's API.) assert_eq!(vec!['a', 'x'], *pool.get().borrow()); } // This tests that if the "owner" of a pool asks for two values, then it // gets two distinct values and not the same one. This test failed in the // course of developing the pool, which in turn resulted in UB because it // permitted getting aliasing &mut borrows to the same place in memory. #[test] fn thread_owner_distinct() { let pool = Pool::new(|| vec!['a']); { let mut g1 = pool.get(); let v1 = &mut *g1; let mut g2 = pool.get(); let v2 = &mut *g2; v1.push('b'); v2.push('c'); assert_eq!(&mut vec!['a', 'b'], v1); assert_eq!(&mut vec!['a', 'c'], v2); } // This isn't technically guaranteed, but we // expect to now get the "owned" value (the first // call to 'get()' above) now that it's back in // the pool. assert_eq!(&mut vec!['a', 'b'], &mut *pool.get()); } // This tests that we can share a guard with another thread, mutate the // underlying value and everything works. This failed in the course of // developing a pool since the pool permitted 'get()' to return the same // value to the owner thread, even before the previous value was put back // into the pool. This in turn resulted in this test producing a data race. 
#[cfg(feature = "std")] #[test] fn thread_owner_sync() { let pool = Pool::new(|| vec!['a']); { let mut g1 = pool.get(); let mut g2 = pool.get(); std::thread::scope(|s| { s.spawn(|| { g1.push('b'); }); s.spawn(|| { g2.push('c'); }); }); let v1 = &mut *g1; let v2 = &mut *g2; assert_eq!(&mut vec!['a', 'b'], v1); assert_eq!(&mut vec!['a', 'c'], v2); } // This isn't technically guaranteed, but we // expect to now get the "owned" value (the first // call to 'get()' above) now that it's back in // the pool. assert_eq!(&mut vec!['a', 'b'], &mut *pool.get()); } // This tests that if we move a PoolGuard that is owned by the current // thread to another thread and drop it, then the thread owner doesn't // change. During development of the pool, this test failed because the // PoolGuard assumed it was dropped in the same thread from which it was // created, and thus used the current thread's ID as the owner, which could // be different than the actual owner of the pool. #[cfg(feature = "std")] #[test] fn thread_owner_send_drop() { let pool = Pool::new(|| vec!['a']); // Establishes this thread as the owner. { pool.get().push('b'); } std::thread::scope(|s| { // Sanity check that we get the same value back. // (Not technically guaranteed.) let mut g = pool.get(); assert_eq!(&vec!['a', 'b'], &*g); // Now push it to another thread and drop it. s.spawn(move || { g.push('c'); }) .join() .unwrap(); }); // Now check that we're still the owner. This is not technically // guaranteed by the API, but is true in practice given the thread // owner optimization. assert_eq!(&vec!['a', 'b', 'c'], &*pool.get()); } } <file_sep>/regex-cli/args/flags.rs use bstr::ByteVec; use crate::args::Usage; /// This defines a implementation for a flag that wants a single byte. This is /// useful because there are some APIs that require a single byte. For example, /// setting a line terminator. /// /// This in particular supports the ability to write the byte via an escape /// sequence. For example, `--flag '\xFF'` will parse to the single byte 0xFF. /// /// If the flag value is empty or if it unescapes into something with more than /// one byte, then it is considered an error. #[derive(Debug, Default)] pub struct OneByte(pub u8); impl std::str::FromStr for OneByte { type Err = anyhow::Error; fn from_str(s: &str) -> anyhow::Result<OneByte> { let bytes = Vec::unescape_bytes(s); anyhow::ensure!( bytes.len() == 1, "expected exactly one byte, but got {} bytes", bytes.len(), ); Ok(OneByte(bytes[0])) } } /// This defines a implementation for a flag that wants a possibly empty set /// of bytes. This is useful because there are some APIs that require multiple /// individual bytes. For example, setting quit bytes for a DFA. /// /// This in particular supports the ability to write the byte set via a /// sequence of escape sequences. For example, `--flag 'a\xFF\t'` will parse to /// the sequence 0x61 0xFF 0x09. /// /// By default, the set is empty. If the flag value has a duplicate byte, then /// an error is returned. An empty value corresponds to the empty set. 
#[derive(Debug, Default)] pub struct ByteSet(pub Vec<u8>); impl std::str::FromStr for ByteSet { type Err = anyhow::Error; fn from_str(s: &str) -> anyhow::Result<ByteSet> { let mut set = vec![]; let mut seen = [false; 256]; for &byte in Vec::unescape_bytes(s).iter() { anyhow::ensure!( !seen[usize::from(byte)], "saw duplicate byte 0x{:2X} in '{}'", byte, s, ); seen[usize::from(byte)] = true; set.push(byte); } set.sort(); Ok(ByteSet(set)) } } /// Provides an implementation of the --start-kind flag, for use with DFA /// configuration. #[derive(Debug)] pub struct StartKind { pub kind: regex_automata::dfa::StartKind, } impl StartKind { pub const USAGE: Usage = Usage::new( "--start-kind <kind>", "One of: both, unanchored, anchored.", r#" Sets the start states supported by a DFA. The default is 'both', but it can be set to either 'unanchored' or 'anchored'. The benefit of only supporting unanchored or anchored start states is that it usually leads to a smaller overall automaton. "#, ); } impl Default for StartKind { fn default() -> StartKind { StartKind { kind: regex_automata::dfa::StartKind::Both } } } impl std::str::FromStr for StartKind { type Err = anyhow::Error; fn from_str(s: &str) -> anyhow::Result<StartKind> { let kind = match s { "both" => regex_automata::dfa::StartKind::Both, "unanchored" => regex_automata::dfa::StartKind::Unanchored, "anchored" => regex_automata::dfa::StartKind::Anchored, unk => anyhow::bail!("unrecognized start kind '{}'", unk), }; Ok(StartKind { kind }) } } /// Provides an implementation of the --match-kind flag, for use with most /// regex matchers. #[derive(Debug)] pub struct MatchKind { pub kind: regex_automata::MatchKind, } impl MatchKind { pub const USAGE: Usage = Usage::new( "-k, --match-kind <kind>", "One of: leftmost-first, all.", r#" Selects the match semantics for the regex engine. The choices are 'leftmost-first' (the default) or 'all'. 'leftmost-first' semantics look for the leftmost match, and when there are multiple leftmost matches, match priority disambiguates them. For example, in the haystack 'samwise', the regex 'samwise|sam' will match 'samwise' when using leftmost-first semantics. Similarly, the regex 'sam|samwise' will match 'sam'. 'all' semantics results in including all possible match states in the underlying automaton. When performing an unanchored leftmost search, this has the effect of finding the last match, which is usually not what you want. When performing an anchored leftmost search, it has the effect of finding the longest possible match, which might be what you want. (So there is no support for greedy vs non-greedy searching. Everything is greedy.) 'all' is also useful for overlapping searches, since all matches are reportable in this scheme. "#, ); } impl Default for MatchKind { fn default() -> MatchKind { MatchKind { kind: regex_automata::MatchKind::LeftmostFirst } } } impl std::str::FromStr for MatchKind { type Err = anyhow::Error; fn from_str(s: &str) -> anyhow::Result<MatchKind> { let kind = match s { "leftmost-first" => regex_automata::MatchKind::LeftmostFirst, "all" => regex_automata::MatchKind::All, unk => anyhow::bail!("unrecognized match kind '{}'", unk), }; Ok(MatchKind { kind }) } } <file_sep>/regex-automata/src/util/utf8.rs /*! Utilities for dealing with UTF-8. This module provides some UTF-8 related helper routines, including an incremental decoder. */ /// Returns true if and only if the given byte is considered a word character. /// This only applies to ASCII. 
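/// That is, this returns true precisely for the bytes `[0-9A-Za-z_]`. For
/// example, `is_word_byte(b'_')` is true, while `is_word_byte(b'-')` and
/// `is_word_byte(0xE9)` are both false.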
/// /// This was copied from regex-syntax so that we can use it to determine the /// starting DFA state while searching without depending on regex-syntax. The /// definition is never going to change, so there's no maintenance/bit-rot /// hazard here. #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn is_word_byte(b: u8) -> bool { const fn mkwordset() -> [bool; 256] { // FIXME: Use as_usize() once const functions in traits are stable. let mut set = [false; 256]; set[b'_' as usize] = true; let mut byte = b'0'; while byte <= b'9' { set[byte as usize] = true; byte += 1; } byte = b'A'; while byte <= b'Z' { set[byte as usize] = true; byte += 1; } byte = b'a'; while byte <= b'z' { set[byte as usize] = true; byte += 1; } set } const WORD: [bool; 256] = mkwordset(); WORD[b as usize] } /// Decodes the next UTF-8 encoded codepoint from the given byte slice. /// /// If no valid encoding of a codepoint exists at the beginning of the given /// byte slice, then the first byte is returned instead. /// /// This returns `None` if and only if `bytes` is empty. /// /// This never panics. /// /// *WARNING*: This is not designed for performance. If you're looking for a /// fast UTF-8 decoder, this is not it. If you feel like you need one in this /// crate, then please file an issue and discuss your use case. #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn decode(bytes: &[u8]) -> Option<Result<char, u8>> { if bytes.is_empty() { return None; } let len = match len(bytes[0]) { None => return Some(Err(bytes[0])), Some(len) if len > bytes.len() => return Some(Err(bytes[0])), Some(1) => return Some(Ok(char::from(bytes[0]))), Some(len) => len, }; match core::str::from_utf8(&bytes[..len]) { Ok(s) => Some(Ok(s.chars().next().unwrap())), Err(_) => Some(Err(bytes[0])), } } /// Decodes the last UTF-8 encoded codepoint from the given byte slice. /// /// If no valid encoding of a codepoint exists at the end of the given byte /// slice, then the last byte is returned instead. /// /// This returns `None` if and only if `bytes` is empty. #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn decode_last(bytes: &[u8]) -> Option<Result<char, u8>> { if bytes.is_empty() { return None; } let mut start = bytes.len() - 1; let limit = bytes.len().saturating_sub(4); while start > limit && !is_leading_or_invalid_byte(bytes[start]) { start -= 1; } match decode(&bytes[start..]) { None => None, Some(Ok(ch)) => Some(Ok(ch)), Some(Err(_)) => Some(Err(bytes[bytes.len() - 1])), } } /// Given a UTF-8 leading byte, this returns the total number of code units /// in the following encoded codepoint. /// /// If the given byte is not a valid UTF-8 leading byte, then this returns /// `None`. #[cfg_attr(feature = "perf-inline", inline(always))] fn len(byte: u8) -> Option<usize> { if byte <= 0x7F { return Some(1); } else if byte & 0b1100_0000 == 0b1000_0000 { return None; } else if byte <= 0b1101_1111 { Some(2) } else if byte <= 0b1110_1111 { Some(3) } else if byte <= 0b1111_0111 { Some(4) } else { None } } /// Returns true if and only if the given offset in the given bytes falls on a /// valid UTF-8 encoded codepoint boundary. /// /// If `bytes` is not valid UTF-8, then the behavior of this routine is /// unspecified. #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn is_boundary(bytes: &[u8], i: usize) -> bool { match bytes.get(i) { // The position at the end of the bytes always represents an empty // string, which is a valid boundary. 
But anything after that doesn't // make much sense to call valid a boundary. None => i == bytes.len(), // Other than ASCII (where the most significant bit is never set), // valid starting bytes always have their most significant two bits // set, where as continuation bytes never have their second most // significant bit set. Therefore, this only returns true when bytes[i] // corresponds to a byte that begins a valid UTF-8 encoding of a // Unicode scalar value. Some(&b) => b <= 0b0111_1111 || b >= 0b1100_0000, } } /// Returns true if and only if the given byte is either a valid leading UTF-8 /// byte, or is otherwise an invalid byte that can never appear anywhere in a /// valid UTF-8 sequence. #[cfg_attr(feature = "perf-inline", inline(always))] fn is_leading_or_invalid_byte(b: u8) -> bool { // In the ASCII case, the most significant bit is never set. The leading // byte of a 2/3/4-byte sequence always has the top two most significant // bits set. For bytes that can never appear anywhere in valid UTF-8, this // also returns true, since every such byte has its two most significant // bits set: // // \xC0 :: 11000000 // \xC1 :: 11000001 // \xF5 :: 11110101 // \xF6 :: 11110110 // \xF7 :: 11110111 // \xF8 :: 11111000 // \xF9 :: 11111001 // \xFA :: 11111010 // \xFB :: 11111011 // \xFC :: 11111100 // \xFD :: 11111101 // \xFE :: 11111110 // \xFF :: 11111111 (b & 0b1100_0000) != 0b1000_0000 } /* /// Returns the smallest possible index of the next valid UTF-8 sequence /// starting after `i`. /// /// For all inputs, including invalid UTF-8 and any value of `i`, the return /// value is guaranteed to be greater than `i`. (If there is no value greater /// than `i` that fits in `usize`, then this panics.) /// /// Generally speaking, this should only be called on `text` when it is /// permitted to assume that it is valid UTF-8 and where either `i >= /// text.len()` or where `text[i]` is a leading byte of a UTF-8 sequence. /// /// NOTE: This method was used in a previous conception of iterators where we /// specifically tried to skip over empty matches that split a codepoint by /// simply requiring that our next search begin at the beginning of codepoint. /// But we ended up changing that technique to always advance by 1 byte and /// then filter out matches that split a codepoint after-the-fact. Thus, we no /// longer use this method. But I've kept it around in case we want to switch /// back to this approach. Its guarantees are a little subtle, so I'd prefer /// not to rebuild it from whole cloth. pub(crate) fn next(text: &[u8], i: usize) -> usize { let b = match text.get(i) { None => return i.checked_add(1).unwrap(), Some(&b) => b, }; // For cases where we see an invalid UTF-8 byte, there isn't much we can do // other than just start at the next byte. let inc = len(b).unwrap_or(1); i.checked_add(inc).unwrap() } */ <file_sep>/regex-cli/README.md regex-cli ========= This is a command line tool for interacting with the regex, regex-automata and regex-syntax crates. It enables one to print debug representations of various values, run searches, generate DFAs and deserialization code and perform various regex development tasks such as generating tests. ### Installation Simply use `cargo` to install from crates.io. ``` $ cargo install regex-cli ``` ### Example: print debug output The `regex-cli` command provides a way to print the debug output for most of the principle types in the `regex-automata` crate. 
This can be useful for debugging purposes when working on the `regex` project, or even if you just want a better look at a regex object's internal representation. For example, the following two commands compare and contrast the differences in the NFA for `.` and `(?-u:.)`: ``` $ regex-cli debug thompson '.' --no-table thompson::NFA( >000000: binary-union(2, 1) 000001: \x00-\xFF => 0 ^000002: capture(pid=0, group=0, slot=0) => 10 000003: \x80-\xBF => 11 000004: \xA0-\xBF => 3 000005: \x80-\xBF => 3 000006: \x80-\x9F => 3 000007: \x90-\xBF => 5 000008: \x80-\xBF => 5 000009: \x80-\x8F => 5 000010: sparse(\x00-\t => 11, \x0B-\x7F => 11, \xC2-\xDF => 3, \xE0 => 4, \xE1-\xEC => 5, \xED => 6, \xEE-\xEF => 5, \xF0 => 7, \xF1-\xF3 => 8, \xF4 => 9) 000011: capture(pid=0, group=0, slot=1) => 12 000012: MATCH(0) transition equivalence classes: ByteClasses(0 => [\x00-\t], 1 => [\n], 2 => [\x0B-\x7F], 3 => [\x80-\x8F], 4 => [\x90-\x9F], 5 => [\xA0-\xBF], 6 => [\xC0-\xC1], 7 => [\xC2-\xDF], 8 => [\xE0], 9 => [\xE1-\xEC], 10 => [\xED], 11 => [\xEE-\xEF], 12 => [\xF0], 13 => [\xF1-\xF3], 14 => [\xF4], 15 => [\xF5-\xFF], 16 => [EOI]) ) ``` And now for `(?-u:.)`: ``` $ regex-cli debug thompson -b '(?-u:.)' --no-table thompson::NFA( >000000: binary-union(2, 1) 000001: \x00-\xFF => 0 ^000002: capture(pid=0, group=0, slot=0) => 3 000003: sparse(\x00-\t => 4, \x0B-\xFF => 4) 000004: capture(pid=0, group=0, slot=1) => 5 000005: MATCH(0) transition equivalence classes: ByteClasses(0 => [\x00-\t], 1 => [\n], 2 => [\x0B-\xFF], 3 => [EOI]) ) ``` To make things a bit more concise, we use `--no-table` to omit some extra metadata about the size of the NFA and the time required to build it. In the second example, we also pass the `-b/--no-utf8-syntax` flag. Without it, the command returns an error because patterns are compiled with default settings. The default setting is to forbid any pattern that can possibly match invalid UTF-8. Since `(?-u:.)` matches any byte except for `\n`, it can match invalid UTF-8. Thus, you have to say, "I am explicitly okay with matching invalid UTF-8." ### Example: execute a search This command shows how to run a search with multiple patterns with each containing capture groups. The output shows the value of each matching group. ``` $ regex-cli find capture meta -p '(?m)^(?<key>[[:word:]]+)="(?<val>[^"]+)"$' -p $'(?m)^(?<key>[[:word:]]+)=\'(?<val>[^\']+)\'$' -y 'best_album="Blow Your Face Out"' parse time: 81.541µs translate time: 52.035µs build meta time: 805.696µs search time: 426.391µs total matches: 1 0:{ 0: 0..31/best_album="Blow\x20Your\x20Face\x20Out", 1/key: 0..10/best_album, 2/val: 12..30/Blow\x20Your\x20Face\x20Out } ``` In this case, `meta` refers to the regex engine. It can be a number of other things, including `lite` for testing the `regex-lite` crate. Also, `capture` refers to the kind of search. You can also just ask for the `match` which will print the overall match and not the capture groups: ``` $ regex-cli find match meta -p '(?m)^(?<key>[[:word:]]+)="(?<val>[^"]+)"$' -p $'(?m)^(?<key>[[:word:]]+)=\'(?<val>[^\']+)\'$' -y 'best_album="Blow Your Face Out"' parse time: 67.067µs translate time: 40.005µs build meta time: 586.163µs search time: 291.633µs total matches: 1 0:0:31:best_album="Blow\x20Your\x20Face\x20Out" ``` Since not all regex engines support capture groups, using `match` will open up the ability to test other regex engines such as `hybrid`. 
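For a quick sanity check, the same named-group search can also be reproduced
with the public `regex` crate API. The following is a minimal sketch (assuming
a dependency on the `regex` crate; it is not part of `regex-cli` itself):

```rust
use regex::Regex;

fn main() {
    // Same pattern as the first -p flag above, using named capture groups.
    let re = Regex::new(r#"(?m)^(?<key>[[:word:]]+)="(?<val>[^"]+)"$"#).unwrap();
    let caps = re.captures(r#"best_album="Blow Your Face Out""#).unwrap();
    // These correspond to the 'key' and 'val' spans reported by regex-cli.
    assert_eq!("best_album", &caps["key"]);
    assert_eq!("Blow Your Face Out", &caps["val"]);
}
```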
Finally, the `-p/--pattern` flag specifies a pattern and the `-y/--haystack` flag provides a haystack to search as a command line argument. One can also omit the `-y/--haystack` flag and provide a file path to search instead: ``` $ echo 'best_album="Blow Your Face Out"' > haystack $ regex-cli find match hybrid -p '(?m)^(?<key>[[:word:]]+)="(?<val>[^"]+)"$' -p $'(?m)^(?<key>[[:word:]]+)=\'(?<val>[^\']+)\'$' haystack parse time: 60.278µs translate time: 43.832µs compile forward nfa time: 462.148µs compile reverse nfa time: 56.275µs build forward hybrid time: 6.532µs build reverse hybrid time: 4.089µs build regex time: 4.899µs cache creation time: 18.59µs search time: 54.653µs total matches: 1 0:0:31:best_album="Blow\x20Your\x20Face\x20Out" ``` ### Example: serialize a DFA One particularly useful command in `regex-cli` is `regex-cli generate serialize`. It takes care of generating and writing a fully compiled DFA to a file, and then producing Rust code that deserializes it. The command line provides oodles of options, including all options found in the `regex-automata` crate for building the DFA in code. Let's walk through a complete end-to-end example. We assume `regex-cli` is already installed per instructions above. Let's start with an empty binary Rust project: ``` $ mkdir regex-dfa-test $ cd regex-dfa-test $ cargo init --bin ``` Now add a dependency on `regex-automata`. Technically, the only feature that needs to be enabled for this example is `dfa-search`, but we include `std` as well to get some conveniences like `std::error::Error` implementations and also optimizations. But you can drop `std` and just use `alloc` or even drop `alloc` too altogether if necessary. ``` $ cargo add regex-automata --features std,dfa-search ``` Now we can generate a DFA with `regex-cli`. This will create three files: the little endian binary serialization of the DFA, the big endian version and a simple Rust source file for lazily deserializing the DFA via a static into a `regex_automata::util::lazy::Lazy`: ``` regex-cli generate serialize sparse dfa \ --minimize \ --shrink \ --start-kind anchored \ --rustfmt \ --safe \ SIMPLE_WORD_FWD \ ./src/ \ "\w" ``` We pass a number of flags here. There are even more available, and generally speaking, there is at least one flag for each configuration knob available in the library. This means that it should be possible to configure the DFA in any way you might expect to be able to in the code. We can briefly explain the flags we use here though: * `--minimize` applies a [DFA minimization] algorithm to try and shrink the size of the DFA as much as possible. In some cases it can make a big difference, but not all. Minimization can also be extremely expensive, but given that this is an offline process and presumably done rarely, it's usually a good trade off to make. * `--shrink` uses heuristics to make the size of the NFA smaller in some cases. This doesn't impact the size of the DFA, but it can make determinization (the process of converting an NFA into a DFA) faster at the cost of making NFA construction slower. This can make overall DFA generation time faster. * `--start-kind anchored` says to build a DFA that only supports anchored searches. (That is, every match must have a start offset equivalent to the start of the search.) Without this, DFAs support both anchored and unanchored searches, and that in turn can make them much bigger than they need to be if you only need one or the other. * `--rustfmt` will run `rustfmt` on the generated Rust code. 
* `--safe` will use only safe code for deserializing the DFA. This may be slower, but it is a one time cost. If you find that deserializing the DFA is too slow, then dropping this option will use alternative APIs that may result in undefined behavior if the given DFA is not valid. (Every DFA generated by `regex-cli` is intended to be valid. So *not* using `--safe` should always be correct, but it's up to you whether it's worth doing.) [DFA minimization]: https://en.wikipedia.org/wiki/DFA_minimization The final three positional arguments are as follows: * `SIMPLE_WORD_FWD` is the name of the variable in the Rust source code for the DFA, and it is also used in generating the names of the files produced by this command. * `./src/` is the directory to write the files. * `\w` is the regex pattern to build the DFA for. More than one may be given! Once the DFA is generated, you should see three new files in `./src/`: ``` $ ls -l src/ total 32 -rw-rw-r-- 1 andrew users 45 May 28 22:04 main.rs -rw-rw-r-- 1 andrew users 11095 May 30 10:24 simple_word_fwd.bigendian.dfa -rw-rw-r-- 1 andrew users 11095 May 30 10:24 simple_word_fwd.littleendian.dfa -rw-rw-r-- 1 andrew users 711 May 30 10:24 simple_word_fwd.rs ``` At this point, you just need to add the appropriate `mod` definition in `main.rs` and use the DFA: ```rust use regex_automata::{dfa::Automaton, Anchored, Input}; use crate::simple_word_fwd::SIMPLE_WORD_FWD as DFA; mod simple_word_fwd; fn main() { let input = Input::new("ω").anchored(Anchored::Yes); println!("is a word: {:?}", DFA.try_search_fwd(&input)); let input = Input::new("☃").anchored(Anchored::Yes); println!("not a word: {:?}", DFA.try_search_fwd(&input)); } ``` And now run the program: ``` $ cargo run Compiling regex-dfa-test v0.1.0 (/home/andrew/tmp/regex-dfa-test) Finished dev [unoptimized + debuginfo] target(s) in 0.17s Running `target/debug/regex-dfa-test` is a word: Ok(Some(HalfMatch { pattern: PatternID(0), offset: 2 })) not a word: Ok(None) ``` There are a few other things worth mentioning: * The above generates a "sparse" DFA. This sacrifices search performance in favor of (potentially much) smaller DFAs. One can also generate a "dense" DFA to get faster searches but larger DFAs. * Above, we generated a "dfa," but one can also generate a "regex." The difference is that a DFA can only find the end of a match (or start of a match if the DFA is reversed), where as a regex will generate two DFAs: one for finding the end of a match and then another for finding the start. One can generate two DFAs manually and stitch them together in the code, but generating a `regex` will take care of this for you. <file_sep>/testdata/crlf.toml # This is a basic test that checks ^ and $ treat \r\n as a single line # terminator. If ^ and $ only treated \n as a line terminator, then this would # only match 'xyz' at the end of the haystack. [[test]] name = "basic" regex = '(?mR)^[a-z]+$' haystack = "abc\r\ndef\r\nxyz" matches = [[0, 3], [5, 8], [10, 13]] # Tests that a CRLF-aware '^$' assertion does not match between CR and LF. [[test]] name = "start-end-non-empty" regex = '(?mR)^$' haystack = "abc\r\ndef\r\nxyz" matches = [] # Tests that a CRLF-aware '^$' assertion matches the empty string, just like # a non-CRLF-aware '^$' assertion. [[test]] name = "start-end-empty" regex = '(?mR)^$' haystack = "" matches = [[0, 0]] # Tests that a CRLF-aware '^$' assertion matches the empty string preceding # and following a line terminator. 
[[test]] name = "start-end-before-after" regex = '(?mR)^$' haystack = "\r\n" matches = [[0, 0], [2, 2]] # Tests that a CRLF-aware '^' assertion does not split a line terminator. [[test]] name = "start-no-split" regex = '(?mR)^' haystack = "abc\r\ndef\r\nxyz" matches = [[0, 0], [5, 5], [10, 10]] # Same as above, but with adjacent runs of line terminators. [[test]] name = "start-no-split-adjacent" regex = '(?mR)^' haystack = "\r\n\r\n\r\n" matches = [[0, 0], [2, 2], [4, 4], [6, 6]] # Same as above, but with adjacent runs of just carriage returns. [[test]] name = "start-no-split-adjacent-cr" regex = '(?mR)^' haystack = "\r\r\r" matches = [[0, 0], [1, 1], [2, 2], [3, 3]] # Same as above, but with adjacent runs of just line feeds. [[test]] name = "start-no-split-adjacent-lf" regex = '(?mR)^' haystack = "\n\n\n" matches = [[0, 0], [1, 1], [2, 2], [3, 3]] # Tests that a CRLF-aware '$' assertion does not split a line terminator. [[test]] name = "end-no-split" regex = '(?mR)$' haystack = "abc\r\ndef\r\nxyz" matches = [[3, 3], [8, 8], [13, 13]] # Same as above, but with adjacent runs of line terminators. [[test]] name = "end-no-split-adjacent" regex = '(?mR)$' haystack = "\r\n\r\n\r\n" matches = [[0, 0], [2, 2], [4, 4], [6, 6]] # Same as above, but with adjacent runs of just carriage returns. [[test]] name = "end-no-split-adjacent-cr" regex = '(?mR)$' haystack = "\r\r\r" matches = [[0, 0], [1, 1], [2, 2], [3, 3]] # Same as above, but with adjacent runs of just line feeds. [[test]] name = "end-no-split-adjacent-lf" regex = '(?mR)$' haystack = "\n\n\n" matches = [[0, 0], [1, 1], [2, 2], [3, 3]] # Tests that '.' does not match either \r or \n when CRLF mode is enabled. Note # that this doesn't require multi-line mode to be enabled. [[test]] name = "dot-no-crlf" regex = '(?R).' haystack = "\r\n\r\n\r\n" matches = [] # This is a test that caught a bug in the one-pass DFA where it (amazingly) was # using 'is_end_lf' instead of 'is_end_crlf' here. It was probably a copy & # paste bug. We insert an empty capture group here because it provokes the meta # regex engine to first find a match and then trip over a panic because the # one-pass DFA erroneously says there is no match. [[test]] name = "onepass-wrong-crlf-with-capture" regex = '(?Rm:().$)' haystack = "ZZ\r" matches = [[[1, 2], [1, 1]]] # This is like onepass-wrong-crlf-with-capture above, except it sets up the # test so that it can be run by the one-pass DFA directly. (i.e., Make it # anchored and start the search at the right place.) [[test]] name = "onepass-wrong-crlf-anchored" regex = '(?Rm:.$)' haystack = "ZZ\r" matches = [[1, 2]] anchored = true bounds = [1, 3] <file_sep>/tests/replace.rs macro_rules! 
replace( ($name:ident, $which:ident, $re:expr, $search:expr, $replace:expr, $result:expr) => ( #[test] fn $name() { let re = regex::Regex::new($re).unwrap(); assert_eq!(re.$which($search, $replace), $result); } ); ); replace!(first, replace, r"[0-9]", "age: 26", "Z", "age: Z6"); replace!(plus, replace, r"[0-9]+", "age: 26", "Z", "age: Z"); replace!(all, replace_all, r"[0-9]", "age: 26", "Z", "age: ZZ"); replace!(groups, replace, r"([^ ]+)[ ]+([^ ]+)", "w1 w2", "$2 $1", "w2 w1"); replace!( double_dollar, replace, r"([^ ]+)[ ]+([^ ]+)", "w1 w2", "$2 $$1", "w2 $1" ); // replace!(adjacent_index, replace, // r"([^aeiouy])ies$", "skies", "$1y", "sky"); replace!( named, replace_all, r"(?P<first>[^ ]+)[ ]+(?P<last>[^ ]+)(?P<space>[ ]*)", "w1 w2 w3 w4", "$last $first$space", "w2 w1 w4 w3" ); replace!( trim, replace_all, "^[ \t]+|[ \t]+$", " \t trim me\t \t", "", "trim me" ); replace!(number_hyphen, replace, r"(.)(.)", "ab", "$1-$2", "a-b"); // replace!(number_underscore, replace, r"(.)(.)", "ab", "$1_$2", "a_b"); replace!( simple_expand, replace_all, r"([a-z]) ([a-z])", "a b", "$2 $1", "b a" ); replace!( literal_dollar1, replace_all, r"([a-z]+) ([a-z]+)", "a b", "$$1", "$1" ); replace!( literal_dollar2, replace_all, r"([a-z]+) ([a-z]+)", "a b", "$2 $$c $1", "b $c a" ); replace!( no_expand1, replace, r"([^ ]+)[ ]+([^ ]+)", "w1 w2", regex::NoExpand("$2 $1"), "$2 $1" ); replace!( no_expand2, replace, r"([^ ]+)[ ]+([^ ]+)", "w1 w2", regex::NoExpand("$$1"), "$$1" ); replace!( closure_returning_reference, replace, r"([0-9]+)", "age: 26", |captures: &regex::Captures<'_>| { captures[1][0..1].to_owned() }, "age: 2" ); replace!( closure_returning_value, replace, r"[0-9]+", "age: 26", |_captures: &regex::Captures<'_>| "Z".to_owned(), "age: Z" ); // See https://github.com/rust-lang/regex/issues/314 replace!( match_at_start_replace_with_empty, replace_all, r"foo", "foobar", "", "bar" ); // See https://github.com/rust-lang/regex/issues/393 replace!(single_empty_match, replace, r"^", "bar", "foo", "foobar"); // See https://github.com/rust-lang/regex/issues/399 replace!( capture_longest_possible_name, replace_all, r"(.)", "b", "${1}a $1a", "ba " ); replace!( impl_string, replace, r"[0-9]", "age: 26", "Z".to_string(), "age: Z6" ); replace!( impl_string_ref, replace, r"[0-9]", "age: 26", &"Z".to_string(), "age: Z6" ); replace!( impl_cow_str_borrowed, replace, r"[0-9]", "age: 26", std::borrow::Cow::<'_, str>::Borrowed("Z"), "age: Z6" ); replace!( impl_cow_str_borrowed_ref, replace, r"[0-9]", "age: 26", &std::borrow::Cow::<'_, str>::Borrowed("Z"), "age: Z6" ); replace!( impl_cow_str_owned, replace, r"[0-9]", "age: 26", std::borrow::Cow::<'_, str>::Owned("Z".to_string()), "age: Z6" ); replace!( impl_cow_str_owned_ref, replace, r"[0-9]", "age: 26", &std::borrow::Cow::<'_, str>::Owned("Z".to_string()), "age: Z6" ); #[test] fn replacen_no_captures() { let re = regex::Regex::new(r"[0-9]").unwrap(); assert_eq!(re.replacen("age: 1234", 2, "Z"), "age: ZZ34"); } #[test] fn replacen_with_captures() { let re = regex::Regex::new(r"([0-9])").unwrap(); assert_eq!(re.replacen("age: 1234", 2, "${1}Z"), "age: 1Z2Z34"); } <file_sep>/regex-automata/src/meta/reverse_inner.rs /*! A module dedicated to plucking inner literals out of a regex pattern, and then constructing a prefilter for them. We also include a regex pattern "prefix" that corresponds to the bits of the regex that need to match before the literals do. 
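For example, given a regex like `\w+(@!|%%)\w+`, the inner literals would be
`{@!, %%}` and the prefix would be `\w+`.
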
The reverse inner optimization then proceeds by looking for matches of the
inner literal(s), and then doing a reverse search of the prefix from the start
of the literal match to find the overall start position of the match.

The essential invariant we want to uphold here is that the literals we return
reflect a set where *at least* one of them must match in order for the overall
regex to match. We also need to maintain the invariant that the regex prefix
returned corresponds to the entirety of the regex up until the literals we
return.

This somewhat limits what we can do. That is, if we have a regex like
`\w+(@!|%%)\w+`, then we can pluck the `{@!, %%}` out and build a prefilter
from it. Then we just need to compile `\w+` in reverse. No fuss no muss. But
if we have a regex like `\d+@!|\w+%%`, then we get kind of stymied.
Technically, we could still extract `{@!, %%}`, and it is true that at least
one of them must match. But then, what is our regex prefix? Again, in theory,
that could be `\d+|\w+`, but that's not quite right, because the `\d+` only
matches when `@!` matches, and `\w+` only matches when `%%` matches.

All of that is technically possible to do, but it seemingly requires a lot of
sophistication and machinery. Probably the way to tackle that is with some
kind of formalism that approaches this problem more generally.

For now, the code below basically just looks for a top-level concatenation.
And if it can find one, it looks for literals in each of the direct child
sub-expressions of that concatenation. If some good ones are found, we return
those and a concatenation of the Hir expressions seen up to that point.
*/

use alloc::vec::Vec;

use regex_syntax::hir::{self, literal, Hir, HirKind};

use crate::{util::prefilter::Prefilter, MatchKind};

/// Attempts to extract an "inner" prefilter from the given HIR expressions. If
/// one was found, then a concatenation of the HIR expressions that precede it
/// is returned.
///
/// The idea here is that the prefilter returned can be used to find candidate
/// matches. And then the HIR returned can be used to build a reverse regex
/// matcher, which will find the start of the candidate match. Finally, the
/// match still has to be confirmed with a normal anchored forward scan to find
/// the end position of the match.
///
/// Note that this assumes leftmost-first match semantics, so callers must
/// not call this otherwise.
pub(crate) fn extract(hirs: &[&Hir]) -> Option<(Hir, Prefilter)> {
    if hirs.len() != 1 {
        debug!(
            "skipping reverse inner optimization since it only \
             supports 1 pattern, {} were given",
            hirs.len(),
        );
        return None;
    }
    let mut concat = match top_concat(hirs[0]) {
        Some(concat) => concat,
        None => {
            debug!(
                "skipping reverse inner optimization because a top-level \
                 concatenation could not be found",
            );
            return None;
        }
    };
    // We skip the first HIR because if it did have a prefix prefilter in it,
    // we probably wouldn't be here looking for an inner prefilter.
    for i in 1..concat.len() {
        let hir = &concat[i];
        let pre = match prefilter(hir) {
            None => continue,
            Some(pre) => pre,
        };
        // Even if we got a prefilter, if it isn't considered "fast," then we
        // probably don't want to bother with it. Namely, since the reverse
        // inner optimization requires some overhead, it likely only makes
        // sense if the prefilter scan itself is (believed) to be much faster
        // than the regex engine.
if !pre.is_fast() { debug!( "skipping extracted inner prefilter because \ it probably isn't fast" ); continue; } let concat_suffix = Hir::concat(concat.split_off(i)); let concat_prefix = Hir::concat(concat); // Look for a prefilter again. Why? Because above we only looked for // a prefilter on the individual 'hir', but we might be able to find // something better and more discriminatory by looking at the entire // suffix. We don't do this above to avoid making this loop worst case // quadratic in the length of 'concat'. let pre2 = match prefilter(&concat_suffix) { None => pre, Some(pre2) => { if pre2.is_fast() { pre2 } else { pre } } }; return Some((concat_prefix, pre2)); } debug!( "skipping reverse inner optimization because a top-level \ sub-expression with a fast prefilter could not be found" ); None } /// Attempt to extract a prefilter from an HIR expression. /// /// We do a little massaging here to do our best that the prefilter we get out /// of this is *probably* fast. Basically, the false positive rate has a much /// higher impact for things like the reverse inner optimization because more /// work needs to potentially be done for each candidate match. /// /// Note that this assumes leftmost-first match semantics, so callers must /// not call this otherwise. fn prefilter(hir: &Hir) -> Option<Prefilter> { let mut extractor = literal::Extractor::new(); extractor.kind(literal::ExtractKind::Prefix); let mut prefixes = extractor.extract(hir); debug!( "inner prefixes (len={:?}) extracted before optimization: {:?}", prefixes.len(), prefixes ); // Since these are inner literals, we know they cannot be exact. But the // extractor doesn't know this. We mark them as inexact because this might // impact literal optimization. Namely, optimization weights "all literals // are exact" as very high, because it presumes that any match results in // an overall match. But of course, that is not the case here. // // In practice, this avoids plucking out a ASCII-only \s as an alternation // of single-byte whitespace characters. prefixes.make_inexact(); prefixes.optimize_for_prefix_by_preference(); debug!( "inner prefixes (len={:?}) extracted after optimization: {:?}", prefixes.len(), prefixes ); prefixes .literals() .and_then(|lits| Prefilter::new(MatchKind::LeftmostFirst, lits)) } /// Looks for a "top level" HirKind::Concat item in the given HIR. This will /// try to return one even if it's embedded in a capturing group, but is /// otherwise pretty conservative in what is returned. /// /// The HIR returned is a complete copy of the concat with all capturing /// groups removed. In effect, the concat returned is "flattened" with respect /// to capturing groups. This makes the detection logic above for prefixes /// a bit simpler, and it works because 1) capturing groups never influence /// whether a match occurs or not and 2) capturing groups are not used when /// doing the reverse inner search to find the start of the match. fn top_concat(mut hir: &Hir) -> Option<Vec<Hir>> { loop { hir = match hir.kind() { HirKind::Empty | HirKind::Literal(_) | HirKind::Class(_) | HirKind::Look(_) | HirKind::Repetition(_) | HirKind::Alternation(_) => return None, HirKind::Capture(hir::Capture { ref sub, .. }) => sub, HirKind::Concat(ref subs) => { // We are careful to only do the flattening/copy when we know // we have a "top level" concat we can inspect. This avoids // doing extra work in cases where we definitely won't use it. // (This might still be wasted work if we can't go on to find // some literals to extract.) 
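                // ('flatten' is defined below; it copies each sub-expression
                // with its capturing groups removed, which is okay because
                // groups never influence whether a match occurs.)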
let concat = Hir::concat(subs.iter().map(|h| flatten(h)).collect()); return match concat.into_kind() { HirKind::Concat(xs) => Some(xs), // It is actually possible for this case to occur, because // 'Hir::concat' might simplify the expression to the point // that concatenations are actually removed. One wonders // whether this leads to other cases where we should be // extracting literals, but in theory, I believe if we do // get here, then it means that a "real" prefilter failed // to be extracted and we should probably leave well enough // alone. (A "real" prefilter is unbothered by "top-level // concats" and "capturing groups.") _ => return None, }; } }; } } /// Returns a copy of the given HIR but with all capturing groups removed. fn flatten(hir: &Hir) -> Hir { match hir.kind() { HirKind::Empty => Hir::empty(), HirKind::Literal(hir::Literal(ref x)) => Hir::literal(x.clone()), HirKind::Class(ref x) => Hir::class(x.clone()), HirKind::Look(ref x) => Hir::look(x.clone()), HirKind::Repetition(ref x) => Hir::repetition(x.with(flatten(&x.sub))), // This is the interesting case. We just drop the group information // entirely and use the child HIR itself. HirKind::Capture(hir::Capture { ref sub, .. }) => flatten(sub), HirKind::Alternation(ref xs) => { Hir::alternation(xs.iter().map(|x| flatten(x)).collect()) } HirKind::Concat(ref xs) => { Hir::concat(xs.iter().map(|x| flatten(x)).collect()) } } } <file_sep>/testdata/anchored.toml # These tests are specifically geared toward searches with 'anchored = true'. # While they are interesting in their own right, they are particularly # important for testing the one-pass DFA since the one-pass DFA can't work in # unanchored contexts. # # Note that "anchored" in this context does not mean "^". Anchored searches are # searches whose matches must begin at the start of the search, which may not # be at the start of the haystack. That's why anchored searches---and there are # some examples below---can still report multiple matches. This occurs when the # matches are adjacent to one another. [[test]] name = "greedy" regex = '(abc)+' haystack = "abcabcabc" matches = [ [[0, 9], [6, 9]], ] anchored = true # When a "earliest" search is used, greediness doesn't really exist because # matches are reported as soon as they are known. [[test]] name = "greedy-earliest" regex = '(abc)+' haystack = "abcabcabc" matches = [ [[0, 3], [0, 3]], [[3, 6], [3, 6]], [[6, 9], [6, 9]], ] anchored = true search-kind = "earliest" [[test]] name = "nongreedy" regex = '(abc)+?' haystack = "abcabcabc" matches = [ [[0, 3], [0, 3]], [[3, 6], [3, 6]], [[6, 9], [6, 9]], ] anchored = true # When "all" semantics are used, non-greediness doesn't exist since the longest # possible match is always taken. [[test]] name = "nongreedy-all" regex = '(abc)+?' haystack = "abcabcabc" matches = [ [[0, 9], [6, 9]], ] anchored = true match-kind = "all" [[test]] name = "word-boundary-unicode-01" regex = '\b\w+\b' haystack = 'βββ☃' matches = [[0, 6]] anchored = true [[test]] name = "word-boundary-nounicode-01" regex = '\b\w+\b' haystack = 'abcβ' matches = [[0, 3]] anchored = true unicode = false # Tests that '.c' doesn't match 'abc' when performing an anchored search from # the beginning of the haystack. This test found two different bugs in the # PikeVM and the meta engine. [[test]] name = "no-match-at-start" regex = '.c' haystack = 'abc' matches = [] anchored = true # Like above, but at a non-zero start offset. 
[[test]] name = "no-match-at-start-bounds" regex = '.c' haystack = 'aabc' bounds = [1, 4] matches = [] anchored = true # This is like no-match-at-start, but hits the "reverse inner" optimization # inside the meta engine. (no-match-at-start hits the "reverse suffix" # optimization.) [[test]] name = "no-match-at-start-reverse-inner" regex = '.c[a-z]' haystack = 'abcz' matches = [] anchored = true # Like above, but at a non-zero start offset. [[test]] name = "no-match-at-start-reverse-inner-bounds" regex = '.c[a-z]' haystack = 'aabcz' bounds = [1, 5] matches = [] anchored = true # Same as no-match-at-start, but applies to the meta engine's "reverse # anchored" optimization. [[test]] name = "no-match-at-start-reverse-anchored" regex = '.c[a-z]$' haystack = 'abcz' matches = [] anchored = true # Like above, but at a non-zero start offset. [[test]] name = "no-match-at-start-reverse-anchored-bounds" regex = '.c[a-z]$' haystack = 'aabcz' bounds = [1, 5] matches = [] anchored = true <file_sep>/regex-automata/src/dfa/dense.rs /*! Types and routines specific to dense DFAs. This module is the home of [`dense::DFA`](DFA). This module also contains a [`dense::Builder`](Builder) and a [`dense::Config`](Config) for building and configuring a dense DFA. */ #[cfg(feature = "dfa-build")] use core::cmp; use core::{convert::TryFrom, fmt, iter, mem::size_of, slice}; #[cfg(feature = "dfa-build")] use alloc::{ collections::{BTreeMap, BTreeSet}, vec, vec::Vec, }; #[cfg(feature = "dfa-build")] use crate::{ dfa::{ accel::Accel, determinize, minimize::Minimizer, remapper::Remapper, sparse, }, nfa::thompson, util::{look::LookMatcher, search::MatchKind}, }; use crate::{ dfa::{ accel::Accels, automaton::{fmt_state_indicator, Automaton}, special::Special, start::StartKind, DEAD, }, util::{ alphabet::{self, ByteClasses, ByteSet}, int::{Pointer, Usize}, prefilter::Prefilter, primitives::{PatternID, StateID}, search::{Anchored, Input, MatchError}, start::{Start, StartByteMap}, wire::{self, DeserializeError, Endian, SerializeError}, }, }; /// The label that is pre-pended to a serialized DFA. const LABEL: &str = "rust-regex-automata-dfa-dense"; /// The format version of dense regexes. This version gets incremented when a /// change occurs. A change may not necessarily be a breaking change, but the /// version does permit good error messages in the case where a breaking change /// is made. const VERSION: u32 = 2; /// The configuration used for compiling a dense DFA. /// /// As a convenience, [`DFA::config`] is an alias for [`Config::new`]. The /// advantage of the former is that it often lets you avoid importing the /// `Config` type directly. /// /// A dense DFA configuration is a simple data object that is typically used /// with [`dense::Builder::configure`](self::Builder::configure). /// /// The default configuration guarantees that a search will never return /// a "quit" error, although it is possible for a search to fail if /// [`Config::starts_for_each_pattern`] wasn't enabled (which it is not by /// default) and an [`Anchored::Pattern`] mode is requested via [`Input`]. #[cfg(feature = "dfa-build")] #[derive(Clone, Debug, Default)] pub struct Config { // As with other configuration types in this crate, we put all our knobs // in options so that we can distinguish between "default" and "not set." // This makes it possible to easily combine multiple configurations // without default values overwriting explicitly specified values. See the // 'overwrite' method. 
// // For docs on the fields below, see the corresponding method setters. accelerate: Option<bool>, pre: Option<Option<Prefilter>>, minimize: Option<bool>, match_kind: Option<MatchKind>, start_kind: Option<StartKind>, starts_for_each_pattern: Option<bool>, byte_classes: Option<bool>, unicode_word_boundary: Option<bool>, quitset: Option<ByteSet>, specialize_start_states: Option<bool>, dfa_size_limit: Option<Option<usize>>, determinize_size_limit: Option<Option<usize>>, } #[cfg(feature = "dfa-build")] impl Config { /// Return a new default dense DFA compiler configuration. pub fn new() -> Config { Config::default() } /// Enable state acceleration. /// /// When enabled, DFA construction will analyze each state to determine /// whether it is eligible for simple acceleration. Acceleration typically /// occurs when most of a state's transitions loop back to itself, leaving /// only a select few bytes that will exit the state. When this occurs, /// other routines like `memchr` can be used to look for those bytes which /// may be much faster than traversing the DFA. /// /// Callers may elect to disable this if consistent performance is more /// desirable than variable performance. Namely, acceleration can sometimes /// make searching slower than it otherwise would be if the transitions /// that leave accelerated states are traversed frequently. /// /// See [`Automaton::accelerator`](crate::dfa::Automaton::accelerator) for /// an example. /// /// This is enabled by default. pub fn accelerate(mut self, yes: bool) -> Config { self.accelerate = Some(yes); self } /// Set a prefilter to be used whenever a start state is entered. /// /// A [`Prefilter`] in this context is meant to accelerate searches by /// looking for literal prefixes that every match for the corresponding /// pattern (or patterns) must start with. Once a prefilter produces a /// match, the underlying search routine continues on to try and confirm /// the match. /// /// Be warned that setting a prefilter does not guarantee that the search /// will be faster. While it's usually a good bet, if the prefilter /// produces a lot of false positive candidates (i.e., positions matched /// by the prefilter but not by the regex), then the overall result can /// be slower than if you had just executed the regex engine without any /// prefilters. /// /// Note that unless [`Config::specialize_start_states`] has been /// explicitly set, then setting this will also enable (when `pre` is /// `Some`) or disable (when `pre` is `None`) start state specialization. /// This occurs because without start state specialization, a prefilter /// is likely to be less effective. And without a prefilter, start state /// specialization is usually pointless. /// /// **WARNING:** Note that prefilters are not preserved as part of /// serialization. Serializing a DFA will drop its prefilter. /// /// By default no prefilter is set. /// /// # Example /// /// ``` /// use regex_automata::{ /// dfa::{dense::DFA, Automaton}, /// util::prefilter::Prefilter, /// Input, HalfMatch, MatchKind, /// }; /// /// let pre = Prefilter::new(MatchKind::LeftmostFirst, &["foo", "bar"]); /// let re = DFA::builder() /// .configure(DFA::config().prefilter(pre)) /// .build(r"(foo|bar)[a-z]+")?; /// let input = Input::new("foo1 barfox bar"); /// assert_eq!( /// Some(HalfMatch::must(0, 11)), /// re.try_search_fwd(&input)?, /// ); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// Be warned though that an incorrect prefilter can lead to incorrect /// results! 
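    /// (The search only attempts to confirm matches at candidate positions
    /// reported by the prefilter, so a prefilter that fails to report some
    /// true match position can cause that match to be missed entirely, as
    /// the next example shows.)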
/// /// ``` /// use regex_automata::{ /// dfa::{dense::DFA, Automaton}, /// util::prefilter::Prefilter, /// Input, HalfMatch, MatchKind, /// }; /// /// let pre = Prefilter::new(MatchKind::LeftmostFirst, &["foo", "car"]); /// let re = DFA::builder() /// .configure(DFA::config().prefilter(pre)) /// .build(r"(foo|bar)[a-z]+")?; /// let input = Input::new("foo1 barfox bar"); /// assert_eq!( /// // No match reported even though there clearly is one! /// None, /// re.try_search_fwd(&input)?, /// ); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn prefilter(mut self, pre: Option<Prefilter>) -> Config { self.pre = Some(pre); if self.specialize_start_states.is_none() { self.specialize_start_states = Some(self.get_prefilter().is_some()); } self } /// Minimize the DFA. /// /// When enabled, the DFA built will be minimized such that it is as small /// as possible. /// /// Whether one enables minimization or not depends on the types of costs /// you're willing to pay and how much you care about its benefits. In /// particular, minimization has worst case `O(n*k*logn)` time and `O(k*n)` /// space, where `n` is the number of DFA states and `k` is the alphabet /// size. In practice, minimization can be quite costly in terms of both /// space and time, so it should only be done if you're willing to wait /// longer to produce a DFA. In general, you might want a minimal DFA in /// the following circumstances: /// /// 1. You would like to optimize for the size of the automaton. This can /// manifest in one of two ways. Firstly, if you're converting the /// DFA into Rust code (or a table embedded in the code), then a minimal /// DFA will translate into a corresponding reduction in code size, and /// thus, also the final compiled binary size. Secondly, if you are /// building many DFAs and putting them on the heap, you'll be able to /// fit more if they are smaller. Note though that building a minimal /// DFA itself requires additional space; you only realize the space /// savings once the minimal DFA is constructed (at which point, the /// space used for minimization is freed). /// 2. You've observed that a smaller DFA results in faster match /// performance. Naively, this isn't guaranteed since there is no /// inherent difference between matching with a bigger-than-minimal /// DFA and a minimal DFA. However, a smaller DFA may make use of your /// CPU's cache more efficiently. /// 3. You are trying to establish an equivalence between regular /// languages. The standard method for this is to build a minimal DFA /// for each language and then compare them. If the DFAs are equivalent /// (up to state renaming), then the languages are equivalent. /// /// Typically, minimization only makes sense as an offline process. That /// is, one might minimize a DFA before serializing it to persistent /// storage. In practical terms, minimization can take around an order of /// magnitude more time than compiling the initial DFA via determinization. /// /// This option is disabled by default. pub fn minimize(mut self, yes: bool) -> Config { self.minimize = Some(yes); self } /// Set the desired match semantics. /// /// The default is [`MatchKind::LeftmostFirst`], which corresponds to the /// match semantics of Perl-like regex engines. That is, when multiple /// patterns would match at the same leftmost position, the pattern that /// appears first in the concrete syntax is chosen. /// /// Currently, the only other kind of match semantics supported is /// [`MatchKind::All`]. 
This corresponds to classical DFA construction /// where all possible matches are added to the DFA. /// /// Typically, `All` is used when one wants to execute an overlapping /// search and `LeftmostFirst` otherwise. In particular, it rarely makes /// sense to use `All` with the various "leftmost" find routines, since the /// leftmost routines depend on the `LeftmostFirst` automata construction /// strategy. Specifically, `LeftmostFirst` adds dead states to the DFA /// as a way to terminate the search and report a match. `LeftmostFirst` /// also supports non-greedy matches using this strategy where as `All` /// does not. /// /// # Example: overlapping search /// /// This example shows the typical use of `MatchKind::All`, which is to /// report overlapping matches. /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{ /// dfa::{Automaton, OverlappingState, dense}, /// HalfMatch, Input, MatchKind, /// }; /// /// let dfa = dense::Builder::new() /// .configure(dense::Config::new().match_kind(MatchKind::All)) /// .build_many(&[r"\w+$", r"\S+$"])?; /// let input = Input::new("@foo"); /// let mut state = OverlappingState::start(); /// /// let expected = Some(HalfMatch::must(1, 4)); /// dfa.try_search_overlapping_fwd(&input, &mut state)?; /// assert_eq!(expected, state.get_match()); /// /// // The first pattern also matches at the same position, so re-running /// // the search will yield another match. Notice also that the first /// // pattern is returned after the second. This is because the second /// // pattern begins its match before the first, is therefore an earlier /// // match and is thus reported first. /// let expected = Some(HalfMatch::must(0, 4)); /// dfa.try_search_overlapping_fwd(&input, &mut state)?; /// assert_eq!(expected, state.get_match()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// # Example: reverse automaton to find start of match /// /// Another example for using `MatchKind::All` is for constructing a /// reverse automaton to find the start of a match. `All` semantics are /// used for this in order to find the longest possible match, which /// corresponds to the leftmost starting position. /// /// Note that if you need the starting position then /// [`dfa::regex::Regex`](crate::dfa::regex::Regex) will handle this for /// you, so it's usually not necessary to do this yourself. /// /// ``` /// use regex_automata::{ /// dfa::{dense, Automaton, StartKind}, /// nfa::thompson::NFA, /// Anchored, HalfMatch, Input, MatchKind, /// }; /// /// let haystack = "123foobar456".as_bytes(); /// let pattern = r"[a-z]+r"; /// /// let dfa_fwd = dense::DFA::new(pattern)?; /// let dfa_rev = dense::Builder::new() /// .thompson(NFA::config().reverse(true)) /// .configure(dense::Config::new() /// // This isn't strictly necessary since both anchored and /// // unanchored searches are supported by default. But since /// // finding the start-of-match only requires anchored searches, /// // we can get rid of the unanchored configuration and possibly /// // slim down our DFA considerably. /// .start_kind(StartKind::Anchored) /// .match_kind(MatchKind::All) /// ) /// .build(pattern)?; /// let expected_fwd = HalfMatch::must(0, 9); /// let expected_rev = HalfMatch::must(0, 3); /// let got_fwd = dfa_fwd.try_search_fwd(&Input::new(haystack))?.unwrap(); /// // Here we don't specify the pattern to search for since there's only /// // one pattern and we're doing a leftmost search. 
But if this were an /// // overlapping search, you'd need to specify the pattern that matched /// // in the forward direction. (Otherwise, you might wind up finding the /// // starting position of a match of some other pattern.) That in turn /// // requires building the reverse automaton with starts_for_each_pattern /// // enabled. Indeed, this is what Regex does internally. /// let input = Input::new(haystack) /// .range(..got_fwd.offset()) /// .anchored(Anchored::Yes); /// let got_rev = dfa_rev.try_search_rev(&input)?.unwrap(); /// assert_eq!(expected_fwd, got_fwd); /// assert_eq!(expected_rev, got_rev); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn match_kind(mut self, kind: MatchKind) -> Config { self.match_kind = Some(kind); self } /// The type of starting state configuration to use for a DFA. /// /// By default, the starting state configuration is [`StartKind::Both`]. /// /// # Example /// /// ``` /// use regex_automata::{ /// dfa::{dense::DFA, Automaton, StartKind}, /// Anchored, HalfMatch, Input, /// }; /// /// let haystack = "quux foo123"; /// let expected = HalfMatch::must(0, 11); /// /// // By default, DFAs support both anchored and unanchored searches. /// let dfa = DFA::new(r"[0-9]+")?; /// let input = Input::new(haystack); /// assert_eq!(Some(expected), dfa.try_search_fwd(&input)?); /// /// // But if we only need anchored searches, then we can build a DFA /// // that only supports anchored searches. This leads to a smaller DFA /// // (potentially significantly smaller in some cases), but a DFA that /// // will panic if you try to use it with an unanchored search. /// let dfa = DFA::builder() /// .configure(DFA::config().start_kind(StartKind::Anchored)) /// .build(r"[0-9]+")?; /// let input = Input::new(haystack) /// .range(8..) /// .anchored(Anchored::Yes); /// assert_eq!(Some(expected), dfa.try_search_fwd(&input)?); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn start_kind(mut self, kind: StartKind) -> Config { self.start_kind = Some(kind); self } /// Whether to compile a separate start state for each pattern in the /// automaton. /// /// When enabled, a separate **anchored** start state is added for each /// pattern in the DFA. When this start state is used, then the DFA will /// only search for matches for the pattern specified, even if there are /// other patterns in the DFA. /// /// The main downside of this option is that it can potentially increase /// the size of the DFA and/or increase the time it takes to build the DFA. /// /// There are a few reasons one might want to enable this (it's disabled /// by default): /// /// 1. When looking for the start of an overlapping match (using a /// reverse DFA), doing it correctly requires starting the reverse search /// using the starting state of the pattern that matched in the forward /// direction. Indeed, when building a [`Regex`](crate::dfa::regex::Regex), /// it will automatically enable this option when building the reverse DFA /// internally. /// 2. When you want to use a DFA with multiple patterns to both search /// for matches of any pattern or to search for anchored matches of one /// particular pattern while using the same DFA. (Otherwise, you would need /// to compile a new DFA for each pattern.) /// 3. Since the start states added for each pattern are anchored, if you /// compile an unanchored DFA with one pattern while also enabling this /// option, then you can use the same DFA to perform anchored or unanchored /// searches. The latter you get with the standard search APIs. 
The former /// you get from the various `_at` search methods that allow you specify a /// pattern ID to search for. /// /// By default this is disabled. /// /// # Example /// /// This example shows how to use this option to permit the same DFA to /// run both anchored and unanchored searches for a single pattern. /// /// ``` /// use regex_automata::{ /// dfa::{dense, Automaton}, /// Anchored, HalfMatch, PatternID, Input, /// }; /// /// let dfa = dense::Builder::new() /// .configure(dense::Config::new().starts_for_each_pattern(true)) /// .build(r"foo[0-9]+")?; /// let haystack = "quux foo123"; /// /// // Here's a normal unanchored search. Notice that we use 'None' for the /// // pattern ID. Since the DFA was built as an unanchored machine, it /// // use its default unanchored starting state. /// let expected = HalfMatch::must(0, 11); /// let input = Input::new(haystack); /// assert_eq!(Some(expected), dfa.try_search_fwd(&input)?); /// // But now if we explicitly specify the pattern to search ('0' being /// // the only pattern in the DFA), then it will use the starting state /// // for that specific pattern which is always anchored. Since the /// // pattern doesn't have a match at the beginning of the haystack, we /// // find nothing. /// let input = Input::new(haystack) /// .anchored(Anchored::Pattern(PatternID::must(0))); /// assert_eq!(None, dfa.try_search_fwd(&input)?); /// // And finally, an anchored search is not the same as putting a '^' at /// // beginning of the pattern. An anchored search can only match at the /// // beginning of the *search*, which we can change: /// let input = Input::new(haystack) /// .anchored(Anchored::Pattern(PatternID::must(0))) /// .range(5..); /// assert_eq!(Some(expected), dfa.try_search_fwd(&input)?); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn starts_for_each_pattern(mut self, yes: bool) -> Config { self.starts_for_each_pattern = Some(yes); self } /// Whether to attempt to shrink the size of the DFA's alphabet or not. /// /// This option is enabled by default and should never be disabled unless /// one is debugging a generated DFA. /// /// When enabled, the DFA will use a map from all possible bytes to their /// corresponding equivalence class. Each equivalence class represents a /// set of bytes that does not discriminate between a match and a non-match /// in the DFA. For example, the pattern `[ab]+` has at least two /// equivalence classes: a set containing `a` and `b` and a set containing /// every byte except for `a` and `b`. `a` and `b` are in the same /// equivalence class because they never discriminate between a match and a /// non-match. /// /// The advantage of this map is that the size of the transition table /// can be reduced drastically from `#states * 256 * sizeof(StateID)` to /// `#states * k * sizeof(StateID)` where `k` is the number of equivalence /// classes (rounded up to the nearest power of 2). As a result, total /// space usage can decrease substantially. Moreover, since a smaller /// alphabet is used, DFA compilation becomes faster as well. /// /// **WARNING:** This is only useful for debugging DFAs. Disabling this /// does not yield any speed advantages. Namely, even when this is /// disabled, a byte class map is still used while searching. The only /// difference is that every byte will be forced into its own distinct /// equivalence class. 
This is useful for debugging the actual generated /// transitions because it lets one see the transitions defined on actual /// bytes instead of the equivalence classes. pub fn byte_classes(mut self, yes: bool) -> Config { self.byte_classes = Some(yes); self } /// Heuristically enable Unicode word boundaries. /// /// When set, this will attempt to implement Unicode word boundaries as if /// they were ASCII word boundaries. This only works when the search input /// is ASCII only. If a non-ASCII byte is observed while searching, then a /// [`MatchError::quit`](crate::MatchError::quit) error is returned. /// /// A possible alternative to enabling this option is to simply use an /// ASCII word boundary, e.g., via `(?-u:\b)`. The main reason to use this /// option is if you absolutely need Unicode support. This option lets one /// use a fast search implementation (a DFA) for some potentially very /// common cases, while providing the option to fall back to some other /// regex engine to handle the general case when an error is returned. /// /// If the pattern provided has no Unicode word boundary in it, then this /// option has no effect. (That is, quitting on a non-ASCII byte only /// occurs when this option is enabled _and_ a Unicode word boundary is /// present in the pattern.) /// /// This is almost equivalent to setting all non-ASCII bytes to be quit /// bytes. The only difference is that this will cause non-ASCII bytes to /// be quit bytes _only_ when a Unicode word boundary is present in the /// pattern. /// /// When enabling this option, callers _must_ be prepared to handle /// a [`MatchError`](crate::MatchError) error during search. /// When using a [`Regex`](crate::dfa::regex::Regex), this corresponds /// to using the `try_` suite of methods. Alternatively, if /// callers can guarantee that their input is ASCII only, then a /// [`MatchError::quit`](crate::MatchError::quit) error will never be /// returned while searching. /// /// This is disabled by default. /// /// # Example /// /// This example shows how to heuristically enable Unicode word boundaries /// in a pattern. It also shows what happens when a search comes across a /// non-ASCII byte. /// /// ``` /// use regex_automata::{ /// dfa::{Automaton, dense}, /// HalfMatch, Input, MatchError, /// }; /// /// let dfa = dense::Builder::new() /// .configure(dense::Config::new().unicode_word_boundary(true)) /// .build(r"\b[0-9]+\b")?; /// /// // The match occurs before the search ever observes the snowman /// // character, so no error occurs. /// let haystack = "foo 123 ☃".as_bytes(); /// let expected = Some(HalfMatch::must(0, 7)); /// let got = dfa.try_search_fwd(&Input::new(haystack))?; /// assert_eq!(expected, got); /// /// // Notice that this search fails, even though the snowman character /// // occurs after the ending match offset. This is because search /// // routines read one byte past the end of the search to account for /// // look-around, and indeed, this is required here to determine whether /// // the trailing \b matches. /// let haystack = "foo 123 ☃".as_bytes(); /// let expected = MatchError::quit(0xE2, 8); /// let got = dfa.try_search_fwd(&Input::new(haystack)); /// assert_eq!(Err(expected), got); /// /// // Another example is executing a search where the span of the haystack /// // we specify is all ASCII, but there is non-ASCII just before it. This /// // correctly also reports an error. 
/// let input = Input::new("β123").range(2..); /// let expected = MatchError::quit(0xB2, 1); /// let got = dfa.try_search_fwd(&input); /// assert_eq!(Err(expected), got); /// /// // And similarly for the trailing word boundary. /// let input = Input::new("123β").range(..3); /// let expected = MatchError::quit(0xCE, 3); /// let got = dfa.try_search_fwd(&input); /// assert_eq!(Err(expected), got); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn unicode_word_boundary(mut self, yes: bool) -> Config { // We have a separate option for this instead of just setting the // appropriate quit bytes here because we don't want to set quit bytes // for every regex. We only want to set them when the regex contains a // Unicode word boundary. self.unicode_word_boundary = Some(yes); self } /// Add a "quit" byte to the DFA. /// /// When a quit byte is seen during search time, then search will return /// a [`MatchError::quit`](crate::MatchError::quit) error indicating the /// offset at which the search stopped. /// /// A quit byte will always overrule any other aspects of a regex. For /// example, if the `x` byte is added as a quit byte and the regex `\w` is /// used, then observing `x` will cause the search to quit immediately /// despite the fact that `x` is in the `\w` class. /// /// This mechanism is primarily useful for heuristically enabling certain /// features like Unicode word boundaries in a DFA. Namely, if the input /// to search is ASCII, then a Unicode word boundary can be implemented /// via an ASCII word boundary with no change in semantics. Thus, a DFA /// can attempt to match a Unicode word boundary but give up as soon as it /// observes a non-ASCII byte. Indeed, if callers set all non-ASCII bytes /// to be quit bytes, then Unicode word boundaries will be permitted when /// building DFAs. Of course, callers should enable /// [`Config::unicode_word_boundary`] if they want this behavior instead. /// (The advantage being that non-ASCII quit bytes will only be added if a /// Unicode word boundary is in the pattern.) /// /// When enabling this option, callers _must_ be prepared to handle a /// [`MatchError`](crate::MatchError) error during search. When using a /// [`Regex`](crate::dfa::regex::Regex), this corresponds to using the /// `try_` suite of methods. /// /// By default, there are no quit bytes set. /// /// # Panics /// /// This panics if heuristic Unicode word boundaries are enabled and any /// non-ASCII byte is removed from the set of quit bytes. Namely, enabling /// Unicode word boundaries requires setting every non-ASCII byte to a quit /// byte. So if the caller attempts to undo any of that, then this will /// panic. /// /// # Example /// /// This example shows how to cause a search to terminate if it sees a /// `\n` byte. This could be useful if, for example, you wanted to prevent /// a user supplied pattern from matching across a line boundary. /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{dfa::{Automaton, dense}, Input, MatchError}; /// /// let dfa = dense::Builder::new() /// .configure(dense::Config::new().quit(b'\n', true)) /// .build(r"foo\p{any}+bar")?; /// /// let haystack = "foo\nbar".as_bytes(); /// // Normally this would produce a match, since \p{any} contains '\n'. /// // But since we instructed the automaton to enter a quit state if a /// // '\n' is observed, this produces a match error instead. 
/// let expected = MatchError::quit(b'\n', 3); /// let got = dfa.try_search_fwd(&Input::new(haystack)).unwrap_err(); /// assert_eq!(expected, got); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn quit(mut self, byte: u8, yes: bool) -> Config { if self.get_unicode_word_boundary() && !byte.is_ascii() && !yes { panic!( "cannot set non-ASCII byte to be non-quit when \ Unicode word boundaries are enabled" ); } if self.quitset.is_none() { self.quitset = Some(ByteSet::empty()); } if yes { self.quitset.as_mut().unwrap().add(byte); } else { self.quitset.as_mut().unwrap().remove(byte); } self } /// Enable specializing start states in the DFA. /// /// When start states are specialized, an implementor of a search routine /// using a lazy DFA can tell when the search has entered a starting state. /// When start states aren't specialized, then it is impossible to know /// whether the search has entered a start state. /// /// Ideally, this option wouldn't need to exist and we could always /// specialize start states. The problem is that start states can be quite /// active. This in turn means that an efficient search routine is likely /// to ping-pong between a heavily optimized hot loop that handles most /// states and to a less optimized specialized handling of start states. /// This causes branches to get heavily mispredicted and overall can /// materially decrease throughput. Therefore, specializing start states /// should only be enabled when it is needed. /// /// Knowing whether a search is in a start state is typically useful when a /// prefilter is active for the search. A prefilter is typically only run /// when in a start state and a prefilter can greatly accelerate a search. /// Therefore, the possible cost of specializing start states is worth it /// in this case. Otherwise, if you have no prefilter, there is likely no /// reason to specialize start states. /// /// This is disabled by default, but note that it is automatically /// enabled (or disabled) if [`Config::prefilter`] is set. Namely, unless /// `specialize_start_states` has already been set, [`Config::prefilter`] /// will automatically enable or disable it based on whether a prefilter /// is present or not, respectively. This is done because a prefilter's /// effectiveness is rooted in being executed whenever the DFA is in a /// start state, and that's only possible to do when they are specialized. /// /// Note that it is plausibly reasonable to _disable_ this option /// explicitly while _enabling_ a prefilter. In that case, a prefilter /// will still be run at the beginning of a search, but never again. This /// in theory could strike a good balance if you're in a situation where a /// prefilter is likely to produce many false positive candidates. /// /// # Example /// /// This example shows how to enable start state specialization and then /// shows how to check whether a state is a start state or not. /// /// ``` /// use regex_automata::{dfa::{Automaton, dense::DFA}, Input}; /// /// let dfa = DFA::builder() /// .configure(DFA::config().specialize_start_states(true)) /// .build(r"[a-z]+")?; /// /// let haystack = "123 foobar 4567".as_bytes(); /// let sid = dfa.start_state_forward(&Input::new(haystack))?; /// // The ID returned by 'start_state_forward' will always be tagged as /// // a start state when start state specialization is enabled. 
/// assert!(dfa.is_special_state(sid)); /// assert!(dfa.is_start_state(sid)); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// Compare the above with the default DFA configuration where start states /// are _not_ specialized. In this case, the start state is not tagged at /// all: /// /// ``` /// use regex_automata::{dfa::{Automaton, dense::DFA}, Input}; /// /// let dfa = DFA::new(r"[a-z]+")?; /// /// let haystack = "123 foobar 4567"; /// let sid = dfa.start_state_forward(&Input::new(haystack))?; /// // Start states are not special in the default configuration! /// assert!(!dfa.is_special_state(sid)); /// assert!(!dfa.is_start_state(sid)); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn specialize_start_states(mut self, yes: bool) -> Config { self.specialize_start_states = Some(yes); self } /// Set a size limit on the total heap used by a DFA. /// /// This size limit is expressed in bytes and is applied during /// determinization of an NFA into a DFA. If the DFA's heap usage, and only /// the DFA, exceeds this configured limit, then determinization is stopped /// and an error is returned. /// /// This limit does not apply to auxiliary storage used during /// determinization that isn't part of the generated DFA. /// /// This limit is only applied during determinization. Currently, there is /// no way to post-pone this check to after minimization if minimization /// was enabled. /// /// The total limit on heap used during determinization is the sum of the /// DFA and determinization size limits. /// /// The default is no limit. /// /// # Example /// /// This example shows a DFA that fails to build because of a configured /// size limit. This particular example also serves as a cautionary tale /// demonstrating just how big DFAs with large Unicode character classes /// can get. /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{dfa::{dense, Automaton}, Input}; /// /// // 6MB isn't enough! /// dense::Builder::new() /// .configure(dense::Config::new().dfa_size_limit(Some(6_000_000))) /// .build(r"\w{20}") /// .unwrap_err(); /// /// // ... but 7MB probably is! /// // (Note that DFA sizes aren't necessarily stable between releases.) /// let dfa = dense::Builder::new() /// .configure(dense::Config::new().dfa_size_limit(Some(7_000_000))) /// .build(r"\w{20}")?; /// let haystack = "A".repeat(20).into_bytes(); /// assert!(dfa.try_search_fwd(&Input::new(&haystack))?.is_some()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// While one needs a little more than 6MB to represent `\w{20}`, it /// turns out that you only need a little more than 6KB to represent /// `(?-u:\w{20})`. So only use Unicode if you need it! /// /// As with [`Config::determinize_size_limit`], the size of a DFA is /// influenced by other factors, such as what start state configurations /// to support. For example, if you only need unanchored searches and not /// anchored searches, then configuring the DFA to only support unanchored /// searches can reduce its size. By default, DFAs support both unanchored /// and anchored searches. /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{dfa::{dense, Automaton, StartKind}, Input}; /// /// // 3MB isn't enough! /// dense::Builder::new() /// .configure(dense::Config::new() /// .dfa_size_limit(Some(3_000_000)) /// .start_kind(StartKind::Unanchored) /// ) /// .build(r"\w{20}") /// .unwrap_err(); /// /// // ... but 4MB probably is! 
/// // (Note that DFA sizes aren't necessarily stable between releases.) /// let dfa = dense::Builder::new() /// .configure(dense::Config::new() /// .dfa_size_limit(Some(4_000_000)) /// .start_kind(StartKind::Unanchored) /// ) /// .build(r"\w{20}")?; /// let haystack = "A".repeat(20).into_bytes(); /// assert!(dfa.try_search_fwd(&Input::new(&haystack))?.is_some()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn dfa_size_limit(mut self, bytes: Option<usize>) -> Config { self.dfa_size_limit = Some(bytes); self } /// Set a size limit on the total heap used by determinization. /// /// This size limit is expressed in bytes and is applied during /// determinization of an NFA into a DFA. If the heap used for auxiliary /// storage during determinization (memory that is not in the DFA but /// necessary for building the DFA) exceeds this configured limit, then /// determinization is stopped and an error is returned. /// /// This limit does not apply to heap used by the DFA itself. /// /// The total limit on heap used during determinization is the sum of the /// DFA and determinization size limits. /// /// The default is no limit. /// /// # Example /// /// This example shows a DFA that fails to build because of a /// configured size limit on the amount of heap space used by /// determinization. This particular example complements the example for /// [`Config::dfa_size_limit`] by demonstrating that not only does Unicode /// potentially make DFAs themselves big, but it also results in more /// auxiliary storage during determinization. (Although, auxiliary storage /// is still not as much as the DFA itself.) /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// # if !cfg!(target_pointer_width = "64") { return Ok(()); } // see #1039 /// use regex_automata::{dfa::{dense, Automaton}, Input}; /// /// // 600KB isn't enough! /// dense::Builder::new() /// .configure(dense::Config::new() /// .determinize_size_limit(Some(600_000)) /// ) /// .build(r"\w{20}") /// .unwrap_err(); /// /// // ... but 700KB probably is! /// // (Note that auxiliary storage sizes aren't necessarily stable between /// // releases.) /// let dfa = dense::Builder::new() /// .configure(dense::Config::new() /// .determinize_size_limit(Some(700_000)) /// ) /// .build(r"\w{20}")?; /// let haystack = "A".repeat(20).into_bytes(); /// assert!(dfa.try_search_fwd(&Input::new(&haystack))?.is_some()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// Note that some parts of the configuration on a DFA can have a /// big impact on how big the DFA is, and thus, how much memory is /// used. For example, the default setting for [`Config::start_kind`] is /// [`StartKind::Both`]. But if you only need an anchored search, for /// example, then it can be much cheaper to build a DFA that only supports /// anchored searches. (Running an unanchored search with it would panic.) /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// # if !cfg!(target_pointer_width = "64") { return Ok(()); } // see #1039 /// use regex_automata::{ /// dfa::{dense, Automaton, StartKind}, /// Anchored, Input, /// }; /// /// // 200KB isn't enough! /// dense::Builder::new() /// .configure(dense::Config::new() /// .determinize_size_limit(Some(200_000)) /// .start_kind(StartKind::Anchored) /// ) /// .build(r"\w{20}") /// .unwrap_err(); /// /// // ... but 300KB probably is! /// // (Note that auxiliary storage sizes aren't necessarily stable between /// // releases.) 
/// let dfa = dense::Builder::new() /// .configure(dense::Config::new() /// .determinize_size_limit(Some(300_000)) /// .start_kind(StartKind::Anchored) /// ) /// .build(r"\w{20}")?; /// let haystack = "A".repeat(20).into_bytes(); /// let input = Input::new(&haystack).anchored(Anchored::Yes); /// assert!(dfa.try_search_fwd(&input)?.is_some()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn determinize_size_limit(mut self, bytes: Option<usize>) -> Config { self.determinize_size_limit = Some(bytes); self } /// Returns whether this configuration has enabled simple state /// acceleration. pub fn get_accelerate(&self) -> bool { self.accelerate.unwrap_or(true) } /// Returns the prefilter attached to this configuration, if any. pub fn get_prefilter(&self) -> Option<&Prefilter> { self.pre.as_ref().unwrap_or(&None).as_ref() } /// Returns whether this configuration has enabled the expensive process /// of minimizing a DFA. pub fn get_minimize(&self) -> bool { self.minimize.unwrap_or(false) } /// Returns the match semantics set in this configuration. pub fn get_match_kind(&self) -> MatchKind { self.match_kind.unwrap_or(MatchKind::LeftmostFirst) } /// Returns the starting state configuration for a DFA. pub fn get_starts(&self) -> StartKind { self.start_kind.unwrap_or(StartKind::Both) } /// Returns whether this configuration has enabled anchored starting states /// for every pattern in the DFA. pub fn get_starts_for_each_pattern(&self) -> bool { self.starts_for_each_pattern.unwrap_or(false) } /// Returns whether this configuration has enabled byte classes or not. /// This is typically a debugging oriented option, as disabling it confers /// no speed benefit. pub fn get_byte_classes(&self) -> bool { self.byte_classes.unwrap_or(true) } /// Returns whether this configuration has enabled heuristic Unicode word /// boundary support. When enabled, it is possible for a search to return /// an error. pub fn get_unicode_word_boundary(&self) -> bool { self.unicode_word_boundary.unwrap_or(false) } /// Returns whether this configuration will instruct the DFA to enter a /// quit state whenever the given byte is seen during a search. When at /// least one byte has this enabled, it is possible for a search to return /// an error. pub fn get_quit(&self, byte: u8) -> bool { self.quitset.map_or(false, |q| q.contains(byte)) } /// Returns whether this configuration will instruct the DFA to /// "specialize" start states. When enabled, the DFA will mark start states /// as "special" so that search routines using the DFA can detect when /// it's in a start state and do some kind of optimization (like run a /// prefilter). pub fn get_specialize_start_states(&self) -> bool { self.specialize_start_states.unwrap_or(false) } /// Returns the DFA size limit of this configuration if one was set. /// The size limit is total number of bytes on the heap that a DFA is /// permitted to use. If the DFA exceeds this limit during construction, /// then construction is stopped and an error is returned. pub fn get_dfa_size_limit(&self) -> Option<usize> { self.dfa_size_limit.unwrap_or(None) } /// Returns the determinization size limit of this configuration if one /// was set. The size limit is total number of bytes on the heap that /// determinization is permitted to use. If determinization exceeds this /// limit during construction, then construction is stopped and an error is /// returned. /// /// This is different from the DFA size limit in that this only applies to /// the auxiliary storage used during determinization. 
Once determinization /// is complete, this memory is freed. /// /// The limit on the total heap memory used is the sum of the DFA and /// determinization size limits. pub fn get_determinize_size_limit(&self) -> Option<usize> { self.determinize_size_limit.unwrap_or(None) } /// Overwrite the default configuration such that the options in `o` are /// always used. If an option in `o` is not set, then the corresponding /// option in `self` is used. If it's not set in `self` either, then it /// remains not set. pub(crate) fn overwrite(&self, o: Config) -> Config { Config { accelerate: o.accelerate.or(self.accelerate), pre: o.pre.or_else(|| self.pre.clone()), minimize: o.minimize.or(self.minimize), match_kind: o.match_kind.or(self.match_kind), start_kind: o.start_kind.or(self.start_kind), starts_for_each_pattern: o .starts_for_each_pattern .or(self.starts_for_each_pattern), byte_classes: o.byte_classes.or(self.byte_classes), unicode_word_boundary: o .unicode_word_boundary .or(self.unicode_word_boundary), quitset: o.quitset.or(self.quitset), specialize_start_states: o .specialize_start_states .or(self.specialize_start_states), dfa_size_limit: o.dfa_size_limit.or(self.dfa_size_limit), determinize_size_limit: o .determinize_size_limit .or(self.determinize_size_limit), } } } /// A builder for constructing a deterministic finite automaton from regular /// expressions. /// /// This builder provides two main things: /// /// 1. It provides a few different `build` routines for actually constructing /// a DFA from different kinds of inputs. The most convenient is /// [`Builder::build`], which builds a DFA directly from a pattern string. The /// most flexible is [`Builder::build_from_nfa`], which builds a DFA straight /// from an NFA. /// 2. The builder permits configuring a number of things. /// [`Builder::configure`] is used with [`Config`] to configure aspects of /// the DFA and the construction process itself. [`Builder::syntax`] and /// [`Builder::thompson`] permit configuring the regex parser and Thompson NFA /// construction, respectively. The syntax and thompson configurations only /// apply when building from a pattern string. /// /// This builder always constructs a *single* DFA. As such, this builder /// can only be used to construct regexes that either detect the presence /// of a match or find the end location of a match. A single DFA cannot /// produce both the start and end of a match. For that information, use a /// [`Regex`](crate::dfa::regex::Regex), which can be similarly configured /// using [`regex::Builder`](crate::dfa::regex::Builder). The main reason to /// use a DFA directly is if the end location of a match is enough for your use /// case. Namely, a `Regex` will construct two DFAs instead of one, since a /// second reverse DFA is needed to find the start of a match. /// /// Note that if one wants to build a sparse DFA, you must first build a dense /// DFA and convert that to a sparse DFA. There is no way to build a sparse /// DFA without first building a dense DFA. /// /// # Example /// /// This example shows how to build a minimized DFA that completely disables /// Unicode. That is: /// /// * Things such as `\w`, `.` and `\b` are no longer Unicode-aware. `\w` /// and `\b` are ASCII-only while `.` matches any byte except for `\n` /// (instead of any UTF-8 encoding of a Unicode scalar value except for /// `\n`). Things that are Unicode only, such as `\pL`, are not allowed. /// * The pattern itself is permitted to match invalid UTF-8. 
For example,
/// things like `[^a]` that match any byte except for `a` are permitted.
///
/// ```
/// use regex_automata::{
///     dfa::{Automaton, dense},
///     util::syntax,
///     HalfMatch, Input,
/// };
///
/// let dfa = dense::Builder::new()
///     .configure(dense::Config::new().minimize(false))
///     .syntax(syntax::Config::new().unicode(false).utf8(false))
///     .build(r"foo[^b]ar.*")?;
///
/// let haystack = b"\xFEfoo\xFFar\xE2\x98\xFF\n";
/// let expected = Some(HalfMatch::must(0, 10));
/// let got = dfa.try_search_fwd(&Input::new(haystack))?;
/// assert_eq!(expected, got);
///
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
#[cfg(feature = "dfa-build")]
#[derive(Clone, Debug)]
pub struct Builder {
    config: Config,
    #[cfg(feature = "syntax")]
    thompson: thompson::Compiler,
}

#[cfg(feature = "dfa-build")]
impl Builder {
    /// Create a new dense DFA builder with the default configuration.
    pub fn new() -> Builder {
        Builder {
            config: Config::default(),
            #[cfg(feature = "syntax")]
            thompson: thompson::Compiler::new(),
        }
    }

    /// Build a DFA from the given pattern.
    ///
    /// If there was a problem parsing or compiling the pattern, then an error
    /// is returned.
    #[cfg(feature = "syntax")]
    pub fn build(&self, pattern: &str) -> Result<OwnedDFA, BuildError> {
        self.build_many(&[pattern])
    }

    /// Build a DFA from the given patterns.
    ///
    /// When matches are returned, the pattern ID corresponds to the index of
    /// the pattern in the slice given.
    #[cfg(feature = "syntax")]
    pub fn build_many<P: AsRef<str>>(
        &self,
        patterns: &[P],
    ) -> Result<OwnedDFA, BuildError> {
        let nfa = self
            .thompson
            .clone()
            // We can always forcefully disable captures because DFAs do not
            // support them.
            .configure(
                thompson::Config::new()
                    .which_captures(thompson::WhichCaptures::None),
            )
            .build_many(patterns)
            .map_err(BuildError::nfa)?;
        self.build_from_nfa(&nfa)
    }

    /// Build a DFA from the given NFA.
    ///
    /// # Example
    ///
    /// This example shows how to build a DFA if you already have an NFA in
    /// hand.
    ///
    /// ```
    /// use regex_automata::{
    ///     dfa::{Automaton, dense},
    ///     nfa::thompson::NFA,
    ///     HalfMatch, Input,
    /// };
    ///
    /// let haystack = "foo123bar".as_bytes();
    ///
    /// // This shows how to set non-default options for building an NFA.
    /// let nfa = NFA::compiler()
    ///     .configure(NFA::config().shrink(true))
    ///     .build(r"[0-9]+")?;
    /// let dfa = dense::Builder::new().build_from_nfa(&nfa)?;
    /// let expected = Some(HalfMatch::must(0, 6));
    /// let got = dfa.try_search_fwd(&Input::new(haystack))?;
    /// assert_eq!(expected, got);
    ///
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```
    pub fn build_from_nfa(
        &self,
        nfa: &thompson::NFA,
    ) -> Result<OwnedDFA, BuildError> {
        let mut quitset = self.config.quitset.unwrap_or(ByteSet::empty());
        if self.config.get_unicode_word_boundary()
            && nfa.look_set_any().contains_word_unicode()
        {
            for b in 0x80..=0xFF {
                quitset.add(b);
            }
        }
        let classes = if !self.config.get_byte_classes() {
            // DFAs will always use the equivalence class map, but enabling
            // this option is useful for debugging. Namely, this will cause all
            // transitions to be defined over their actual bytes instead of an
            // opaque equivalence class identifier. The former is much easier
            // to grok as a human.
            ByteClasses::singletons()
        } else {
            let mut set = nfa.byte_class_set().clone();
            // It is important to distinguish any "quit" bytes from all other
            // bytes. Otherwise, a non-quit byte may end up in the same class
            // as a quit byte, and thus cause the DFA to stop when it shouldn't.
// // Test case: // // regex-cli find hybrid regex -w @conn.json.1000x.log \ // '^#' '\b10\.55\.182\.100\b' if !quitset.is_empty() { set.add_set(&quitset); } set.byte_classes() }; let mut dfa = DFA::initial( classes, nfa.pattern_len(), self.config.get_starts(), nfa.look_matcher(), self.config.get_starts_for_each_pattern(), self.config.get_prefilter().map(|p| p.clone()), quitset, Flags::from_nfa(&nfa), )?; determinize::Config::new() .match_kind(self.config.get_match_kind()) .quit(quitset) .dfa_size_limit(self.config.get_dfa_size_limit()) .determinize_size_limit(self.config.get_determinize_size_limit()) .run(nfa, &mut dfa)?; if self.config.get_minimize() { dfa.minimize(); } if self.config.get_accelerate() { dfa.accelerate(); } // The state shuffling done before this point always assumes that start // states should be marked as "special," even though it isn't the // default configuration. State shuffling is complex enough as it is, // so it's simpler to just "fix" our special state ID ranges to not // include starting states after-the-fact. if !self.config.get_specialize_start_states() { dfa.special.set_no_special_start_states(); } // Look for and set the universal starting states. dfa.set_universal_starts(); Ok(dfa) } /// Apply the given dense DFA configuration options to this builder. pub fn configure(&mut self, config: Config) -> &mut Builder { self.config = self.config.overwrite(config); self } /// Set the syntax configuration for this builder using /// [`syntax::Config`](crate::util::syntax::Config). /// /// This permits setting things like case insensitivity, Unicode and multi /// line mode. /// /// These settings only apply when constructing a DFA directly from a /// pattern. #[cfg(feature = "syntax")] pub fn syntax( &mut self, config: crate::util::syntax::Config, ) -> &mut Builder { self.thompson.syntax(config); self } /// Set the Thompson NFA configuration for this builder using /// [`nfa::thompson::Config`](crate::nfa::thompson::Config). /// /// This permits setting things like whether the DFA should match the regex /// in reverse or if additional time should be spent shrinking the size of /// the NFA. /// /// These settings only apply when constructing a DFA directly from a /// pattern. #[cfg(feature = "syntax")] pub fn thompson(&mut self, config: thompson::Config) -> &mut Builder { self.thompson.configure(config); self } } #[cfg(feature = "dfa-build")] impl Default for Builder { fn default() -> Builder { Builder::new() } } /// A convenience alias for an owned DFA. We use this particular instantiation /// a lot in this crate, so it's worth giving it a name. This instantiation /// is commonly used for mutable APIs on the DFA while building it. The main /// reason for making DFAs generic is no_std support, and more generally, /// making it possible to load a DFA from an arbitrary slice of bytes. #[cfg(feature = "alloc")] pub(crate) type OwnedDFA = DFA<alloc::vec::Vec<u32>>; /// A dense table-based deterministic finite automaton (DFA). /// /// All dense DFAs have one or more start states, zero or more match states /// and a transition table that maps the current state and the current byte /// of input to the next state. A DFA can use this information to implement /// fast searching. In particular, the use of a dense DFA generally makes the /// trade off that match speed is the most valuable characteristic, even if /// building the DFA may take significant time *and* space. 
(More concretely, /// building a DFA takes time and space that is exponential in the size of the /// pattern in the worst case.) As such, the processing of every byte of input /// is done with a small constant number of operations that does not vary with /// the pattern, its size or the size of the alphabet. If your needs don't line /// up with this trade off, then a dense DFA may not be an adequate solution to /// your problem. /// /// In contrast, a [`sparse::DFA`] makes the opposite /// trade off: it uses less space but will execute a variable number of /// instructions per byte at match time, which makes it slower for matching. /// (Note that space usage is still exponential in the size of the pattern in /// the worst case.) /// /// A DFA can be built using the default configuration via the /// [`DFA::new`] constructor. Otherwise, one can /// configure various aspects via [`dense::Builder`](Builder). /// /// A single DFA fundamentally supports the following operations: /// /// 1. Detection of a match. /// 2. Location of the end of a match. /// 3. In the case of a DFA with multiple patterns, which pattern matched is /// reported as well. /// /// A notable absence from the above list of capabilities is the location of /// the *start* of a match. In order to provide both the start and end of /// a match, *two* DFAs are required. This functionality is provided by a /// [`Regex`](crate::dfa::regex::Regex). /// /// # Type parameters /// /// A `DFA` has one type parameter, `T`, which is used to represent state IDs, /// pattern IDs and accelerators. `T` is typically a `Vec<u32>` or a `&[u32]`. /// /// # The `Automaton` trait /// /// This type implements the [`Automaton`] trait, which means it can be used /// for searching. For example: /// /// ``` /// use regex_automata::{dfa::{Automaton, dense::DFA}, HalfMatch, Input}; /// /// let dfa = DFA::new("foo[0-9]+")?; /// let expected = HalfMatch::must(0, 8); /// assert_eq!(Some(expected), dfa.try_search_fwd(&Input::new("foo12345"))?); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[derive(Clone)] pub struct DFA<T> { /// The transition table for this DFA. This includes the transitions /// themselves, along with the stride, number of states and the equivalence /// class mapping. tt: TransitionTable<T>, /// The set of starting state identifiers for this DFA. The starting state /// IDs act as pointers into the transition table. The specific starting /// state chosen for each search is dependent on the context at which the /// search begins. st: StartTable<T>, /// The set of match states and the patterns that match for each /// corresponding match state. /// /// This structure is technically only needed because of support for /// multi-regexes. Namely, multi-regexes require answering not just whether /// a match exists, but _which_ patterns match. So we need to store the /// matching pattern IDs for each match state. We do this even when there /// is only one pattern for the sake of simplicity. In practice, this uses /// up very little space for the case of one pattern. ms: MatchStates<T>, /// Information about which states are "special." Special states are states /// that are dead, quit, matching, starting or accelerated. For more info, /// see the docs for `Special`. special: Special, /// The accelerators for this DFA. /// /// If a state is accelerated, then there exist only a small number of /// bytes that can cause the DFA to leave the state. 
This permits searching /// to use optimized routines to find those specific bytes instead of using /// the transition table. /// /// All accelerated states exist in a contiguous range in the DFA's /// transition table. See dfa/special.rs for more details on how states are /// arranged. accels: Accels<T>, /// Any prefilter attached to this DFA. /// /// Note that currently prefilters are not serialized. When deserializing /// a DFA from bytes, this is always set to `None`. pre: Option<Prefilter>, /// The set of "quit" bytes for this DFA. /// /// This is only used when computing the start state for a particular /// position in a haystack. Namely, in the case where there is a quit /// byte immediately before the start of the search, this set needs to be /// explicitly consulted. In all other cases, quit bytes are detected by /// the DFA itself, by transitioning all quit bytes to a special "quit /// state." quitset: ByteSet, /// Various flags describing the behavior of this DFA. flags: Flags, } #[cfg(feature = "dfa-build")] impl OwnedDFA { /// Parse the given regular expression using a default configuration and /// return the corresponding DFA. /// /// If you want a non-default configuration, then use the /// [`dense::Builder`](Builder) to set your own configuration. /// /// # Example /// /// ``` /// use regex_automata::{dfa::{Automaton, dense}, HalfMatch, Input}; /// /// let dfa = dense::DFA::new("foo[0-9]+bar")?; /// let expected = Some(HalfMatch::must(0, 11)); /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345bar"))?); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[cfg(feature = "syntax")] pub fn new(pattern: &str) -> Result<OwnedDFA, BuildError> { Builder::new().build(pattern) } /// Parse the given regular expressions using a default configuration and /// return the corresponding multi-DFA. /// /// If you want a non-default configuration, then use the /// [`dense::Builder`](Builder) to set your own configuration. /// /// # Example /// /// ``` /// use regex_automata::{dfa::{Automaton, dense}, HalfMatch, Input}; /// /// let dfa = dense::DFA::new_many(&["[0-9]+", "[a-z]+"])?; /// let expected = Some(HalfMatch::must(1, 3)); /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345bar"))?); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[cfg(feature = "syntax")] pub fn new_many<P: AsRef<str>>( patterns: &[P], ) -> Result<OwnedDFA, BuildError> { Builder::new().build_many(patterns) } } #[cfg(feature = "dfa-build")] impl OwnedDFA { /// Create a new DFA that matches every input. /// /// # Example /// /// ``` /// use regex_automata::{dfa::{Automaton, dense}, HalfMatch, Input}; /// /// let dfa = dense::DFA::always_match()?; /// /// let expected = Some(HalfMatch::must(0, 0)); /// assert_eq!(expected, dfa.try_search_fwd(&Input::new(""))?); /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo"))?); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn always_match() -> Result<OwnedDFA, BuildError> { let nfa = thompson::NFA::always_match(); Builder::new().build_from_nfa(&nfa) } /// Create a new DFA that never matches any input. 
/// /// # Example /// /// ``` /// use regex_automata::{dfa::{Automaton, dense}, Input}; /// /// let dfa = dense::DFA::never_match()?; /// assert_eq!(None, dfa.try_search_fwd(&Input::new(""))?); /// assert_eq!(None, dfa.try_search_fwd(&Input::new("foo"))?); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn never_match() -> Result<OwnedDFA, BuildError> { let nfa = thompson::NFA::never_match(); Builder::new().build_from_nfa(&nfa) } /// Create an initial DFA with the given equivalence classes, pattern /// length and whether anchored starting states are enabled for each /// pattern. An initial DFA can be further mutated via determinization. fn initial( classes: ByteClasses, pattern_len: usize, starts: StartKind, lookm: &LookMatcher, starts_for_each_pattern: bool, pre: Option<Prefilter>, quitset: ByteSet, flags: Flags, ) -> Result<OwnedDFA, BuildError> { let start_pattern_len = if starts_for_each_pattern { Some(pattern_len) } else { None }; Ok(DFA { tt: TransitionTable::minimal(classes), st: StartTable::dead(starts, lookm, start_pattern_len)?, ms: MatchStates::empty(pattern_len), special: Special::new(), accels: Accels::empty(), pre, quitset, flags, }) } } #[cfg(feature = "dfa-build")] impl DFA<&[u32]> { /// Return a new default dense DFA compiler configuration. /// /// This is a convenience routine to avoid needing to import the [`Config`] /// type when customizing the construction of a dense DFA. pub fn config() -> Config { Config::new() } /// Create a new dense DFA builder with the default configuration. /// /// This is a convenience routine to avoid needing to import the /// [`Builder`] type in common cases. pub fn builder() -> Builder { Builder::new() } } impl<T: AsRef<[u32]>> DFA<T> { /// Cheaply return a borrowed version of this dense DFA. Specifically, /// the DFA returned always uses `&[u32]` for its transition table. pub fn as_ref(&self) -> DFA<&'_ [u32]> { DFA { tt: self.tt.as_ref(), st: self.st.as_ref(), ms: self.ms.as_ref(), special: self.special, accels: self.accels(), pre: self.pre.clone(), quitset: self.quitset, flags: self.flags, } } /// Return an owned version of this sparse DFA. Specifically, the DFA /// returned always uses `Vec<u32>` for its transition table. /// /// Effectively, this returns a dense DFA whose transition table lives on /// the heap. #[cfg(feature = "alloc")] pub fn to_owned(&self) -> OwnedDFA { DFA { tt: self.tt.to_owned(), st: self.st.to_owned(), ms: self.ms.to_owned(), special: self.special, accels: self.accels().to_owned(), pre: self.pre.clone(), quitset: self.quitset, flags: self.flags, } } /// Returns the starting state configuration for this DFA. /// /// The default is [`StartKind::Both`], which means the DFA supports both /// unanchored and anchored searches. However, this can generally lead to /// bigger DFAs. Therefore, a DFA might be compiled with support for just /// unanchored or anchored searches. In that case, running a search with /// an unsupported configuration will panic. pub fn start_kind(&self) -> StartKind { self.st.kind } /// Returns the start byte map used for computing the `Start` configuration /// at the beginning of a search. pub(crate) fn start_map(&self) -> &StartByteMap { &self.st.start_map } /// Returns true only if this DFA has starting states for each pattern. /// /// When a DFA has starting states for each pattern, then a search with the /// DFA can be configured to only look for anchored matches of a specific /// pattern. 
Specifically, APIs like [`Automaton::try_search_fwd`] can /// accept a non-None `pattern_id` if and only if this method returns true. /// Otherwise, calling `try_search_fwd` will panic. /// /// Note that if the DFA has no patterns, this always returns false. pub fn starts_for_each_pattern(&self) -> bool { self.st.pattern_len.is_some() } /// Returns the equivalence classes that make up the alphabet for this DFA. /// /// Unless [`Config::byte_classes`] was disabled, it is possible that /// multiple distinct bytes are grouped into the same equivalence class /// if it is impossible for them to discriminate between a match and a /// non-match. This has the effect of reducing the overall alphabet size /// and in turn potentially substantially reducing the size of the DFA's /// transition table. /// /// The downside of using equivalence classes like this is that every state /// transition will automatically use this map to convert an arbitrary /// byte to its corresponding equivalence class. In practice this has a /// negligible impact on performance. pub fn byte_classes(&self) -> &ByteClasses { &self.tt.classes } /// Returns the total number of elements in the alphabet for this DFA. /// /// That is, this returns the total number of transitions that each state /// in this DFA must have. Typically, a normal byte oriented DFA would /// always have an alphabet size of 256, corresponding to the number of /// unique values in a single byte. However, this implementation has two /// peculiarities that impact the alphabet length: /// /// * Every state has a special "EOI" transition that is only followed /// after the end of some haystack is reached. This EOI transition is /// necessary to account for one byte of look-ahead when implementing /// things like `\b` and `$`. /// * Bytes are grouped into equivalence classes such that no two bytes in /// the same class can distinguish a match from a non-match. For example, /// in the regex `^[a-z]+$`, the ASCII bytes `a-z` could all be in the /// same equivalence class. This leads to a massive space savings. /// /// Note though that the alphabet length does _not_ necessarily equal the /// total stride space taken up by a single DFA state in the transition /// table. Namely, for performance reasons, the stride is always the /// smallest power of two that is greater than or equal to the alphabet /// length. For this reason, [`DFA::stride`] or [`DFA::stride2`] are /// often more useful. The alphabet length is typically useful only for /// informational purposes. pub fn alphabet_len(&self) -> usize { self.tt.alphabet_len() } /// Returns the total stride for every state in this DFA, expressed as the /// exponent of a power of 2. The stride is the amount of space each state /// takes up in the transition table, expressed as a number of transitions. /// (Unused transitions map to dead states.) /// /// The stride of a DFA is always equivalent to the smallest power of 2 /// that is greater than or equal to the DFA's alphabet length. This /// definition uses extra space, but permits faster translation between /// premultiplied state identifiers and contiguous indices (by using shifts /// instead of relying on integer division). /// /// For example, if the DFA's stride is 16 transitions, then its `stride2` /// is `4` since `2^4 = 16`. /// /// The minimum `stride2` value is `1` (corresponding to a stride of `2`) /// while the maximum `stride2` value is `9` (corresponding to a stride of /// `512`). 
The maximum is not `8` since the maximum alphabet size is `257` /// when accounting for the special EOI transition. However, an alphabet /// length of that size is exceptionally rare since the alphabet is shrunk /// into equivalence classes. pub fn stride2(&self) -> usize { self.tt.stride2 } /// Returns the total stride for every state in this DFA. This corresponds /// to the total number of transitions used by each state in this DFA's /// transition table. /// /// Please see [`DFA::stride2`] for more information. In particular, this /// returns the stride as the number of transitions, where as `stride2` /// returns it as the exponent of a power of 2. pub fn stride(&self) -> usize { self.tt.stride() } /// Returns the memory usage, in bytes, of this DFA. /// /// The memory usage is computed based on the number of bytes used to /// represent this DFA. /// /// This does **not** include the stack size used up by this DFA. To /// compute that, use `std::mem::size_of::<dense::DFA>()`. pub fn memory_usage(&self) -> usize { self.tt.memory_usage() + self.st.memory_usage() + self.ms.memory_usage() + self.accels.memory_usage() } } /// Routines for converting a dense DFA to other representations, such as /// sparse DFAs or raw bytes suitable for persistent storage. impl<T: AsRef<[u32]>> DFA<T> { /// Convert this dense DFA to a sparse DFA. /// /// If a `StateID` is too small to represent all states in the sparse /// DFA, then this returns an error. In most cases, if a dense DFA is /// constructable with `StateID` then a sparse DFA will be as well. /// However, it is not guaranteed. /// /// # Example /// /// ``` /// use regex_automata::{dfa::{Automaton, dense}, HalfMatch, Input}; /// /// let dense = dense::DFA::new("foo[0-9]+")?; /// let sparse = dense.to_sparse()?; /// /// let expected = Some(HalfMatch::must(0, 8)); /// assert_eq!(expected, sparse.try_search_fwd(&Input::new("foo12345"))?); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[cfg(feature = "dfa-build")] pub fn to_sparse(&self) -> Result<sparse::DFA<Vec<u8>>, BuildError> { sparse::DFA::from_dense(self) } /// Serialize this DFA as raw bytes to a `Vec<u8>` in little endian /// format. Upon success, the `Vec<u8>` and the initial padding length are /// returned. /// /// The written bytes are guaranteed to be deserialized correctly and /// without errors in a semver compatible release of this crate by a /// `DFA`'s deserialization APIs (assuming all other criteria for the /// deserialization APIs has been satisfied): /// /// * [`DFA::from_bytes`] /// * [`DFA::from_bytes_unchecked`] /// /// The padding returned is non-zero if the returned `Vec<u8>` starts at /// an address that does not have the same alignment as `u32`. The padding /// corresponds to the number of leading bytes written to the returned /// `Vec<u8>`. /// /// # Example /// /// This example shows how to serialize and deserialize a DFA: /// /// ``` /// use regex_automata::{dfa::{Automaton, dense::DFA}, HalfMatch, Input}; /// /// // Compile our original DFA. /// let original_dfa = DFA::new("foo[0-9]+")?; /// /// // N.B. We use native endianness here to make the example work, but /// // using to_bytes_little_endian would work on a little endian target. /// let (buf, _) = original_dfa.to_bytes_native_endian(); /// // Even if buf has initial padding, DFA::from_bytes will automatically /// // ignore it. 
/// let dfa: DFA<&[u32]> = DFA::from_bytes(&buf)?.0; /// /// let expected = Some(HalfMatch::must(0, 8)); /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[cfg(feature = "dfa-build")] pub fn to_bytes_little_endian(&self) -> (Vec<u8>, usize) { self.to_bytes::<wire::LE>() } /// Serialize this DFA as raw bytes to a `Vec<u8>` in big endian /// format. Upon success, the `Vec<u8>` and the initial padding length are /// returned. /// /// The written bytes are guaranteed to be deserialized correctly and /// without errors in a semver compatible release of this crate by a /// `DFA`'s deserialization APIs (assuming all other criteria for the /// deserialization APIs has been satisfied): /// /// * [`DFA::from_bytes`] /// * [`DFA::from_bytes_unchecked`] /// /// The padding returned is non-zero if the returned `Vec<u8>` starts at /// an address that does not have the same alignment as `u32`. The padding /// corresponds to the number of leading bytes written to the returned /// `Vec<u8>`. /// /// # Example /// /// This example shows how to serialize and deserialize a DFA: /// /// ``` /// use regex_automata::{dfa::{Automaton, dense::DFA}, HalfMatch, Input}; /// /// // Compile our original DFA. /// let original_dfa = DFA::new("foo[0-9]+")?; /// /// // N.B. We use native endianness here to make the example work, but /// // using to_bytes_big_endian would work on a big endian target. /// let (buf, _) = original_dfa.to_bytes_native_endian(); /// // Even if buf has initial padding, DFA::from_bytes will automatically /// // ignore it. /// let dfa: DFA<&[u32]> = DFA::from_bytes(&buf)?.0; /// /// let expected = Some(HalfMatch::must(0, 8)); /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[cfg(feature = "dfa-build")] pub fn to_bytes_big_endian(&self) -> (Vec<u8>, usize) { self.to_bytes::<wire::BE>() } /// Serialize this DFA as raw bytes to a `Vec<u8>` in native endian /// format. Upon success, the `Vec<u8>` and the initial padding length are /// returned. /// /// The written bytes are guaranteed to be deserialized correctly and /// without errors in a semver compatible release of this crate by a /// `DFA`'s deserialization APIs (assuming all other criteria for the /// deserialization APIs has been satisfied): /// /// * [`DFA::from_bytes`] /// * [`DFA::from_bytes_unchecked`] /// /// The padding returned is non-zero if the returned `Vec<u8>` starts at /// an address that does not have the same alignment as `u32`. The padding /// corresponds to the number of leading bytes written to the returned /// `Vec<u8>`. /// /// Generally speaking, native endian format should only be used when /// you know that the target you're compiling the DFA for matches the /// endianness of the target on which you're compiling DFA. For example, /// if serialization and deserialization happen in the same process or on /// the same machine. Otherwise, when serializing a DFA for use in a /// portable environment, you'll almost certainly want to serialize _both_ /// a little endian and a big endian version and then load the correct one /// based on the target's configuration. /// /// # Example /// /// This example shows how to serialize and deserialize a DFA: /// /// ``` /// use regex_automata::{dfa::{Automaton, dense::DFA}, HalfMatch, Input}; /// /// // Compile our original DFA. 
/// let original_dfa = DFA::new("foo[0-9]+")?; /// /// let (buf, _) = original_dfa.to_bytes_native_endian(); /// // Even if buf has initial padding, DFA::from_bytes will automatically /// // ignore it. /// let dfa: DFA<&[u32]> = DFA::from_bytes(&buf)?.0; /// /// let expected = Some(HalfMatch::must(0, 8)); /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[cfg(feature = "dfa-build")] pub fn to_bytes_native_endian(&self) -> (Vec<u8>, usize) { self.to_bytes::<wire::NE>() } /// The implementation of the public `to_bytes` serialization methods, /// which is generic over endianness. #[cfg(feature = "dfa-build")] fn to_bytes<E: Endian>(&self) -> (Vec<u8>, usize) { let len = self.write_to_len(); let (mut buf, padding) = wire::alloc_aligned_buffer::<u32>(len); // This should always succeed since the only possible serialization // error is providing a buffer that's too small, but we've ensured that // `buf` is big enough here. self.as_ref().write_to::<E>(&mut buf[padding..]).unwrap(); (buf, padding) } /// Serialize this DFA as raw bytes to the given slice, in little endian /// format. Upon success, the total number of bytes written to `dst` is /// returned. /// /// The written bytes are guaranteed to be deserialized correctly and /// without errors in a semver compatible release of this crate by a /// `DFA`'s deserialization APIs (assuming all other criteria for the /// deserialization APIs has been satisfied): /// /// * [`DFA::from_bytes`] /// * [`DFA::from_bytes_unchecked`] /// /// Note that unlike the various `to_byte_*` routines, this does not write /// any padding. Callers are responsible for handling alignment correctly. /// /// # Errors /// /// This returns an error if the given destination slice is not big enough /// to contain the full serialized DFA. If an error occurs, then nothing /// is written to `dst`. /// /// # Example /// /// This example shows how to serialize and deserialize a DFA without /// dynamic memory allocation. /// /// ``` /// use regex_automata::{dfa::{Automaton, dense::DFA}, HalfMatch, Input}; /// /// // Compile our original DFA. /// let original_dfa = DFA::new("foo[0-9]+")?; /// /// // Create a 4KB buffer on the stack to store our serialized DFA. We /// // need to use a special type to force the alignment of our [u8; N] /// // array to be aligned to a 4 byte boundary. Otherwise, deserializing /// // the DFA may fail because of an alignment mismatch. /// #[repr(C)] /// struct Aligned<B: ?Sized> { /// _align: [u32; 0], /// bytes: B, /// } /// let mut buf = Aligned { _align: [], bytes: [0u8; 4 * (1<<10)] }; /// // N.B. We use native endianness here to make the example work, but /// // using write_to_little_endian would work on a little endian target. /// let written = original_dfa.write_to_native_endian(&mut buf.bytes)?; /// let dfa: DFA<&[u32]> = DFA::from_bytes(&buf.bytes[..written])?.0; /// /// let expected = Some(HalfMatch::must(0, 8)); /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn write_to_little_endian( &self, dst: &mut [u8], ) -> Result<usize, SerializeError> { self.as_ref().write_to::<wire::LE>(dst) } /// Serialize this DFA as raw bytes to the given slice, in big endian /// format. Upon success, the total number of bytes written to `dst` is /// returned. 
/// /// The written bytes are guaranteed to be deserialized correctly and /// without errors in a semver compatible release of this crate by a /// `DFA`'s deserialization APIs (assuming all other criteria for the /// deserialization APIs has been satisfied): /// /// * [`DFA::from_bytes`] /// * [`DFA::from_bytes_unchecked`] /// /// Note that unlike the various `to_byte_*` routines, this does not write /// any padding. Callers are responsible for handling alignment correctly. /// /// # Errors /// /// This returns an error if the given destination slice is not big enough /// to contain the full serialized DFA. If an error occurs, then nothing /// is written to `dst`. /// /// # Example /// /// This example shows how to serialize and deserialize a DFA without /// dynamic memory allocation. /// /// ``` /// use regex_automata::{dfa::{Automaton, dense::DFA}, HalfMatch, Input}; /// /// // Compile our original DFA. /// let original_dfa = DFA::new("foo[0-9]+")?; /// /// // Create a 4KB buffer on the stack to store our serialized DFA. We /// // need to use a special type to force the alignment of our [u8; N] /// // array to be aligned to a 4 byte boundary. Otherwise, deserializing /// // the DFA may fail because of an alignment mismatch. /// #[repr(C)] /// struct Aligned<B: ?Sized> { /// _align: [u32; 0], /// bytes: B, /// } /// let mut buf = Aligned { _align: [], bytes: [0u8; 4 * (1<<10)] }; /// // N.B. We use native endianness here to make the example work, but /// // using write_to_big_endian would work on a big endian target. /// let written = original_dfa.write_to_native_endian(&mut buf.bytes)?; /// let dfa: DFA<&[u32]> = DFA::from_bytes(&buf.bytes[..written])?.0; /// /// let expected = Some(HalfMatch::must(0, 8)); /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn write_to_big_endian( &self, dst: &mut [u8], ) -> Result<usize, SerializeError> { self.as_ref().write_to::<wire::BE>(dst) } /// Serialize this DFA as raw bytes to the given slice, in native endian /// format. Upon success, the total number of bytes written to `dst` is /// returned. /// /// The written bytes are guaranteed to be deserialized correctly and /// without errors in a semver compatible release of this crate by a /// `DFA`'s deserialization APIs (assuming all other criteria for the /// deserialization APIs has been satisfied): /// /// * [`DFA::from_bytes`] /// * [`DFA::from_bytes_unchecked`] /// /// Generally speaking, native endian format should only be used when /// you know that the target you're compiling the DFA for matches the /// endianness of the target on which you're compiling DFA. For example, /// if serialization and deserialization happen in the same process or on /// the same machine. Otherwise, when serializing a DFA for use in a /// portable environment, you'll almost certainly want to serialize _both_ /// a little endian and a big endian version and then load the correct one /// based on the target's configuration. /// /// Note that unlike the various `to_byte_*` routines, this does not write /// any padding. Callers are responsible for handling alignment correctly. /// /// # Errors /// /// This returns an error if the given destination slice is not big enough /// to contain the full serialized DFA. If an error occurs, then nothing /// is written to `dst`. /// /// # Example /// /// This example shows how to serialize and deserialize a DFA without /// dynamic memory allocation. 
/// /// ``` /// use regex_automata::{dfa::{Automaton, dense::DFA}, HalfMatch, Input}; /// /// // Compile our original DFA. /// let original_dfa = DFA::new("foo[0-9]+")?; /// /// // Create a 4KB buffer on the stack to store our serialized DFA. We /// // need to use a special type to force the alignment of our [u8; N] /// // array to be aligned to a 4 byte boundary. Otherwise, deserializing /// // the DFA may fail because of an alignment mismatch. /// #[repr(C)] /// struct Aligned<B: ?Sized> { /// _align: [u32; 0], /// bytes: B, /// } /// let mut buf = Aligned { _align: [], bytes: [0u8; 4 * (1<<10)] }; /// let written = original_dfa.write_to_native_endian(&mut buf.bytes)?; /// let dfa: DFA<&[u32]> = DFA::from_bytes(&buf.bytes[..written])?.0; /// /// let expected = Some(HalfMatch::must(0, 8)); /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn write_to_native_endian( &self, dst: &mut [u8], ) -> Result<usize, SerializeError> { self.as_ref().write_to::<wire::NE>(dst) } /// Return the total number of bytes required to serialize this DFA. /// /// This is useful for determining the size of the buffer required to pass /// to one of the serialization routines: /// /// * [`DFA::write_to_little_endian`] /// * [`DFA::write_to_big_endian`] /// * [`DFA::write_to_native_endian`] /// /// Passing a buffer smaller than the size returned by this method will /// result in a serialization error. Serialization routines are guaranteed /// to succeed when the buffer is big enough. /// /// # Example /// /// This example shows how to dynamically allocate enough room to serialize /// a DFA. /// /// ``` /// use regex_automata::{dfa::{Automaton, dense::DFA}, HalfMatch, Input}; /// /// let original_dfa = DFA::new("foo[0-9]+")?; /// /// let mut buf = vec![0; original_dfa.write_to_len()]; /// // This is guaranteed to succeed, because the only serialization error /// // that can occur is when the provided buffer is too small. But /// // write_to_len guarantees a correct size. /// let written = original_dfa.write_to_native_endian(&mut buf).unwrap(); /// // But this is not guaranteed to succeed! In particular, /// // deserialization requires proper alignment for &[u32], but our buffer /// // was allocated as a &[u8] whose required alignment is smaller than /// // &[u32]. However, it's likely to work in practice because of how most /// // allocators work. So if you write code like this, make sure to either /// // handle the error correctly and/or run it under Miri since Miri will /// // likely provoke the error by returning Vec<u8> buffers with alignment /// // less than &[u32]. /// let dfa: DFA<&[u32]> = match DFA::from_bytes(&buf[..written]) { /// // As mentioned above, it is legal for an error to be returned /// // here. It is quite difficult to get a Vec<u8> with a guaranteed /// // alignment equivalent to Vec<u32>. /// Err(_) => return Ok(()), /// Ok((dfa, _)) => dfa, /// }; /// /// let expected = Some(HalfMatch::must(0, 8)); /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// Note that this example isn't actually guaranteed to work! In /// particular, if `buf` is not aligned to a 4-byte boundary, then the /// `DFA::from_bytes` call will fail. If you need this to work, then you /// either need to deal with adding some initial padding yourself, or use /// one of the `to_bytes` methods, which will do it for you. 
pub fn write_to_len(&self) -> usize { wire::write_label_len(LABEL) + wire::write_endianness_check_len() + wire::write_version_len() + size_of::<u32>() // unused, intended for future flexibility + self.flags.write_to_len() + self.tt.write_to_len() + self.st.write_to_len() + self.ms.write_to_len() + self.special.write_to_len() + self.accels.write_to_len() + self.quitset.write_to_len() } } impl<'a> DFA<&'a [u32]> { /// Safely deserialize a DFA with a specific state identifier /// representation. Upon success, this returns both the deserialized DFA /// and the number of bytes read from the given slice. Namely, the contents /// of the slice beyond the DFA are not read. /// /// Deserializing a DFA using this routine will never allocate heap memory. /// For safety purposes, the DFA's transition table will be verified such /// that every transition points to a valid state. If this verification is /// too costly, then a [`DFA::from_bytes_unchecked`] API is provided, which /// will always execute in constant time. /// /// The bytes given must be generated by one of the serialization APIs /// of a `DFA` using a semver compatible release of this crate. Those /// include: /// /// * [`DFA::to_bytes_little_endian`] /// * [`DFA::to_bytes_big_endian`] /// * [`DFA::to_bytes_native_endian`] /// * [`DFA::write_to_little_endian`] /// * [`DFA::write_to_big_endian`] /// * [`DFA::write_to_native_endian`] /// /// The `to_bytes` methods allocate and return a `Vec<u8>` for you, along /// with handling alignment correctly. The `write_to` methods do not /// allocate and write to an existing slice (which may be on the stack). /// Since deserialization always uses the native endianness of the target /// platform, the serialization API you use should match the endianness of /// the target platform. (It's often a good idea to generate serialized /// DFAs for both forms of endianness and then load the correct one based /// on endianness.) /// /// # Errors /// /// Generally speaking, it's easier to state the conditions in which an /// error is _not_ returned. All of the following must be true: /// /// * The bytes given must be produced by one of the serialization APIs /// on this DFA, as mentioned above. /// * The endianness of the target platform matches the endianness used to /// serialized the provided DFA. /// * The slice given must have the same alignment as `u32`. /// /// If any of the above are not true, then an error will be returned. /// /// # Panics /// /// This routine will never panic for any input. /// /// # Example /// /// This example shows how to serialize a DFA to raw bytes, deserialize it /// and then use it for searching. /// /// ``` /// use regex_automata::{dfa::{Automaton, dense::DFA}, HalfMatch, Input}; /// /// let initial = DFA::new("foo[0-9]+")?; /// let (bytes, _) = initial.to_bytes_native_endian(); /// let dfa: DFA<&[u32]> = DFA::from_bytes(&bytes)?.0; /// /// let expected = Some(HalfMatch::must(0, 8)); /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// # Example: dealing with alignment and padding /// /// In the above example, we used the `to_bytes_native_endian` method to /// serialize a DFA, but we ignored part of its return value corresponding /// to padding added to the beginning of the serialized DFA. This is OK /// because deserialization will skip this initial padding. What matters /// is that the address immediately following the padding has an alignment /// that matches `u32`. 
That is, the following is an equivalent but
    /// alternative way to write the above example:
    ///
    /// ```
    /// use regex_automata::{dfa::{Automaton, dense::DFA}, HalfMatch, Input};
    ///
    /// let initial = DFA::new("foo[0-9]+")?;
    /// // Serialization returns the number of leading padding bytes added to
    /// // the returned Vec<u8>.
    /// let (bytes, pad) = initial.to_bytes_native_endian();
    /// let dfa: DFA<&[u32]> = DFA::from_bytes(&bytes[pad..])?.0;
    ///
    /// let expected = Some(HalfMatch::must(0, 8));
    /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?);
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```
    ///
    /// This padding is necessary because Rust's standard library does
    /// not expose any safe and robust way of creating a `Vec<u8>` with a
    /// guaranteed alignment other than 1. Now, in practice, the underlying
    /// allocator is likely to provide a `Vec<u8>` that meets our alignment
    /// requirements, which means `pad` is zero in practice most of the time.
    ///
    /// The purpose of exposing the padding like this is flexibility for the
    /// caller. For example, if one wants to embed a serialized DFA into a
    /// compiled program, then it's important to guarantee that it starts at a
    /// `u32`-aligned address. The simplest way to do this is to discard the
    /// padding bytes and set it up so that the serialized DFA itself begins at
    /// a properly aligned address. We can show this in two parts. The first
    /// part is serializing the DFA to a file:
    ///
    /// ```no_run
    /// use regex_automata::dfa::dense::DFA;
    ///
    /// let dfa = DFA::new("foo[0-9]+")?;
    ///
    /// let (bytes, pad) = dfa.to_bytes_big_endian();
    /// // Write the contents of the DFA *without* the initial padding.
    /// std::fs::write("foo.bigendian.dfa", &bytes[pad..])?;
    ///
    /// // Do it again, but this time for little endian.
    /// let (bytes, pad) = dfa.to_bytes_little_endian();
    /// std::fs::write("foo.littleendian.dfa", &bytes[pad..])?;
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```
    ///
    /// And now the second part is embedding the DFA into the compiled program
    /// and deserializing it at runtime on first use. We use conditional
    /// compilation to choose the correct endianness.
    ///
    /// ```no_run
    /// use regex_automata::{
    ///     dfa::{Automaton, dense::DFA},
    ///     util::{lazy::Lazy, wire::AlignAs},
    ///     HalfMatch, Input,
    /// };
    ///
    /// // This crate provides its own "lazy" type, kind of like
    /// // lazy_static! or once_cell::sync::Lazy. But it works in no-alloc
    /// // no-std environments and lets us write this using completely
    /// // safe code.
    /// static RE: Lazy<DFA<&'static [u32]>> = Lazy::new(|| {
    /// # const _: &str = stringify! {
    ///     // This assignment is made possible (implicitly) via the
    ///     // CoerceUnsized trait. This is what guarantees that our
    ///     // bytes are stored in memory on a 4 byte boundary. You
    ///     // *must* do this or something equivalent for correct
    ///     // deserialization.
/// static ALIGNED: &AlignAs<[u8], u32> = &AlignAs { /// _align: [], /// #[cfg(target_endian = "big")] /// bytes: *include_bytes!("foo.bigendian.dfa"), /// #[cfg(target_endian = "little")] /// bytes: *include_bytes!("foo.littleendian.dfa"), /// }; /// # }; /// # static ALIGNED: &AlignAs<[u8], u32> = &AlignAs { /// # _align: [], /// # bytes: [], /// # }; /// /// let (dfa, _) = DFA::from_bytes(&ALIGNED.bytes) /// .expect("serialized DFA should be valid"); /// dfa /// }); /// /// let expected = Ok(Some(HalfMatch::must(0, 8))); /// assert_eq!(expected, RE.try_search_fwd(&Input::new("foo12345"))); /// ``` /// /// An alternative to [`util::lazy::Lazy`](crate::util::lazy::Lazy) /// is [`lazy_static`](https://crates.io/crates/lazy_static) or /// [`once_cell`](https://crates.io/crates/once_cell), which provide /// stronger guarantees (like the initialization function only being /// executed once). And `once_cell` in particular provides a more /// expressive API. But a `Lazy` value from this crate is likely just fine /// in most circumstances. /// /// Note that regardless of which initialization method you use, you /// will still need to use the [`AlignAs`](crate::util::wire::AlignAs) /// trick above to force correct alignment, but this is safe to do and /// `from_bytes` will return an error if you get it wrong. pub fn from_bytes( slice: &'a [u8], ) -> Result<(DFA<&'a [u32]>, usize), DeserializeError> { // SAFETY: This is safe because we validate the transition table, start // table, match states and accelerators below. If any validation fails, // then we return an error. let (dfa, nread) = unsafe { DFA::from_bytes_unchecked(slice)? }; dfa.tt.validate(&dfa.special)?; dfa.st.validate(&dfa.tt)?; dfa.ms.validate(&dfa)?; dfa.accels.validate()?; // N.B. dfa.special doesn't have a way to do unchecked deserialization, // so it has already been validated. Ok((dfa, nread)) } /// Deserialize a DFA with a specific state identifier representation in /// constant time by omitting the verification of the validity of the /// transition table and other data inside the DFA. /// /// This is just like [`DFA::from_bytes`], except it can potentially return /// a DFA that exhibits undefined behavior if its transition table contains /// invalid state identifiers. /// /// This routine is useful if you need to deserialize a DFA cheaply /// and cannot afford the transition table validation performed by /// `from_bytes`. /// /// # Example /// /// ``` /// use regex_automata::{dfa::{Automaton, dense::DFA}, HalfMatch, Input}; /// /// let initial = DFA::new("foo[0-9]+")?; /// let (bytes, _) = initial.to_bytes_native_endian(); /// // SAFETY: This is guaranteed to be safe since the bytes given come /// // directly from a compatible serialization routine. 
/// let dfa: DFA<&[u32]> = unsafe { DFA::from_bytes_unchecked(&bytes)?.0 }; /// /// let expected = Some(HalfMatch::must(0, 8)); /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub unsafe fn from_bytes_unchecked( slice: &'a [u8], ) -> Result<(DFA<&'a [u32]>, usize), DeserializeError> { let mut nr = 0; nr += wire::skip_initial_padding(slice); wire::check_alignment::<StateID>(&slice[nr..])?; nr += wire::read_label(&slice[nr..], LABEL)?; nr += wire::read_endianness_check(&slice[nr..])?; nr += wire::read_version(&slice[nr..], VERSION)?; let _unused = wire::try_read_u32(&slice[nr..], "unused space")?; nr += size_of::<u32>(); let (flags, nread) = Flags::from_bytes(&slice[nr..])?; nr += nread; let (tt, nread) = TransitionTable::from_bytes_unchecked(&slice[nr..])?; nr += nread; let (st, nread) = StartTable::from_bytes_unchecked(&slice[nr..])?; nr += nread; let (ms, nread) = MatchStates::from_bytes_unchecked(&slice[nr..])?; nr += nread; let (special, nread) = Special::from_bytes(&slice[nr..])?; nr += nread; special.validate_state_len(tt.len(), tt.stride2)?; let (accels, nread) = Accels::from_bytes_unchecked(&slice[nr..])?; nr += nread; let (quitset, nread) = ByteSet::from_bytes(&slice[nr..])?; nr += nread; // Prefilters don't support serialization, so they're always absent. let pre = None; Ok((DFA { tt, st, ms, special, accels, pre, quitset, flags }, nr)) } /// The implementation of the public `write_to` serialization methods, /// which is generic over endianness. /// /// This is defined only for &[u32] to reduce binary size/compilation time. fn write_to<E: Endian>( &self, mut dst: &mut [u8], ) -> Result<usize, SerializeError> { let nwrite = self.write_to_len(); if dst.len() < nwrite { return Err(SerializeError::buffer_too_small("dense DFA")); } dst = &mut dst[..nwrite]; let mut nw = 0; nw += wire::write_label(LABEL, &mut dst[nw..])?; nw += wire::write_endianness_check::<E>(&mut dst[nw..])?; nw += wire::write_version::<E>(VERSION, &mut dst[nw..])?; nw += { // Currently unused, intended for future flexibility E::write_u32(0, &mut dst[nw..]); size_of::<u32>() }; nw += self.flags.write_to::<E>(&mut dst[nw..])?; nw += self.tt.write_to::<E>(&mut dst[nw..])?; nw += self.st.write_to::<E>(&mut dst[nw..])?; nw += self.ms.write_to::<E>(&mut dst[nw..])?; nw += self.special.write_to::<E>(&mut dst[nw..])?; nw += self.accels.write_to::<E>(&mut dst[nw..])?; nw += self.quitset.write_to::<E>(&mut dst[nw..])?; Ok(nw) } } // The following methods implement mutable routines on the internal // representation of a DFA. As such, we must fix the first type parameter to a // `Vec<u32>` since a generic `T: AsRef<[u32]>` does not permit mutation. We // can get away with this because these methods are internal to the crate and // are exclusively used during construction of the DFA. #[cfg(feature = "dfa-build")] impl OwnedDFA { /// Add a start state of this DFA. pub(crate) fn set_start_state( &mut self, anchored: Anchored, start: Start, id: StateID, ) { assert!(self.tt.is_valid(id), "invalid start state"); self.st.set_start(anchored, start, id); } /// Set the given transition to this DFA. Both the `from` and `to` states /// must already exist. pub(crate) fn set_transition( &mut self, from: StateID, byte: alphabet::Unit, to: StateID, ) { self.tt.set(from, byte, to); } /// An an empty state (a state where all transitions lead to a dead state) /// and return its identifier. 
The identifier returned is guaranteed to /// not point to any other existing state. /// /// If adding a state would exceed `StateID::LIMIT`, then this returns an /// error. pub(crate) fn add_empty_state(&mut self) -> Result<StateID, BuildError> { self.tt.add_empty_state() } /// Swap the two states given in the transition table. /// /// This routine does not do anything to check the correctness of this /// swap. Callers must ensure that other states pointing to id1 and id2 are /// updated appropriately. pub(crate) fn swap_states(&mut self, id1: StateID, id2: StateID) { self.tt.swap(id1, id2); } /// Remap all of the state identifiers in this DFA according to the map /// function given. This includes all transitions and all starting state /// identifiers. pub(crate) fn remap(&mut self, map: impl Fn(StateID) -> StateID) { // We could loop over each state ID and call 'remap_state' here, but // this is more direct: just map every transition directly. This // technically might do a little extra work since the alphabet length // is likely less than the stride, but if that is indeed an issue we // should benchmark it and fix it. for sid in self.tt.table_mut().iter_mut() { *sid = map(*sid); } for sid in self.st.table_mut().iter_mut() { *sid = map(*sid); } } /// Remap the transitions for the state given according to the function /// given. This applies the given map function to every transition in the /// given state and changes the transition in place to the result of the /// map function for that transition. pub(crate) fn remap_state( &mut self, id: StateID, map: impl Fn(StateID) -> StateID, ) { self.tt.remap(id, map); } /// Truncate the states in this DFA to the given length. /// /// This routine does not do anything to check the correctness of this /// truncation. Callers must ensure that other states pointing to truncated /// states are updated appropriately. pub(crate) fn truncate_states(&mut self, len: usize) { self.tt.truncate(len); } /// Minimize this DFA in place using Hopcroft's algorithm. pub(crate) fn minimize(&mut self) { Minimizer::new(self).run(); } /// Updates the match state pattern ID map to use the one provided. /// /// This is useful when it's convenient to manipulate matching states /// (and their corresponding pattern IDs) as a map. In particular, the /// representation used by a DFA for this map is not amenable to mutation, /// so if things need to be changed (like when shuffling states), it's /// often easier to work with the map form. pub(crate) fn set_pattern_map( &mut self, map: &BTreeMap<StateID, Vec<PatternID>>, ) -> Result<(), BuildError> { self.ms = self.ms.new_with_map(map)?; Ok(()) } /// Find states that have a small number of non-loop transitions and mark /// them as candidates for acceleration during search. pub(crate) fn accelerate(&mut self) { // dead and quit states can never be accelerated. if self.state_len() <= 2 { return; } // Go through every state and record their accelerator, if possible. let mut accels = BTreeMap::new(); // Count the number of accelerated match, start and non-match/start // states. 
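        // (Counting them separately matters because accelerated match,
        // start and "normal" states each land in a different place in the
        // final state layout; see the long layout comment further below.)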
let (mut cmatch, mut cstart, mut cnormal) = (0, 0, 0); for state in self.states() { if let Some(accel) = state.accelerate(self.byte_classes()) { debug!( "accelerating full DFA state {}: {:?}", state.id().as_usize(), accel, ); accels.insert(state.id(), accel); if self.is_match_state(state.id()) { cmatch += 1; } else if self.is_start_state(state.id()) { cstart += 1; } else { assert!(!self.is_dead_state(state.id())); assert!(!self.is_quit_state(state.id())); cnormal += 1; } } } // If no states were able to be accelerated, then we're done. if accels.is_empty() { return; } let original_accels_len = accels.len(); // A remapper keeps track of state ID changes. Once we're done // shuffling, the remapper is used to rewrite all transitions in the // DFA based on the new positions of states. let mut remapper = Remapper::new(self); // As we swap states, if they are match states, we need to swap their // pattern ID lists too (for multi-regexes). We do this by converting // the lists to an easily swappable map, and then convert back to // MatchStates once we're done. let mut new_matches = self.ms.to_map(self); // There is at least one state that gets accelerated, so these are // guaranteed to get set to sensible values below. self.special.min_accel = StateID::MAX; self.special.max_accel = StateID::ZERO; let update_special_accel = |special: &mut Special, accel_id: StateID| { special.min_accel = cmp::min(special.min_accel, accel_id); special.max_accel = cmp::max(special.max_accel, accel_id); }; // Start by shuffling match states. Any match states that are // accelerated get moved to the end of the match state range. if cmatch > 0 && self.special.matches() { // N.B. special.{min,max}_match do not need updating, since the // range/number of match states does not change. Only the ordering // of match states may change. let mut next_id = self.special.max_match; let mut cur_id = next_id; while cur_id >= self.special.min_match { if let Some(accel) = accels.remove(&cur_id) { accels.insert(next_id, accel); update_special_accel(&mut self.special, next_id); // No need to do any actual swapping for equivalent IDs. if cur_id != next_id { remapper.swap(self, cur_id, next_id); // Swap pattern IDs for match states. let cur_pids = new_matches.remove(&cur_id).unwrap(); let next_pids = new_matches.remove(&next_id).unwrap(); new_matches.insert(cur_id, next_pids); new_matches.insert(next_id, cur_pids); } next_id = self.tt.prev_state_id(next_id); } cur_id = self.tt.prev_state_id(cur_id); } } // This is where it gets tricky. Without acceleration, start states // normally come right after match states. But we want accelerated // states to be a single contiguous range (to make it very fast // to determine whether a state *is* accelerated), while also keeping // match and starting states as contiguous ranges for the same reason. // So what we do here is shuffle states such that it looks like this: // // DQMMMMAAAAASSSSSSNNNNNNN // | | // |---------| // accelerated states // // Where: // D - dead state // Q - quit state // M - match state (may be accelerated) // A - normal state that is accelerated // S - start state (may be accelerated) // N - normal state that is NOT accelerated // // We implement this by shuffling states, which is done by a sequence // of pairwise swaps. We start by looking at all normal states to be // accelerated. When we find one, we swap it with the earliest starting // state, and then swap that with the earliest normal state. This // preserves the contiguous property. 
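        //
        // (Toy illustration, not drawn from an actual DFA: for a layout
        // `D Q M S S N N` where the last `N` is the only accelerated
        // candidate, the two swaps produce `D Q M A S S N`. The
        // accelerated state lands immediately after the match states and
        // the start range slides one slot to the right.)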
// // Once we're done looking for accelerated normal states, now we look // for accelerated starting states by moving them to the beginning // of the starting state range (just like we moved accelerated match // states to the end of the matching state range). // // For a more detailed/different perspective on this, see the docs // in dfa/special.rs. if cnormal > 0 { // our next available starting and normal states for swapping. let mut next_start_id = self.special.min_start; let mut cur_id = self.to_state_id(self.state_len() - 1); // This is guaranteed to exist since cnormal > 0. let mut next_norm_id = self.tt.next_state_id(self.special.max_start); while cur_id >= next_norm_id { if let Some(accel) = accels.remove(&cur_id) { remapper.swap(self, next_start_id, cur_id); remapper.swap(self, next_norm_id, cur_id); // Keep our accelerator map updated with new IDs if the // states we swapped were also accelerated. if let Some(accel2) = accels.remove(&next_norm_id) { accels.insert(cur_id, accel2); } if let Some(accel2) = accels.remove(&next_start_id) { accels.insert(next_norm_id, accel2); } accels.insert(next_start_id, accel); update_special_accel(&mut self.special, next_start_id); // Our start range shifts one to the right now. self.special.min_start = self.tt.next_state_id(self.special.min_start); self.special.max_start = self.tt.next_state_id(self.special.max_start); next_start_id = self.tt.next_state_id(next_start_id); next_norm_id = self.tt.next_state_id(next_norm_id); } // This is pretty tricky, but if our 'next_norm_id' state also // happened to be accelerated, then the result is that it is // now in the position of cur_id, so we need to consider it // again. This loop is still guaranteed to terminate though, // because when accels contains cur_id, we're guaranteed to // increment next_norm_id even if cur_id remains unchanged. if !accels.contains_key(&cur_id) { cur_id = self.tt.prev_state_id(cur_id); } } } // Just like we did for match states, but we want to move accelerated // start states to the beginning of the range instead of the end. if cstart > 0 { // N.B. special.{min,max}_start do not need updating, since the // range/number of start states does not change at this point. Only // the ordering of start states may change. let mut next_id = self.special.min_start; let mut cur_id = next_id; while cur_id <= self.special.max_start { if let Some(accel) = accels.remove(&cur_id) { remapper.swap(self, cur_id, next_id); accels.insert(next_id, accel); update_special_accel(&mut self.special, next_id); next_id = self.tt.next_state_id(next_id); } cur_id = self.tt.next_state_id(cur_id); } } // Remap all transitions in our DFA and assert some things. remapper.remap(self); // This unwrap is OK because acceleration never changes the number of // match states or patterns in those match states. Since acceleration // runs after the pattern map has been set at least once, we know that // our match states cannot error. self.set_pattern_map(&new_matches).unwrap(); self.special.set_max(); self.special.validate().expect("special state ranges should validate"); self.special .validate_state_len(self.state_len(), self.stride2()) .expect( "special state ranges should be consistent with state length", ); assert_eq!( self.special.accel_len(self.stride()), // We record the number of accelerated states initially detected // since the accels map is itself mutated in the process above. 
// If mutated incorrectly, its size may change, and thus can't be // trusted as a source of truth of how many accelerated states we // expected there to be. original_accels_len, "mismatch with expected number of accelerated states", ); // And finally record our accelerators. We kept our accels map updated // as we shuffled states above, so the accelerators should now // correspond to a contiguous range in the state ID space. (Which we // assert.) let mut prev: Option<StateID> = None; for (id, accel) in accels { assert!(prev.map_or(true, |p| self.tt.next_state_id(p) == id)); prev = Some(id); self.accels.add(accel); } } /// Shuffle the states in this DFA so that starting states, match /// states and accelerated states are all contiguous. /// /// See dfa/special.rs for more details. pub(crate) fn shuffle( &mut self, mut matches: BTreeMap<StateID, Vec<PatternID>>, ) -> Result<(), BuildError> { // The determinizer always adds a quit state and it is always second. self.special.quit_id = self.to_state_id(1); // If all we have are the dead and quit states, then we're done and // the DFA will never produce a match. if self.state_len() <= 2 { self.special.set_max(); return Ok(()); } // Collect all our non-DEAD start states into a convenient set and // confirm there is no overlap with match states. In the classicl DFA // construction, start states can be match states. But because of // look-around, we delay all matches by a byte, which prevents start // states from being match states. let mut is_start: BTreeSet<StateID> = BTreeSet::new(); for (start_id, _, _) in self.starts() { // If a starting configuration points to a DEAD state, then we // don't want to shuffle it. The DEAD state is always the first // state with ID=0. So we can just leave it be. if start_id == DEAD { continue; } assert!( !matches.contains_key(&start_id), "{:?} is both a start and a match state, which is not allowed", start_id, ); is_start.insert(start_id); } // We implement shuffling by a sequence of pairwise swaps of states. // Since we have a number of things referencing states via their // IDs and swapping them changes their IDs, we need to record every // swap we make so that we can remap IDs. The remapper handles this // book-keeping for us. let mut remapper = Remapper::new(self); // Shuffle matching states. if matches.is_empty() { self.special.min_match = DEAD; self.special.max_match = DEAD; } else { // The determinizer guarantees that the first two states are the // dead and quit states, respectively. We want our match states to // come right after quit. let mut next_id = self.to_state_id(2); let mut new_matches = BTreeMap::new(); self.special.min_match = next_id; for (id, pids) in matches { remapper.swap(self, next_id, id); new_matches.insert(next_id, pids); // If we swapped a start state, then update our set. if is_start.contains(&next_id) { is_start.remove(&next_id); is_start.insert(id); } next_id = self.tt.next_state_id(next_id); } matches = new_matches; self.special.max_match = cmp::max( self.special.min_match, self.tt.prev_state_id(next_id), ); } // Shuffle starting states. { let mut next_id = self.to_state_id(2); if self.special.matches() { next_id = self.tt.next_state_id(self.special.max_match); } self.special.min_start = next_id; for id in is_start { remapper.swap(self, next_id, id); next_id = self.tt.next_state_id(next_id); } self.special.max_start = cmp::max( self.special.min_start, self.tt.prev_state_id(next_id), ); } // Finally remap all transitions in our DFA. 
remapper.remap(self); self.set_pattern_map(&matches)?; self.special.set_max(); self.special.validate().expect("special state ranges should validate"); self.special .validate_state_len(self.state_len(), self.stride2()) .expect( "special state ranges should be consistent with state length", ); Ok(()) } /// Checks whether there are universal start states (both anchored and /// unanchored), and if so, sets the relevant fields to the start state /// IDs. /// /// Universal start states occur precisely when the all patterns in the /// DFA have no look-around assertions in their prefix. fn set_universal_starts(&mut self) { assert_eq!(6, Start::len(), "expected 6 start configurations"); let start_id = |dfa: &mut OwnedDFA, inp: &Input<'_>, start: Start| { // This OK because we only call 'start' under conditions // in which we know it will succeed. dfa.st.start(inp, start).expect("valid Input configuration") }; if self.start_kind().has_unanchored() { let inp = Input::new("").anchored(Anchored::No); let sid = start_id(self, &inp, Start::NonWordByte); if sid == start_id(self, &inp, Start::WordByte) && sid == start_id(self, &inp, Start::Text) && sid == start_id(self, &inp, Start::LineLF) && sid == start_id(self, &inp, Start::LineCR) && sid == start_id(self, &inp, Start::CustomLineTerminator) { self.st.universal_start_unanchored = Some(sid); } } if self.start_kind().has_anchored() { let inp = Input::new("").anchored(Anchored::Yes); let sid = start_id(self, &inp, Start::NonWordByte); if sid == start_id(self, &inp, Start::WordByte) && sid == start_id(self, &inp, Start::Text) && sid == start_id(self, &inp, Start::LineLF) && sid == start_id(self, &inp, Start::LineCR) && sid == start_id(self, &inp, Start::CustomLineTerminator) { self.st.universal_start_anchored = Some(sid); } } } } // A variety of generic internal methods for accessing DFA internals. impl<T: AsRef<[u32]>> DFA<T> { /// Return the info about special states. pub(crate) fn special(&self) -> &Special { &self.special } /// Return the info about special states as a mutable borrow. #[cfg(feature = "dfa-build")] pub(crate) fn special_mut(&mut self) -> &mut Special { &mut self.special } /// Returns the quit set (may be empty) used by this DFA. pub(crate) fn quitset(&self) -> &ByteSet { &self.quitset } /// Returns the flags for this DFA. pub(crate) fn flags(&self) -> &Flags { &self.flags } /// Returns an iterator over all states in this DFA. /// /// This iterator yields a tuple for each state. The first element of the /// tuple corresponds to a state's identifier, and the second element /// corresponds to the state itself (comprised of its transitions). pub(crate) fn states(&self) -> StateIter<'_, T> { self.tt.states() } /// Return the total number of states in this DFA. Every DFA has at least /// 1 state, even the empty DFA. pub(crate) fn state_len(&self) -> usize { self.tt.len() } /// Return an iterator over all pattern IDs for the given match state. /// /// If the given state is not a match state, then this panics. #[cfg(feature = "dfa-build")] pub(crate) fn pattern_id_slice(&self, id: StateID) -> &[PatternID] { assert!(self.is_match_state(id)); self.ms.pattern_id_slice(self.match_state_index(id)) } /// Return the total number of pattern IDs for the given match state. /// /// If the given state is not a match state, then this panics. pub(crate) fn match_pattern_len(&self, id: StateID) -> usize { assert!(self.is_match_state(id)); self.ms.pattern_len(self.match_state_index(id)) } /// Returns the total number of patterns matched by this DFA. 
pub(crate) fn pattern_len(&self) -> usize { self.ms.pattern_len } /// Returns a map from match state ID to a list of pattern IDs that match /// in that state. #[cfg(feature = "dfa-build")] pub(crate) fn pattern_map(&self) -> BTreeMap<StateID, Vec<PatternID>> { self.ms.to_map(self) } /// Returns the ID of the quit state for this DFA. #[cfg(feature = "dfa-build")] pub(crate) fn quit_id(&self) -> StateID { self.to_state_id(1) } /// Convert the given state identifier to the state's index. The state's /// index corresponds to the position in which it appears in the transition /// table. When a DFA is NOT premultiplied, then a state's identifier is /// also its index. When a DFA is premultiplied, then a state's identifier /// is equal to `index * alphabet_len`. This routine reverses that. pub(crate) fn to_index(&self, id: StateID) -> usize { self.tt.to_index(id) } /// Convert an index to a state (in the range 0..self.state_len()) to an /// actual state identifier. /// /// This is useful when using a `Vec<T>` as an efficient map keyed by state /// to some other information (such as a remapped state ID). #[cfg(feature = "dfa-build")] pub(crate) fn to_state_id(&self, index: usize) -> StateID { self.tt.to_state_id(index) } /// Return the table of state IDs for this DFA's start states. pub(crate) fn starts(&self) -> StartStateIter<'_> { self.st.iter() } /// Returns the index of the match state for the given ID. If the /// given ID does not correspond to a match state, then this may /// panic or produce an incorrect result. #[cfg_attr(feature = "perf-inline", inline(always))] fn match_state_index(&self, id: StateID) -> usize { debug_assert!(self.is_match_state(id)); // This is one of the places where we rely on the fact that match // states are contiguous in the transition table. Namely, that the // first match state ID always corresponds to dfa.special.min_match. // From there, since we know the stride, we can compute the overall // index of any match state given the match state's ID. let min = self.special().min_match.as_usize(); // CORRECTNESS: We're allowed to produce an incorrect result or panic, // so both the subtraction and the unchecked StateID construction is // OK. self.to_index(StateID::new_unchecked(id.as_usize() - min)) } /// Returns the index of the accelerator state for the given ID. If the /// given ID does not correspond to an accelerator state, then this may /// panic or produce an incorrect result. fn accelerator_index(&self, id: StateID) -> usize { let min = self.special().min_accel.as_usize(); // CORRECTNESS: We're allowed to produce an incorrect result or panic, // so both the subtraction and the unchecked StateID construction is // OK. self.to_index(StateID::new_unchecked(id.as_usize() - min)) } /// Return the accelerators for this DFA. fn accels(&self) -> Accels<&[u32]> { self.accels.as_ref() } /// Return this DFA's transition table as a slice. 
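    ///
    /// The slice is indexed by premultiplied state ID plus equivalence
    /// class, i.e. `trans()[sid.as_usize() + class]` is the transition out
    /// of `sid` on the byte(s) represented by `class`.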
fn trans(&self) -> &[StateID] { self.tt.table() } } impl<T: AsRef<[u32]>> fmt::Debug for DFA<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { writeln!(f, "dense::DFA(")?; for state in self.states() { fmt_state_indicator(f, self, state.id())?; let id = if f.alternate() { state.id().as_usize() } else { self.to_index(state.id()) }; write!(f, "{:06?}: ", id)?; state.fmt(f)?; write!(f, "\n")?; } writeln!(f, "")?; for (i, (start_id, anchored, sty)) in self.starts().enumerate() { let id = if f.alternate() { start_id.as_usize() } else { self.to_index(start_id) }; if i % self.st.stride == 0 { match anchored { Anchored::No => writeln!(f, "START-GROUP(unanchored)")?, Anchored::Yes => writeln!(f, "START-GROUP(anchored)")?, Anchored::Pattern(pid) => { writeln!(f, "START_GROUP(pattern: {:?})", pid)? } } } writeln!(f, " {:?} => {:06?}", sty, id)?; } if self.pattern_len() > 1 { writeln!(f, "")?; for i in 0..self.ms.len() { let id = self.ms.match_state_id(self, i); let id = if f.alternate() { id.as_usize() } else { self.to_index(id) }; write!(f, "MATCH({:06?}): ", id)?; for (i, &pid) in self.ms.pattern_id_slice(i).iter().enumerate() { if i > 0 { write!(f, ", ")?; } write!(f, "{:?}", pid)?; } writeln!(f, "")?; } } writeln!(f, "state length: {:?}", self.state_len())?; writeln!(f, "pattern length: {:?}", self.pattern_len())?; writeln!(f, "flags: {:?}", self.flags)?; writeln!(f, ")")?; Ok(()) } } // SAFETY: We assert that our implementation of each method is correct. unsafe impl<T: AsRef<[u32]>> Automaton for DFA<T> { #[cfg_attr(feature = "perf-inline", inline(always))] fn is_special_state(&self, id: StateID) -> bool { self.special.is_special_state(id) } #[cfg_attr(feature = "perf-inline", inline(always))] fn is_dead_state(&self, id: StateID) -> bool { self.special.is_dead_state(id) } #[cfg_attr(feature = "perf-inline", inline(always))] fn is_quit_state(&self, id: StateID) -> bool { self.special.is_quit_state(id) } #[cfg_attr(feature = "perf-inline", inline(always))] fn is_match_state(&self, id: StateID) -> bool { self.special.is_match_state(id) } #[cfg_attr(feature = "perf-inline", inline(always))] fn is_start_state(&self, id: StateID) -> bool { self.special.is_start_state(id) } #[cfg_attr(feature = "perf-inline", inline(always))] fn is_accel_state(&self, id: StateID) -> bool { self.special.is_accel_state(id) } #[cfg_attr(feature = "perf-inline", inline(always))] fn next_state(&self, current: StateID, input: u8) -> StateID { let input = self.byte_classes().get(input); let o = current.as_usize() + usize::from(input); self.trans()[o] } #[cfg_attr(feature = "perf-inline", inline(always))] unsafe fn next_state_unchecked( &self, current: StateID, byte: u8, ) -> StateID { // We don't (or shouldn't) need an unchecked variant for the byte // class mapping, since bound checks should be omitted automatically // by virtue of its representation. If this ends up not being true as // confirmed by codegen, please file an issue. 
        let class = self.byte_classes().get(byte);
        let o = current.as_usize() + usize::from(class);
        let next = *self.trans().get_unchecked(o);
        next
    }

    #[cfg_attr(feature = "perf-inline", inline(always))]
    fn next_eoi_state(&self, current: StateID) -> StateID {
        let eoi = self.byte_classes().eoi().as_usize();
        let o = current.as_usize() + eoi;
        self.trans()[o]
    }

    #[cfg_attr(feature = "perf-inline", inline(always))]
    fn pattern_len(&self) -> usize {
        self.ms.pattern_len
    }

    #[cfg_attr(feature = "perf-inline", inline(always))]
    fn match_len(&self, id: StateID) -> usize {
        self.match_pattern_len(id)
    }

    #[cfg_attr(feature = "perf-inline", inline(always))]
    fn match_pattern(&self, id: StateID, match_index: usize) -> PatternID {
        // This is an optimization for the very common case of a DFA with a
        // single pattern. This conditional avoids a somewhat more costly path
        // that finds the pattern ID from the state machine, which requires
        // a bit of slicing/pointer-chasing. This optimization tends to only
        // matter when matches are frequent.
        if self.ms.pattern_len == 1 {
            return PatternID::ZERO;
        }
        let state_index = self.match_state_index(id);
        self.ms.pattern_id(state_index, match_index)
    }

    #[cfg_attr(feature = "perf-inline", inline(always))]
    fn has_empty(&self) -> bool {
        self.flags.has_empty
    }

    #[cfg_attr(feature = "perf-inline", inline(always))]
    fn is_utf8(&self) -> bool {
        self.flags.is_utf8
    }

    #[cfg_attr(feature = "perf-inline", inline(always))]
    fn is_always_start_anchored(&self) -> bool {
        self.flags.is_always_start_anchored
    }

    #[cfg_attr(feature = "perf-inline", inline(always))]
    fn start_state_forward(
        &self,
        input: &Input<'_>,
    ) -> Result<StateID, MatchError> {
        if !self.quitset.is_empty() && input.start() > 0 {
            let offset = input.start() - 1;
            let byte = input.haystack()[offset];
            if self.quitset.contains(byte) {
                return Err(MatchError::quit(byte, offset));
            }
        }
        let start = self.st.start_map.fwd(&input);
        self.st.start(input, start)
    }

    #[cfg_attr(feature = "perf-inline", inline(always))]
    fn start_state_reverse(
        &self,
        input: &Input<'_>,
    ) -> Result<StateID, MatchError> {
        if !self.quitset.is_empty() && input.end() < input.haystack().len() {
            let offset = input.end();
            let byte = input.haystack()[offset];
            if self.quitset.contains(byte) {
                return Err(MatchError::quit(byte, offset));
            }
        }
        let start = self.st.start_map.rev(&input);
        self.st.start(input, start)
    }

    #[cfg_attr(feature = "perf-inline", inline(always))]
    fn universal_start_state(&self, mode: Anchored) -> Option<StateID> {
        match mode {
            Anchored::No => self.st.universal_start_unanchored,
            Anchored::Yes => self.st.universal_start_anchored,
            Anchored::Pattern(_) => None,
        }
    }

    #[cfg_attr(feature = "perf-inline", inline(always))]
    fn accelerator(&self, id: StateID) -> &[u8] {
        if !self.is_accel_state(id) {
            return &[];
        }
        self.accels.needles(self.accelerator_index(id))
    }

    #[cfg_attr(feature = "perf-inline", inline(always))]
    fn get_prefilter(&self) -> Option<&Prefilter> {
        self.pre.as_ref()
    }
}

/// The transition table portion of a dense DFA.
///
/// The transition table is the core part of the DFA in that it describes how
/// to move from one state to another based on the input sequence observed.
#[derive(Clone)]
pub(crate) struct TransitionTable<T> {
    /// A contiguous region of memory representing the transition table in
    /// row-major order. The representation is dense. That is, every state
    /// has precisely the same number of transitions. The maximum number of
    /// transitions per state is 257 (256 for each possible byte value, plus 1
    /// for the special EOI transition).
If a DFA has been instructed to use /// byte classes (the default), then the number of transitions is usually /// substantially fewer. /// /// In practice, T is either `Vec<u32>` or `&[u32]`. table: T, /// A set of equivalence classes, where a single equivalence class /// represents a set of bytes that never discriminate between a match /// and a non-match in the DFA. Each equivalence class corresponds to a /// single character in this DFA's alphabet, where the maximum number of /// characters is 257 (each possible value of a byte plus the special /// EOI transition). Consequently, the number of equivalence classes /// corresponds to the number of transitions for each DFA state. Note /// though that the *space* used by each DFA state in the transition table /// may be larger. The total space used by each DFA state is known as the /// stride. /// /// The only time the number of equivalence classes is fewer than 257 is if /// the DFA's kind uses byte classes (which is the default). Equivalence /// classes should generally only be disabled when debugging, so that /// the transitions themselves aren't obscured. Disabling them has no /// other benefit, since the equivalence class map is always used while /// searching. In the vast majority of cases, the number of equivalence /// classes is substantially smaller than 257, particularly when large /// Unicode classes aren't used. classes: ByteClasses, /// The stride of each DFA state, expressed as a power-of-two exponent. /// /// The stride of a DFA corresponds to the total amount of space used by /// each DFA state in the transition table. This may be bigger than the /// size of a DFA's alphabet, since the stride is always the smallest /// power of two greater than or equal to the alphabet size. /// /// While this wastes space, this avoids the need for integer division /// to convert between premultiplied state IDs and their corresponding /// indices. Instead, we can use simple bit-shifts. /// /// See the docs for the `stride2` method for more details. /// /// The minimum `stride2` value is `1` (corresponding to a stride of `2`) /// while the maximum `stride2` value is `9` (corresponding to a stride of /// `512`). The maximum is not `8` since the maximum alphabet size is `257` /// when accounting for the special EOI transition. However, an alphabet /// length of that size is exceptionally rare since the alphabet is shrunk /// into equivalence classes. stride2: usize, } impl<'a> TransitionTable<&'a [u32]> { /// Deserialize a transition table starting at the beginning of `slice`. /// Upon success, return the total number of bytes read along with the /// transition table. /// /// If there was a problem deserializing any part of the transition table, /// then this returns an error. Notably, if the given slice does not have /// the same alignment as `StateID`, then this will return an error (among /// other possible errors). /// /// This is guaranteed to execute in constant time. /// /// # Safety /// /// This routine is not safe because it does not check the validity of the /// transition table itself. In particular, the transition table can be /// quite large, so checking its validity can be somewhat expensive. An /// invalid transition table is not safe because other code may rely on the /// transition table being correct (such as explicit bounds check elision). /// Therefore, an invalid transition table can lead to undefined behavior. 
/// /// Callers that use this function must either pass on the safety invariant /// or guarantee that the bytes given contain a valid transition table. /// This guarantee is upheld by the bytes written by `write_to`. unsafe fn from_bytes_unchecked( mut slice: &'a [u8], ) -> Result<(TransitionTable<&'a [u32]>, usize), DeserializeError> { let slice_start = slice.as_ptr().as_usize(); let (state_len, nr) = wire::try_read_u32_as_usize(slice, "state length")?; slice = &slice[nr..]; let (stride2, nr) = wire::try_read_u32_as_usize(slice, "stride2")?; slice = &slice[nr..]; let (classes, nr) = ByteClasses::from_bytes(slice)?; slice = &slice[nr..]; // The alphabet length (determined by the byte class map) cannot be // bigger than the stride (total space used by each DFA state). if stride2 > 9 { return Err(DeserializeError::generic( "dense DFA has invalid stride2 (too big)", )); } // It also cannot be zero, since even a DFA that never matches anything // has a non-zero number of states with at least two equivalence // classes: one for all 256 byte values and another for the EOI // sentinel. if stride2 < 1 { return Err(DeserializeError::generic( "dense DFA has invalid stride2 (too small)", )); } // This is OK since 1 <= stride2 <= 9. let stride = 1usize.checked_shl(u32::try_from(stride2).unwrap()).unwrap(); if classes.alphabet_len() > stride { return Err(DeserializeError::generic( "alphabet size cannot be bigger than transition table stride", )); } let trans_len = wire::shl(state_len, stride2, "dense table transition length")?; let table_bytes_len = wire::mul( trans_len, StateID::SIZE, "dense table state byte length", )?; wire::check_slice_len(slice, table_bytes_len, "transition table")?; wire::check_alignment::<StateID>(slice)?; let table_bytes = &slice[..table_bytes_len]; slice = &slice[table_bytes_len..]; // SAFETY: Since StateID is always representable as a u32, all we need // to do is ensure that we have the proper length and alignment. We've // checked both above, so the cast below is safe. // // N.B. This is the only not-safe code in this function. let table = core::slice::from_raw_parts( table_bytes.as_ptr().cast::<u32>(), trans_len, ); let tt = TransitionTable { table, classes, stride2 }; Ok((tt, slice.as_ptr().as_usize() - slice_start)) } } #[cfg(feature = "dfa-build")] impl TransitionTable<Vec<u32>> { /// Create a minimal transition table with just two states: a dead state /// and a quit state. The alphabet length and stride of the transition /// table is determined by the given set of equivalence classes. fn minimal(classes: ByteClasses) -> TransitionTable<Vec<u32>> { let mut tt = TransitionTable { table: vec![], classes, stride2: classes.stride2(), }; // Two states, regardless of alphabet size, can always fit into u32. tt.add_empty_state().unwrap(); // dead state tt.add_empty_state().unwrap(); // quit state tt } /// Set a transition in this table. Both the `from` and `to` states must /// already exist, otherwise this panics. `unit` should correspond to the /// transition out of `from` to set to `to`. fn set(&mut self, from: StateID, unit: alphabet::Unit, to: StateID) { assert!(self.is_valid(from), "invalid 'from' state"); assert!(self.is_valid(to), "invalid 'to' state"); self.table[from.as_usize() + self.classes.get_by_unit(unit)] = to.as_u32(); } /// Add an empty state (a state where all transitions lead to a dead state) /// and return its identifier. The identifier returned is guaranteed to /// not point to any other existing state. 
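    ///
    /// The identifier handed out is "premultiplied": it is the offset of
    /// the state's first transition in `table`, so with a stride of 64 the
    /// states added get IDs 0, 64, 128, ... rather than 0, 1, 2, ... (see
    /// the comment in the body below for why).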
/// /// If adding a state would exhaust the state identifier space, then this /// returns an error. fn add_empty_state(&mut self) -> Result<StateID, BuildError> { // Normally, to get a fresh state identifier, we would just // take the index of the next state added to the transition // table. However, we actually perform an optimization here // that premultiplies state IDs by the stride, such that they // point immediately at the beginning of their transitions in // the transition table. This avoids an extra multiplication // instruction for state lookup at search time. // // Premultiplied identifiers means that instead of your matching // loop looking something like this: // // state = dfa.start // for byte in haystack: // next = dfa.transitions[state * stride + byte] // if dfa.is_match(next): // return true // return false // // it can instead look like this: // // state = dfa.start // for byte in haystack: // next = dfa.transitions[state + byte] // if dfa.is_match(next): // return true // return false // // In other words, we save a multiplication instruction in the // critical path. This turns out to be a decent performance win. // The cost of using premultiplied state ids is that they can // require a bigger state id representation. (And they also make // the code a bit more complex, especially during minimization and // when reshuffling states, as one needs to convert back and forth // between state IDs and state indices.) // // To do this, we simply take the index of the state into the // entire transition table, rather than the index of the state // itself. e.g., If the stride is 64, then the ID of the 3rd state // is 192, not 2. let next = self.table.len(); let id = StateID::new(next).map_err(|_| BuildError::too_many_states())?; self.table.extend(iter::repeat(0).take(self.stride())); Ok(id) } /// Swap the two states given in this transition table. /// /// This routine does not do anything to check the correctness of this /// swap. Callers must ensure that other states pointing to id1 and id2 are /// updated appropriately. /// /// Both id1 and id2 must point to valid states, otherwise this panics. fn swap(&mut self, id1: StateID, id2: StateID) { assert!(self.is_valid(id1), "invalid 'id1' state: {:?}", id1); assert!(self.is_valid(id2), "invalid 'id2' state: {:?}", id2); // We only need to swap the parts of the state that are used. So if the // stride is 64, but the alphabet length is only 33, then we save a lot // of work. for b in 0..self.classes.alphabet_len() { self.table.swap(id1.as_usize() + b, id2.as_usize() + b); } } /// Remap the transitions for the state given according to the function /// given. This applies the given map function to every transition in the /// given state and changes the transition in place to the result of the /// map function for that transition. fn remap(&mut self, id: StateID, map: impl Fn(StateID) -> StateID) { for byte in 0..self.alphabet_len() { let i = id.as_usize() + byte; let next = self.table()[i]; self.table_mut()[id.as_usize() + byte] = map(next); } } /// Truncate the states in this transition table to the given length. /// /// This routine does not do anything to check the correctness of this /// truncation. Callers must ensure that other states pointing to truncated /// states are updated appropriately. fn truncate(&mut self, len: usize) { self.table.truncate(len << self.stride2); } } impl<T: AsRef<[u32]>> TransitionTable<T> { /// Writes a serialized form of this transition table to the buffer given. 
/// If the buffer is too small, then an error is returned. To determine /// how big the buffer must be, use `write_to_len`. fn write_to<E: Endian>( &self, mut dst: &mut [u8], ) -> Result<usize, SerializeError> { let nwrite = self.write_to_len(); if dst.len() < nwrite { return Err(SerializeError::buffer_too_small("transition table")); } dst = &mut dst[..nwrite]; // write state length // Unwrap is OK since number of states is guaranteed to fit in a u32. E::write_u32(u32::try_from(self.len()).unwrap(), dst); dst = &mut dst[size_of::<u32>()..]; // write state stride (as power of 2) // Unwrap is OK since stride2 is guaranteed to be <= 9. E::write_u32(u32::try_from(self.stride2).unwrap(), dst); dst = &mut dst[size_of::<u32>()..]; // write byte class map let n = self.classes.write_to(dst)?; dst = &mut dst[n..]; // write actual transitions for &sid in self.table() { let n = wire::write_state_id::<E>(sid, &mut dst); dst = &mut dst[n..]; } Ok(nwrite) } /// Returns the number of bytes the serialized form of this transition /// table will use. fn write_to_len(&self) -> usize { size_of::<u32>() // state length + size_of::<u32>() // stride2 + self.classes.write_to_len() + (self.table().len() * StateID::SIZE) } /// Validates that every state ID in this transition table is valid. /// /// That is, every state ID can be used to correctly index a state in this /// table. fn validate(&self, sp: &Special) -> Result<(), DeserializeError> { for state in self.states() { // We check that the ID itself is well formed. That is, if it's // a special state then it must actually be a quit, dead, accel, // match or start state. if sp.is_special_state(state.id()) { let is_actually_special = sp.is_dead_state(state.id()) || sp.is_quit_state(state.id()) || sp.is_match_state(state.id()) || sp.is_start_state(state.id()) || sp.is_accel_state(state.id()); if !is_actually_special { // This is kind of a cryptic error message... return Err(DeserializeError::generic( "found dense state tagged as special but \ wasn't actually special", )); } } for (_, to) in state.transitions() { if !self.is_valid(to) { return Err(DeserializeError::generic( "found invalid state ID in transition table", )); } } } Ok(()) } /// Converts this transition table to a borrowed value. fn as_ref(&self) -> TransitionTable<&'_ [u32]> { TransitionTable { table: self.table.as_ref(), classes: self.classes.clone(), stride2: self.stride2, } } /// Converts this transition table to an owned value. #[cfg(feature = "alloc")] fn to_owned(&self) -> TransitionTable<alloc::vec::Vec<u32>> { TransitionTable { table: self.table.as_ref().to_vec(), classes: self.classes.clone(), stride2: self.stride2, } } /// Return the state for the given ID. If the given ID is not valid, then /// this panics. fn state(&self, id: StateID) -> State<'_> { assert!(self.is_valid(id)); let i = id.as_usize(); State { id, stride2: self.stride2, transitions: &self.table()[i..i + self.alphabet_len()], } } /// Returns an iterator over all states in this transition table. /// /// This iterator yields a tuple for each state. The first element of the /// tuple corresponds to a state's identifier, and the second element /// corresponds to the state itself (comprised of its transitions). fn states(&self) -> StateIter<'_, T> { StateIter { tt: self, it: self.table().chunks(self.stride()).enumerate(), } } /// Convert a state identifier to an index to a state (in the range /// 0..self.len()). 
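    ///
    /// Concretely this is just `id >> stride2`, the inverse of
    /// `to_state_id` below (which shifts left); e.g. with `stride2 = 6`,
    /// the premultiplied ID `128` maps to index `2`.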
/// /// This is useful when using a `Vec<T>` as an efficient map keyed by state /// to some other information (such as a remapped state ID). /// /// If the given ID is not valid, then this may panic or produce an /// incorrect index. fn to_index(&self, id: StateID) -> usize { id.as_usize() >> self.stride2 } /// Convert an index to a state (in the range 0..self.len()) to an actual /// state identifier. /// /// This is useful when using a `Vec<T>` as an efficient map keyed by state /// to some other information (such as a remapped state ID). /// /// If the given index is not in the specified range, then this may panic /// or produce an incorrect state ID. fn to_state_id(&self, index: usize) -> StateID { // CORRECTNESS: If the given index is not valid, then it is not // required for this to panic or return a valid state ID. StateID::new_unchecked(index << self.stride2) } /// Returns the state ID for the state immediately following the one given. /// /// This does not check whether the state ID returned is invalid. In fact, /// if the state ID given is the last state in this DFA, then the state ID /// returned is guaranteed to be invalid. #[cfg(feature = "dfa-build")] fn next_state_id(&self, id: StateID) -> StateID { self.to_state_id(self.to_index(id).checked_add(1).unwrap()) } /// Returns the state ID for the state immediately preceding the one given. /// /// If the dead ID given (which is zero), then this panics. #[cfg(feature = "dfa-build")] fn prev_state_id(&self, id: StateID) -> StateID { self.to_state_id(self.to_index(id).checked_sub(1).unwrap()) } /// Returns the table as a slice of state IDs. fn table(&self) -> &[StateID] { wire::u32s_to_state_ids(self.table.as_ref()) } /// Returns the total number of states in this transition table. /// /// Note that a DFA always has at least two states: the dead and quit /// states. In particular, the dead state always has ID 0 and is /// correspondingly always the first state. The dead state is never a match /// state. fn len(&self) -> usize { self.table().len() >> self.stride2 } /// Returns the total stride for every state in this DFA. This corresponds /// to the total number of transitions used by each state in this DFA's /// transition table. fn stride(&self) -> usize { 1 << self.stride2 } /// Returns the total number of elements in the alphabet for this /// transition table. This is always less than or equal to `self.stride()`. /// It is only equal when the alphabet length is a power of 2. Otherwise, /// it is always strictly less. fn alphabet_len(&self) -> usize { self.classes.alphabet_len() } /// Returns true if and only if the given state ID is valid for this /// transition table. Validity in this context means that the given ID can /// be used as a valid offset with `self.stride()` to index this transition /// table. fn is_valid(&self, id: StateID) -> bool { let id = id.as_usize(); id < self.table().len() && id % self.stride() == 0 } /// Return the memory usage, in bytes, of this transition table. /// /// This does not include the size of a `TransitionTable` value itself. fn memory_usage(&self) -> usize { self.table().len() * StateID::SIZE } } #[cfg(feature = "dfa-build")] impl<T: AsMut<[u32]>> TransitionTable<T> { /// Returns the table as a slice of state IDs. fn table_mut(&mut self) -> &mut [StateID] { wire::u32s_to_state_ids_mut(self.table.as_mut()) } } /// The set of all possible starting states in a DFA. /// /// The set of starting states corresponds to the possible choices one can make /// in terms of starting a DFA. 
That is, before following the first transition, /// you first need to select the state that you start in. /// /// Normally, a DFA converted from an NFA that has a single starting state /// would itself just have one starting state. However, our support for look /// around generally requires more starting states. The correct starting state /// is chosen based on certain properties of the position at which we begin /// our search. /// /// Before listing those properties, we first must define two terms: /// /// * `haystack` - The bytes to execute the search. The search always starts /// at the beginning of `haystack` and ends before or at the end of /// `haystack`. /// * `context` - The (possibly empty) bytes surrounding `haystack`. `haystack` /// must be contained within `context` such that `context` is at least as big /// as `haystack`. /// /// This split is crucial for dealing with look-around. For example, consider /// the context `foobarbaz`, the haystack `bar` and the regex `^bar$`. This /// regex should _not_ match the haystack since `bar` does not appear at the /// beginning of the input. Similarly, the regex `\Bbar\B` should match the /// haystack because `bar` is not surrounded by word boundaries. But a search /// that does not take context into account would not permit `\B` to match /// since the beginning of any string matches a word boundary. Similarly, a /// search that does not take context into account when searching `^bar$` in /// the haystack `bar` would produce a match when it shouldn't. /// /// Thus, it follows that the starting state is chosen based on the following /// criteria, derived from the position at which the search starts in the /// `context` (corresponding to the start of `haystack`): /// /// 1. If the search starts at the beginning of `context`, then the `Text` /// start state is used. (Since `^` corresponds to /// `hir::Anchor::Start`.) /// 2. If the search starts at a position immediately following a line /// terminator, then the `Line` start state is used. (Since `(?m:^)` /// corresponds to `hir::Anchor::StartLF`.) /// 3. If the search starts at a position immediately following a byte /// classified as a "word" character (`[_0-9a-zA-Z]`), then the `WordByte` /// start state is used. (Since `(?-u:\b)` corresponds to a word boundary.) /// 4. Otherwise, if the search starts at a position immediately following /// a byte that is not classified as a "word" character (`[^_0-9a-zA-Z]`), /// then the `NonWordByte` start state is used. (Since `(?-u:\B)` /// corresponds to a not-word-boundary.) /// /// (N.B. Unicode word boundaries are not supported by the DFA because they /// require multi-byte look-around and this is difficult to support in a DFA.) /// /// To further complicate things, we also support constructing individual /// anchored start states for each pattern in the DFA. (Which is required to /// implement overlapping regexes correctly, but is also generally useful.) /// Thus, when individual start states for each pattern are enabled, then the /// total number of start states represented is `4 + (4 * #patterns)`, where /// the 4 comes from each of the 4 possibilities above. The first 4 represents /// the starting states for the entire DFA, which support searching for /// multiple patterns simultaneously (possibly unanchored). /// /// If individual start states are disabled, then this will only store 4 /// start states. Typically, individual start states are only enabled when /// constructing the reverse DFA for regex matching. 
But they are also useful /// for building DFAs that can search for a specific pattern or even to support /// both anchored and unanchored searches with the same DFA. /// /// Note though that while the start table always has either `4` or /// `4 + (4 * #patterns)` starting state *ids*, the total number of states /// might be considerably smaller. That is, many of the IDs may be duplicative. /// (For example, if a regex doesn't have a `\b` sub-pattern, then there's no /// reason to generate a unique starting state for handling word boundaries. /// Similarly for start/end anchors.) #[derive(Clone)] pub(crate) struct StartTable<T> { /// The initial start state IDs. /// /// In practice, T is either `Vec<u32>` or `&[u32]`. /// /// The first `2 * stride` (currently always 8) entries always correspond /// to the starts states for the entire DFA, with the first 4 entries being /// for unanchored searches and the second 4 entries being for anchored /// searches. To keep things simple, we always use 8 entries even if the /// `StartKind` is not both. /// /// After that, there are `stride * patterns` state IDs, where `patterns` /// may be zero in the case of a DFA with no patterns or in the case where /// the DFA was built without enabling starting states for each pattern. table: T, /// The starting state configuration supported. When 'both', both /// unanchored and anchored searches work. When 'unanchored', anchored /// searches panic. When 'anchored', unanchored searches panic. kind: StartKind, /// The start state configuration for every possible byte. start_map: StartByteMap, /// The number of starting state IDs per pattern. stride: usize, /// The total number of patterns for which starting states are encoded. /// This is `None` for DFAs that were built without start states for each /// pattern. Thus, one cannot use this field to say how many patterns /// are in the DFA in all cases. It is specific to how many patterns are /// represented in this start table. pattern_len: Option<usize>, /// The universal starting state for unanchored searches. This is only /// present when the DFA supports unanchored searches and when all starting /// state IDs for an unanchored search are equivalent. universal_start_unanchored: Option<StateID>, /// The universal starting state for anchored searches. This is only /// present when the DFA supports anchored searches and when all starting /// state IDs for an anchored search are equivalent. universal_start_anchored: Option<StateID>, } #[cfg(feature = "dfa-build")] impl StartTable<Vec<u32>> { /// Create a valid set of start states all pointing to the dead state. /// /// When the corresponding DFA is constructed with start states for each /// pattern, then `patterns` should be the number of patterns. Otherwise, /// it should be zero. /// /// If the total table size could exceed the allocatable limit, then this /// returns an error. In practice, this is unlikely to be able to occur, /// since it's likely that allocation would have failed long before it got /// to this point. fn dead( kind: StartKind, lookm: &LookMatcher, pattern_len: Option<usize>, ) -> Result<StartTable<Vec<u32>>, BuildError> { if let Some(len) = pattern_len { assert!(len <= PatternID::LIMIT); } let stride = Start::len(); // OK because 2*4 is never going to overflow anything. 
let starts_len = stride.checked_mul(2).unwrap(); let pattern_starts_len = match stride.checked_mul(pattern_len.unwrap_or(0)) { Some(x) => x, None => return Err(BuildError::too_many_start_states()), }; let table_len = match starts_len.checked_add(pattern_starts_len) { Some(x) => x, None => return Err(BuildError::too_many_start_states()), }; if let Err(_) = isize::try_from(table_len) { return Err(BuildError::too_many_start_states()); } let table = vec![DEAD.as_u32(); table_len]; let start_map = StartByteMap::new(lookm); Ok(StartTable { table, kind, start_map, stride, pattern_len, universal_start_unanchored: None, universal_start_anchored: None, }) } } impl<'a> StartTable<&'a [u32]> { /// Deserialize a table of start state IDs starting at the beginning of /// `slice`. Upon success, return the total number of bytes read along with /// the table of starting state IDs. /// /// If there was a problem deserializing any part of the starting IDs, /// then this returns an error. Notably, if the given slice does not have /// the same alignment as `StateID`, then this will return an error (among /// other possible errors). /// /// This is guaranteed to execute in constant time. /// /// # Safety /// /// This routine is not safe because it does not check the validity of the /// starting state IDs themselves. In particular, the number of starting /// IDs can be of variable length, so it's possible that checking their /// validity cannot be done in constant time. An invalid starting state /// ID is not safe because other code may rely on the starting IDs being /// correct (such as explicit bounds check elision). Therefore, an invalid /// start ID can lead to undefined behavior. /// /// Callers that use this function must either pass on the safety invariant /// or guarantee that the bytes given contain valid starting state IDs. /// This guarantee is upheld by the bytes written by `write_to`. unsafe fn from_bytes_unchecked( mut slice: &'a [u8], ) -> Result<(StartTable<&'a [u32]>, usize), DeserializeError> { let slice_start = slice.as_ptr().as_usize(); let (kind, nr) = StartKind::from_bytes(slice)?; slice = &slice[nr..]; let (start_map, nr) = StartByteMap::from_bytes(slice)?; slice = &slice[nr..]; let (stride, nr) = wire::try_read_u32_as_usize(slice, "start table stride")?; slice = &slice[nr..]; if stride != Start::len() { return Err(DeserializeError::generic( "invalid starting table stride", )); } let (maybe_pattern_len, nr) = wire::try_read_u32_as_usize(slice, "start table patterns")?; slice = &slice[nr..]; let pattern_len = if maybe_pattern_len.as_u32() == u32::MAX { None } else { Some(maybe_pattern_len) }; if pattern_len.map_or(false, |len| len > PatternID::LIMIT) { return Err(DeserializeError::generic( "invalid number of patterns", )); } let (universal_unanchored, nr) = wire::try_read_u32(slice, "universal unanchored start")?; slice = &slice[nr..]; let universal_start_unanchored = if universal_unanchored == u32::MAX { None } else { Some(StateID::try_from(universal_unanchored).map_err(|e| { DeserializeError::state_id_error( e, "universal unanchored start", ) })?) }; let (universal_anchored, nr) = wire::try_read_u32(slice, "universal anchored start")?; slice = &slice[nr..]; let universal_start_anchored = if universal_anchored == u32::MAX { None } else { Some(StateID::try_from(universal_anchored).map_err(|e| { DeserializeError::state_id_error(e, "universal anchored start") })?) 
}; let pattern_table_size = wire::mul( stride, pattern_len.unwrap_or(0), "invalid pattern length", )?; // Our start states always start with a two stride of start states for // the entire automaton. The first stride is for unanchored starting // states and the second stride is for anchored starting states. What // follows it are an optional set of start states for each pattern. let start_state_len = wire::add( wire::mul(2, stride, "start state stride too big")?, pattern_table_size, "invalid 'any' pattern starts size", )?; let table_bytes_len = wire::mul( start_state_len, StateID::SIZE, "pattern table bytes length", )?; wire::check_slice_len(slice, table_bytes_len, "start ID table")?; wire::check_alignment::<StateID>(slice)?; let table_bytes = &slice[..table_bytes_len]; slice = &slice[table_bytes_len..]; // SAFETY: Since StateID is always representable as a u32, all we need // to do is ensure that we have the proper length and alignment. We've // checked both above, so the cast below is safe. // // N.B. This is the only not-safe code in this function. let table = core::slice::from_raw_parts( table_bytes.as_ptr().cast::<u32>(), start_state_len, ); let st = StartTable { table, kind, start_map, stride, pattern_len, universal_start_unanchored, universal_start_anchored, }; Ok((st, slice.as_ptr().as_usize() - slice_start)) } } impl<T: AsRef<[u32]>> StartTable<T> { /// Writes a serialized form of this start table to the buffer given. If /// the buffer is too small, then an error is returned. To determine how /// big the buffer must be, use `write_to_len`. fn write_to<E: Endian>( &self, mut dst: &mut [u8], ) -> Result<usize, SerializeError> { let nwrite = self.write_to_len(); if dst.len() < nwrite { return Err(SerializeError::buffer_too_small( "starting table ids", )); } dst = &mut dst[..nwrite]; // write start kind let nw = self.kind.write_to::<E>(dst)?; dst = &mut dst[nw..]; // write start byte map let nw = self.start_map.write_to(dst)?; dst = &mut dst[nw..]; // write stride // Unwrap is OK since the stride is always 4 (currently). E::write_u32(u32::try_from(self.stride).unwrap(), dst); dst = &mut dst[size_of::<u32>()..]; // write pattern length // Unwrap is OK since number of patterns is guaranteed to fit in a u32. E::write_u32( u32::try_from(self.pattern_len.unwrap_or(0xFFFF_FFFF)).unwrap(), dst, ); dst = &mut dst[size_of::<u32>()..]; // write universal start unanchored state id, u32::MAX if absent E::write_u32( self.universal_start_unanchored .map_or(u32::MAX, |sid| sid.as_u32()), dst, ); dst = &mut dst[size_of::<u32>()..]; // write universal start anchored state id, u32::MAX if absent E::write_u32( self.universal_start_anchored.map_or(u32::MAX, |sid| sid.as_u32()), dst, ); dst = &mut dst[size_of::<u32>()..]; // write start IDs for &sid in self.table() { let n = wire::write_state_id::<E>(sid, &mut dst); dst = &mut dst[n..]; } Ok(nwrite) } /// Returns the number of bytes the serialized form of this start ID table /// will use. fn write_to_len(&self) -> usize { self.kind.write_to_len() + self.start_map.write_to_len() + size_of::<u32>() // stride + size_of::<u32>() // # patterns + size_of::<u32>() // universal unanchored start + size_of::<u32>() // universal anchored start + (self.table().len() * StateID::SIZE) } /// Validates that every state ID in this start table is valid by checking /// it against the given transition table (which must be for the same DFA). /// /// That is, every state ID can be used to correctly index a state. 
fn validate( &self, tt: &TransitionTable<T>, ) -> Result<(), DeserializeError> { if !self.universal_start_unanchored.map_or(true, |s| tt.is_valid(s)) { return Err(DeserializeError::generic( "found invalid universal unanchored starting state ID", )); } if !self.universal_start_anchored.map_or(true, |s| tt.is_valid(s)) { return Err(DeserializeError::generic( "found invalid universal anchored starting state ID", )); } for &id in self.table() { if !tt.is_valid(id) { return Err(DeserializeError::generic( "found invalid starting state ID", )); } } Ok(()) } /// Converts this start list to a borrowed value. fn as_ref(&self) -> StartTable<&'_ [u32]> { StartTable { table: self.table.as_ref(), kind: self.kind, start_map: self.start_map.clone(), stride: self.stride, pattern_len: self.pattern_len, universal_start_unanchored: self.universal_start_unanchored, universal_start_anchored: self.universal_start_anchored, } } /// Converts this start list to an owned value. #[cfg(feature = "alloc")] fn to_owned(&self) -> StartTable<alloc::vec::Vec<u32>> { StartTable { table: self.table.as_ref().to_vec(), kind: self.kind, start_map: self.start_map.clone(), stride: self.stride, pattern_len: self.pattern_len, universal_start_unanchored: self.universal_start_unanchored, universal_start_anchored: self.universal_start_anchored, } } /// Return the start state for the given input and starting configuration. /// This returns an error if the input configuration is not supported by /// this DFA. For example, requesting an unanchored search when the DFA was /// not built with unanchored starting states. Or asking for an anchored /// pattern search with an invalid pattern ID or on a DFA that was not /// built with start states for each pattern. #[cfg_attr(feature = "perf-inline", inline(always))] fn start( &self, input: &Input<'_>, start: Start, ) -> Result<StateID, MatchError> { let start_index = start.as_usize(); let mode = input.get_anchored(); let index = match mode { Anchored::No => { if !self.kind.has_unanchored() { return Err(MatchError::unsupported_anchored(mode)); } start_index } Anchored::Yes => { if !self.kind.has_anchored() { return Err(MatchError::unsupported_anchored(mode)); } self.stride + start_index } Anchored::Pattern(pid) => { let len = match self.pattern_len { None => { return Err(MatchError::unsupported_anchored(mode)) } Some(len) => len, }; if pid.as_usize() >= len { return Ok(DEAD); } (2 * self.stride) + (self.stride * pid.as_usize()) + start_index } }; Ok(self.table()[index]) } /// Returns an iterator over all start state IDs in this table. /// /// Each item is a triple of: start state ID, the start state type and the /// pattern ID (if any). fn iter(&self) -> StartStateIter<'_> { StartStateIter { st: self.as_ref(), i: 0 } } /// Returns the table as a slice of state IDs. fn table(&self) -> &[StateID] { wire::u32s_to_state_ids(self.table.as_ref()) } /// Return the memory usage, in bytes, of this start list. /// /// This does not include the size of a `StartList` value itself. fn memory_usage(&self) -> usize { self.table().len() * StateID::SIZE } } #[cfg(feature = "dfa-build")] impl<T: AsMut<[u32]>> StartTable<T> { /// Set the start state for the given index and pattern. /// /// If the pattern ID or state ID are not valid, then this will panic. 
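    ///
    /// The slot written here mirrors the layout read by `start`: the first
    /// `2 * stride` entries hold the whole-DFA start states, and per-pattern
    /// start states (when enabled) live at
    /// `2*stride + stride*pid + start_index`.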
fn set_start(&mut self, anchored: Anchored, start: Start, id: StateID) { let start_index = start.as_usize(); let index = match anchored { Anchored::No => start_index, Anchored::Yes => self.stride + start_index, Anchored::Pattern(pid) => { let pid = pid.as_usize(); let len = self .pattern_len .expect("start states for each pattern enabled"); assert!(pid < len, "invalid pattern ID {:?}", pid); self.stride .checked_mul(pid) .unwrap() .checked_add(self.stride.checked_mul(2).unwrap()) .unwrap() .checked_add(start_index) .unwrap() } }; self.table_mut()[index] = id; } /// Returns the table as a mutable slice of state IDs. fn table_mut(&mut self) -> &mut [StateID] { wire::u32s_to_state_ids_mut(self.table.as_mut()) } } /// An iterator over start state IDs. /// /// This iterator yields a triple of start state ID, the anchored mode and the /// start state type. If a pattern ID is relevant, then the anchored mode will /// contain it. Start states with an anchored mode containing a pattern ID will /// only occur when the DFA was compiled with start states for each pattern /// (which is disabled by default). pub(crate) struct StartStateIter<'a> { st: StartTable<&'a [u32]>, i: usize, } impl<'a> Iterator for StartStateIter<'a> { type Item = (StateID, Anchored, Start); fn next(&mut self) -> Option<(StateID, Anchored, Start)> { let i = self.i; let table = self.st.table(); if i >= table.len() { return None; } self.i += 1; // This unwrap is okay since the stride of the starting state table // must always match the number of start state types. let start_type = Start::from_usize(i % self.st.stride).unwrap(); let anchored = if i < self.st.stride { Anchored::No } else if i < (2 * self.st.stride) { Anchored::Yes } else { let pid = (i - (2 * self.st.stride)) / self.st.stride; Anchored::Pattern(PatternID::new(pid).unwrap()) }; Some((table[i], anchored, start_type)) } } /// This type represents that patterns that should be reported whenever a DFA /// enters a match state. This structure exists to support DFAs that search for /// matches for multiple regexes. /// /// This structure relies on the fact that all match states in a DFA occur /// contiguously in the DFA's transition table. (See dfa/special.rs for a more /// detailed breakdown of the representation.) Namely, when a match occurs, we /// know its state ID. Since we know the start and end of the contiguous region /// of match states, we can use that to compute the position at which the match /// state occurs. That in turn is used as an offset into this structure. #[derive(Clone, Debug)] struct MatchStates<T> { /// Slices is a flattened sequence of pairs, where each pair points to a /// sub-slice of pattern_ids. The first element of the pair is an offset /// into pattern_ids and the second element of the pair is the number /// of 32-bit pattern IDs starting at that position. That is, each pair /// corresponds to a single DFA match state and its corresponding match /// IDs. The number of pairs always corresponds to the number of distinct /// DFA match states. /// /// In practice, T is either Vec<u32> or &[u32]. slices: T, /// A flattened sequence of pattern IDs for each DFA match state. The only /// way to correctly read this sequence is indirectly via `slices`. /// /// In practice, T is either Vec<u32> or &[u32]. pattern_ids: T, /// The total number of unique patterns represented by these match states. 
pattern_len: usize, } impl<'a> MatchStates<&'a [u32]> { unsafe fn from_bytes_unchecked( mut slice: &'a [u8], ) -> Result<(MatchStates<&'a [u32]>, usize), DeserializeError> { let slice_start = slice.as_ptr().as_usize(); // Read the total number of match states. let (state_len, nr) = wire::try_read_u32_as_usize(slice, "match state length")?; slice = &slice[nr..]; // Read the slice start/length pairs. let pair_len = wire::mul(2, state_len, "match state offset pairs")?; let slices_bytes_len = wire::mul( pair_len, PatternID::SIZE, "match state slice offset byte length", )?; wire::check_slice_len(slice, slices_bytes_len, "match state slices")?; wire::check_alignment::<PatternID>(slice)?; let slices_bytes = &slice[..slices_bytes_len]; slice = &slice[slices_bytes_len..]; // SAFETY: Since PatternID is always representable as a u32, all we // need to do is ensure that we have the proper length and alignment. // We've checked both above, so the cast below is safe. // // N.B. This is one of the few not-safe snippets in this function, // so we mark it explicitly to call it out. let slices = core::slice::from_raw_parts( slices_bytes.as_ptr().cast::<u32>(), pair_len, ); // Read the total number of unique pattern IDs (which is always 1 more // than the maximum pattern ID in this automaton, since pattern IDs are // handed out contiguously starting at 0). let (pattern_len, nr) = wire::try_read_u32_as_usize(slice, "pattern length")?; slice = &slice[nr..]; // Now read the pattern ID length. We don't need to store this // explicitly, but we need it to know how many pattern IDs to read. let (idlen, nr) = wire::try_read_u32_as_usize(slice, "pattern ID length")?; slice = &slice[nr..]; // Read the actual pattern IDs. let pattern_ids_len = wire::mul(idlen, PatternID::SIZE, "pattern ID byte length")?; wire::check_slice_len(slice, pattern_ids_len, "match pattern IDs")?; wire::check_alignment::<PatternID>(slice)?; let pattern_ids_bytes = &slice[..pattern_ids_len]; slice = &slice[pattern_ids_len..]; // SAFETY: Since PatternID is always representable as a u32, all we // need to do is ensure that we have the proper length and alignment. // We've checked both above, so the cast below is safe. // // N.B. This is one of the few not-safe snippets in this function, // so we mark it explicitly to call it out. let pattern_ids = core::slice::from_raw_parts( pattern_ids_bytes.as_ptr().cast::<u32>(), idlen, ); let ms = MatchStates { slices, pattern_ids, pattern_len }; Ok((ms, slice.as_ptr().as_usize() - slice_start)) } } #[cfg(feature = "dfa-build")] impl MatchStates<Vec<u32>> { fn empty(pattern_len: usize) -> MatchStates<Vec<u32>> { assert!(pattern_len <= PatternID::LIMIT); MatchStates { slices: vec![], pattern_ids: vec![], pattern_len } } fn new( matches: &BTreeMap<StateID, Vec<PatternID>>, pattern_len: usize, ) -> Result<MatchStates<Vec<u32>>, BuildError> { let mut m = MatchStates::empty(pattern_len); for (_, pids) in matches.iter() { let start = PatternID::new(m.pattern_ids.len()) .map_err(|_| BuildError::too_many_match_pattern_ids())?; m.slices.push(start.as_u32()); // This is always correct since the number of patterns in a single // match state can never exceed maximum number of allowable // patterns. Why? Because a pattern can only appear once in a // particular match state, by construction. (And since our pattern // ID limit is one less than u32::MAX, we're guaranteed that the // length fits in a u32.) 
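            // Together with the offset pushed above, this forms the
            // (offset, count) pair for this match state, matching the layout
            // documented on the `slices` field.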
m.slices.push(u32::try_from(pids.len()).unwrap()); for &pid in pids { m.pattern_ids.push(pid.as_u32()); } } m.pattern_len = pattern_len; Ok(m) } fn new_with_map( &self, matches: &BTreeMap<StateID, Vec<PatternID>>, ) -> Result<MatchStates<Vec<u32>>, BuildError> { MatchStates::new(matches, self.pattern_len) } } impl<T: AsRef<[u32]>> MatchStates<T> { /// Writes a serialized form of these match states to the buffer given. If /// the buffer is too small, then an error is returned. To determine how /// big the buffer must be, use `write_to_len`. fn write_to<E: Endian>( &self, mut dst: &mut [u8], ) -> Result<usize, SerializeError> { let nwrite = self.write_to_len(); if dst.len() < nwrite { return Err(SerializeError::buffer_too_small("match states")); } dst = &mut dst[..nwrite]; // write state ID length // Unwrap is OK since number of states is guaranteed to fit in a u32. E::write_u32(u32::try_from(self.len()).unwrap(), dst); dst = &mut dst[size_of::<u32>()..]; // write slice offset pairs for &pid in self.slices() { let n = wire::write_pattern_id::<E>(pid, &mut dst); dst = &mut dst[n..]; } // write unique pattern ID length // Unwrap is OK since number of patterns is guaranteed to fit in a u32. E::write_u32(u32::try_from(self.pattern_len).unwrap(), dst); dst = &mut dst[size_of::<u32>()..]; // write pattern ID length // Unwrap is OK since we check at construction (and deserialization) // that the number of patterns is representable as a u32. E::write_u32(u32::try_from(self.pattern_ids().len()).unwrap(), dst); dst = &mut dst[size_of::<u32>()..]; // write pattern IDs for &pid in self.pattern_ids() { let n = wire::write_pattern_id::<E>(pid, &mut dst); dst = &mut dst[n..]; } Ok(nwrite) } /// Returns the number of bytes the serialized form of these match states /// will use. fn write_to_len(&self) -> usize { size_of::<u32>() // match state length + (self.slices().len() * PatternID::SIZE) + size_of::<u32>() // unique pattern ID length + size_of::<u32>() // pattern ID length + (self.pattern_ids().len() * PatternID::SIZE) } /// Valides that the match state info is itself internally consistent and /// consistent with the recorded match state region in the given DFA. fn validate(&self, dfa: &DFA<T>) -> Result<(), DeserializeError> { if self.len() != dfa.special.match_len(dfa.stride()) { return Err(DeserializeError::generic( "match state length mismatch", )); } for si in 0..self.len() { let start = self.slices()[si * 2].as_usize(); let len = self.slices()[si * 2 + 1].as_usize(); if start >= self.pattern_ids().len() { return Err(DeserializeError::generic( "invalid pattern ID start offset", )); } if start + len > self.pattern_ids().len() { return Err(DeserializeError::generic( "invalid pattern ID length", )); } for mi in 0..len { let pid = self.pattern_id(si, mi); if pid.as_usize() >= self.pattern_len { return Err(DeserializeError::generic( "invalid pattern ID", )); } } } Ok(()) } /// Converts these match states back into their map form. This is useful /// when shuffling states, as the normal MatchStates representation is not /// amenable to easy state swapping. But with this map, to swap id1 and /// id2, all you need to do is: /// /// if let Some(pids) = map.remove(&id1) { /// map.insert(id2, pids); /// } /// /// Once shuffling is done, use MatchStates::new to convert back. 
#[cfg(feature = "dfa-build")] fn to_map(&self, dfa: &DFA<T>) -> BTreeMap<StateID, Vec<PatternID>> { let mut map = BTreeMap::new(); for i in 0..self.len() { let mut pids = vec![]; for j in 0..self.pattern_len(i) { pids.push(self.pattern_id(i, j)); } map.insert(self.match_state_id(dfa, i), pids); } map } /// Converts these match states to a borrowed value. fn as_ref(&self) -> MatchStates<&'_ [u32]> { MatchStates { slices: self.slices.as_ref(), pattern_ids: self.pattern_ids.as_ref(), pattern_len: self.pattern_len, } } /// Converts these match states to an owned value. #[cfg(feature = "alloc")] fn to_owned(&self) -> MatchStates<alloc::vec::Vec<u32>> { MatchStates { slices: self.slices.as_ref().to_vec(), pattern_ids: self.pattern_ids.as_ref().to_vec(), pattern_len: self.pattern_len, } } /// Returns the match state ID given the match state index. (Where the /// first match state corresponds to index 0.) /// /// This panics if there is no match state at the given index. fn match_state_id(&self, dfa: &DFA<T>, index: usize) -> StateID { assert!(dfa.special.matches(), "no match states to index"); // This is one of the places where we rely on the fact that match // states are contiguous in the transition table. Namely, that the // first match state ID always corresponds to dfa.special.min_start. // From there, since we know the stride, we can compute the ID of any // match state given its index. let stride2 = u32::try_from(dfa.stride2()).unwrap(); let offset = index.checked_shl(stride2).unwrap(); let id = dfa.special.min_match.as_usize().checked_add(offset).unwrap(); let sid = StateID::new(id).unwrap(); assert!(dfa.is_match_state(sid)); sid } /// Returns the pattern ID at the given match index for the given match /// state. /// /// The match state index is the state index minus the state index of the /// first match state in the DFA. /// /// The match index is the index of the pattern ID for the given state. /// The index must be less than `self.pattern_len(state_index)`. #[cfg_attr(feature = "perf-inline", inline(always))] fn pattern_id(&self, state_index: usize, match_index: usize) -> PatternID { self.pattern_id_slice(state_index)[match_index] } /// Returns the number of patterns in the given match state. /// /// The match state index is the state index minus the state index of the /// first match state in the DFA. #[cfg_attr(feature = "perf-inline", inline(always))] fn pattern_len(&self, state_index: usize) -> usize { self.slices()[state_index * 2 + 1].as_usize() } /// Returns all of the pattern IDs for the given match state index. /// /// The match state index is the state index minus the state index of the /// first match state in the DFA. #[cfg_attr(feature = "perf-inline", inline(always))] fn pattern_id_slice(&self, state_index: usize) -> &[PatternID] { let start = self.slices()[state_index * 2].as_usize(); let len = self.pattern_len(state_index); &self.pattern_ids()[start..start + len] } /// Returns the pattern ID offset slice of u32 as a slice of PatternID. #[cfg_attr(feature = "perf-inline", inline(always))] fn slices(&self) -> &[PatternID] { wire::u32s_to_pattern_ids(self.slices.as_ref()) } /// Returns the total number of match states. #[cfg_attr(feature = "perf-inline", inline(always))] fn len(&self) -> usize { assert_eq!(0, self.slices().len() % 2); self.slices().len() / 2 } /// Returns the pattern ID slice of u32 as a slice of PatternID. 
#[cfg_attr(feature = "perf-inline", inline(always))] fn pattern_ids(&self) -> &[PatternID] { wire::u32s_to_pattern_ids(self.pattern_ids.as_ref()) } /// Return the memory usage, in bytes, of these match pairs. fn memory_usage(&self) -> usize { (self.slices().len() + self.pattern_ids().len()) * PatternID::SIZE } } /// A common set of flags for both dense and sparse DFAs. This primarily /// centralizes the serialization format of these flags at a bitset. #[derive(Clone, Copy, Debug)] pub(crate) struct Flags { /// Whether the DFA can match the empty string. When this is false, all /// matches returned by this DFA are guaranteed to have non-zero length. pub(crate) has_empty: bool, /// Whether the DFA should only produce matches with spans that correspond /// to valid UTF-8. This also includes omitting any zero-width matches that /// split the UTF-8 encoding of a codepoint. pub(crate) is_utf8: bool, /// Whether the DFA is always anchored or not, regardless of `Input` /// configuration. This is useful for avoiding a reverse scan even when /// executing unanchored searches. pub(crate) is_always_start_anchored: bool, } impl Flags { /// Creates a set of flags for a DFA from an NFA. /// /// N.B. This constructor was defined at the time of writing because all /// of the flags are derived directly from the NFA. If this changes in the /// future, we might be more thoughtful about how the `Flags` value is /// itself built. #[cfg(feature = "dfa-build")] fn from_nfa(nfa: &thompson::NFA) -> Flags { Flags { has_empty: nfa.has_empty(), is_utf8: nfa.is_utf8(), is_always_start_anchored: nfa.is_always_start_anchored(), } } /// Deserializes the flags from the given slice. On success, this also /// returns the number of bytes read from the slice. pub(crate) fn from_bytes( slice: &[u8], ) -> Result<(Flags, usize), DeserializeError> { let (bits, nread) = wire::try_read_u32(slice, "flag bitset")?; let flags = Flags { has_empty: bits & (1 << 0) != 0, is_utf8: bits & (1 << 1) != 0, is_always_start_anchored: bits & (1 << 2) != 0, }; Ok((flags, nread)) } /// Writes these flags to the given byte slice. If the buffer is too small, /// then an error is returned. To determine how big the buffer must be, /// use `write_to_len`. pub(crate) fn write_to<E: Endian>( &self, dst: &mut [u8], ) -> Result<usize, SerializeError> { fn bool_to_int(b: bool) -> u32 { if b { 1 } else { 0 } } let nwrite = self.write_to_len(); if dst.len() < nwrite { return Err(SerializeError::buffer_too_small("flag bitset")); } let bits = (bool_to_int(self.has_empty) << 0) | (bool_to_int(self.is_utf8) << 1) | (bool_to_int(self.is_always_start_anchored) << 2); E::write_u32(bits, dst); Ok(nwrite) } /// Returns the number of bytes the serialized form of these flags /// will use. pub(crate) fn write_to_len(&self) -> usize { size_of::<u32>() } } /// An iterator over all states in a DFA. /// /// This iterator yields a tuple for each state. The first element of the /// tuple corresponds to a state's identifier, and the second element /// corresponds to the state itself (comprised of its transitions). /// /// `'a` corresponding to the lifetime of original DFA, `T` corresponds to /// the type of the transition table itself. 
pub(crate) struct StateIter<'a, T> {
    tt: &'a TransitionTable<T>,
    it: iter::Enumerate<slice::Chunks<'a, StateID>>,
}

impl<'a, T: AsRef<[u32]>> Iterator for StateIter<'a, T> {
    type Item = State<'a>;

    fn next(&mut self) -> Option<State<'a>> {
        self.it.next().map(|(index, _)| {
            let id = self.tt.to_state_id(index);
            self.tt.state(id)
        })
    }
}

/// An immutable representation of a single DFA state.
///
/// `'a` corresponds to the lifetime of a DFA's transition table.
pub(crate) struct State<'a> {
    id: StateID,
    stride2: usize,
    transitions: &'a [StateID],
}

impl<'a> State<'a> {
    /// Return an iterator over all transitions in this state. This yields
    /// a number of transitions equivalent to the alphabet length of the
    /// corresponding DFA.
    ///
    /// Each transition is represented by a tuple. The first element is
    /// the input byte for that transition and the second element is the
    /// transition itself.
    pub(crate) fn transitions(&self) -> StateTransitionIter<'_> {
        StateTransitionIter {
            len: self.transitions.len(),
            it: self.transitions.iter().enumerate(),
        }
    }

    /// Return an iterator over a sparse representation of the transitions in
    /// this state. Only non-dead transitions are returned.
    ///
    /// The "sparse" representation in this case corresponds to a sequence of
    /// triples. The first two elements of the triple comprise an inclusive
    /// byte range while the last element corresponds to the transition taken
    /// for all bytes in the range.
    ///
    /// This is somewhat more condensed than the classical sparse
    /// representation (where you have an element for every non-dead
    /// transition), but in practice, checking if a byte is in a range is very
    /// cheap and using ranges tends to conserve quite a bit more space.
    pub(crate) fn sparse_transitions(&self) -> StateSparseTransitionIter<'_> {
        StateSparseTransitionIter { dense: self.transitions(), cur: None }
    }

    /// Returns the identifier for this state.
    pub(crate) fn id(&self) -> StateID {
        self.id
    }

    /// Analyzes this state to determine whether it can be accelerated. If so,
    /// it returns an accelerator that contains at least one byte.
    #[cfg(feature = "dfa-build")]
    fn accelerate(&self, classes: &ByteClasses) -> Option<Accel> {
        // We just try to add bytes to our accelerator. Once adding fails
        // (because we've added too many bytes), then give up.
        let mut accel = Accel::new();
        for (class, id) in self.transitions() {
            if id == self.id() {
                continue;
            }
            for unit in classes.elements(class) {
                if let Some(byte) = unit.as_u8() {
                    if !accel.add(byte) {
                        return None;
                    }
                }
            }
        }
        if accel.is_empty() {
            None
        } else {
            Some(accel)
        }
    }
}

impl<'a> fmt::Debug for State<'a> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        for (i, (start, end, sid)) in self.sparse_transitions().enumerate() {
            let id = if f.alternate() {
                sid.as_usize()
            } else {
                sid.as_usize() >> self.stride2
            };
            if i > 0 {
                write!(f, ", ")?;
            }
            if start == end {
                write!(f, "{:?} => {:?}", start, id)?;
            } else {
                write!(f, "{:?}-{:?} => {:?}", start, end, id)?;
            }
        }
        Ok(())
    }
}

/// An iterator over all transitions in a single DFA state. This yields
/// a number of transitions equivalent to the alphabet length of the
/// corresponding DFA.
///
/// Each transition is represented by a tuple. The first element is the input
/// byte for that transition and the second element is the transition itself.
#[derive(Debug)] pub(crate) struct StateTransitionIter<'a> { len: usize, it: iter::Enumerate<slice::Iter<'a, StateID>>, } impl<'a> Iterator for StateTransitionIter<'a> { type Item = (alphabet::Unit, StateID); fn next(&mut self) -> Option<(alphabet::Unit, StateID)> { self.it.next().map(|(i, &id)| { let unit = if i + 1 == self.len { alphabet::Unit::eoi(i) } else { let b = u8::try_from(i) .expect("raw byte alphabet is never exceeded"); alphabet::Unit::u8(b) }; (unit, id) }) } } /// An iterator over all non-DEAD transitions in a single DFA state using a /// sparse representation. /// /// Each transition is represented by a triple. The first two elements of the /// triple comprise an inclusive byte range while the last element corresponds /// to the transition taken for all bytes in the range. /// /// As a convenience, this always returns `alphabet::Unit` values of the same /// type. That is, you'll never get a (byte, EOI) or a (EOI, byte). Only (byte, /// byte) and (EOI, EOI) values are yielded. #[derive(Debug)] pub(crate) struct StateSparseTransitionIter<'a> { dense: StateTransitionIter<'a>, cur: Option<(alphabet::Unit, alphabet::Unit, StateID)>, } impl<'a> Iterator for StateSparseTransitionIter<'a> { type Item = (alphabet::Unit, alphabet::Unit, StateID); fn next(&mut self) -> Option<(alphabet::Unit, alphabet::Unit, StateID)> { while let Some((unit, next)) = self.dense.next() { let (prev_start, prev_end, prev_next) = match self.cur { Some(t) => t, None => { self.cur = Some((unit, unit, next)); continue; } }; if prev_next == next && !unit.is_eoi() { self.cur = Some((prev_start, unit, prev_next)); } else { self.cur = Some((unit, unit, next)); if prev_next != DEAD { return Some((prev_start, prev_end, prev_next)); } } } if let Some((start, end, next)) = self.cur.take() { if next != DEAD { return Some((start, end, next)); } } None } } /// An error that occurred during the construction of a DFA. /// /// This error does not provide many introspection capabilities. There are /// generally only two things you can do with it: /// /// * Obtain a human readable message via its `std::fmt::Display` impl. /// * Access an underlying [`nfa::thompson::BuildError`](thompson::BuildError) /// type from its `source` method via the `std::error::Error` trait. This error /// only occurs when using convenience routines for building a DFA directly /// from a pattern string. /// /// When the `std` feature is enabled, this implements the `std::error::Error` /// trait. #[cfg(feature = "dfa-build")] #[derive(Clone, Debug)] pub struct BuildError { kind: BuildErrorKind, } /// The kind of error that occurred during the construction of a DFA. /// /// Note that this error is non-exhaustive. Adding new variants is not /// considered a breaking change. #[cfg(feature = "dfa-build")] #[derive(Clone, Debug)] enum BuildErrorKind { /// An error that occurred while constructing an NFA as a precursor step /// before a DFA is compiled. NFA(thompson::BuildError), /// An error that occurred because an unsupported regex feature was used. /// The message string describes which unsupported feature was used. /// /// The primary regex feature that is unsupported by DFAs is the Unicode /// word boundary look-around assertion (`\b`). This can be worked around /// by either using an ASCII word boundary (`(?-u:\b)`) or by enabling /// Unicode word boundaries when building a DFA. Unsupported(&'static str), /// An error that occurs if too many states are produced while building a /// DFA. 
TooManyStates, /// An error that occurs if too many start states are needed while building /// a DFA. /// /// This is a kind of oddball error that occurs when building a DFA with /// start states enabled for each pattern and enough patterns to cause /// the table of start states to overflow `usize`. TooManyStartStates, /// This is another oddball error that can occur if there are too many /// patterns spread out across too many match states. TooManyMatchPatternIDs, /// An error that occurs if the DFA got too big during determinization. DFAExceededSizeLimit { limit: usize }, /// An error that occurs if auxiliary storage (not the DFA) used during /// determinization got too big. DeterminizeExceededSizeLimit { limit: usize }, } #[cfg(feature = "dfa-build")] impl BuildError { /// Return the kind of this error. fn kind(&self) -> &BuildErrorKind { &self.kind } pub(crate) fn nfa(err: thompson::BuildError) -> BuildError { BuildError { kind: BuildErrorKind::NFA(err) } } pub(crate) fn unsupported_dfa_word_boundary_unicode() -> BuildError { let msg = "cannot build DFAs for regexes with Unicode word \ boundaries; switch to ASCII word boundaries, or \ heuristically enable Unicode word boundaries or use a \ different regex engine"; BuildError { kind: BuildErrorKind::Unsupported(msg) } } pub(crate) fn too_many_states() -> BuildError { BuildError { kind: BuildErrorKind::TooManyStates } } pub(crate) fn too_many_start_states() -> BuildError { BuildError { kind: BuildErrorKind::TooManyStartStates } } pub(crate) fn too_many_match_pattern_ids() -> BuildError { BuildError { kind: BuildErrorKind::TooManyMatchPatternIDs } } pub(crate) fn dfa_exceeded_size_limit(limit: usize) -> BuildError { BuildError { kind: BuildErrorKind::DFAExceededSizeLimit { limit } } } pub(crate) fn determinize_exceeded_size_limit(limit: usize) -> BuildError { BuildError { kind: BuildErrorKind::DeterminizeExceededSizeLimit { limit }, } } } #[cfg(all(feature = "std", feature = "dfa-build"))] impl std::error::Error for BuildError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match self.kind() { BuildErrorKind::NFA(ref err) => Some(err), _ => None, } } } #[cfg(feature = "dfa-build")] impl core::fmt::Display for BuildError { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { match self.kind() { BuildErrorKind::NFA(_) => write!(f, "error building NFA"), BuildErrorKind::Unsupported(ref msg) => { write!(f, "unsupported regex feature for DFAs: {}", msg) } BuildErrorKind::TooManyStates => write!( f, "number of DFA states exceeds limit of {}", StateID::LIMIT, ), BuildErrorKind::TooManyStartStates => { let stride = Start::len(); // The start table has `stride` entries for starting states for // the entire DFA, and then `stride` entries for each pattern // if start states for each pattern are enabled (which is the // only way this error can occur). Thus, the total number of // patterns that can fit in the table is `stride` less than // what we can allocate. 
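                // Concretely, with the current stride of 4, the limit
                // reported below works out to (isize::MAX - 4) / 4 patterns.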
let max = usize::try_from(core::isize::MAX).unwrap(); let limit = (max - stride) / stride; write!( f, "compiling DFA with start states exceeds pattern \ pattern limit of {}", limit, ) } BuildErrorKind::TooManyMatchPatternIDs => write!( f, "compiling DFA with total patterns in all match states \ exceeds limit of {}", PatternID::LIMIT, ), BuildErrorKind::DFAExceededSizeLimit { limit } => write!( f, "DFA exceeded size limit of {:?} during determinization", limit, ), BuildErrorKind::DeterminizeExceededSizeLimit { limit } => { write!(f, "determinization exceeded size limit of {:?}", limit) } } } } #[cfg(all(test, feature = "syntax", feature = "dfa-build"))] mod tests { use super::*; #[test] fn errors_with_unicode_word_boundary() { let pattern = r"\b"; assert!(Builder::new().build(pattern).is_err()); } #[test] fn roundtrip_never_match() { let dfa = DFA::never_match().unwrap(); let (buf, _) = dfa.to_bytes_native_endian(); let dfa: DFA<&[u32]> = DFA::from_bytes(&buf).unwrap().0; assert_eq!(None, dfa.try_search_fwd(&Input::new("foo12345")).unwrap()); } #[test] fn roundtrip_always_match() { use crate::HalfMatch; let dfa = DFA::always_match().unwrap(); let (buf, _) = dfa.to_bytes_native_endian(); let dfa: DFA<&[u32]> = DFA::from_bytes(&buf).unwrap().0; assert_eq!( Some(HalfMatch::must(0, 0)), dfa.try_search_fwd(&Input::new("foo12345")).unwrap() ); } // See the analogous test in src/hybrid/dfa.rs. #[test] fn heuristic_unicode_reverse() { let dfa = DFA::builder() .configure(DFA::config().unicode_word_boundary(true)) .thompson(thompson::Config::new().reverse(true)) .build(r"\b[0-9]+\b") .unwrap(); let input = Input::new("β123").range(2..); let expected = MatchError::quit(0xB2, 1); let got = dfa.try_search_rev(&input); assert_eq!(Err(expected), got); let input = Input::new("123β").range(..3); let expected = MatchError::quit(0xCE, 3); let got = dfa.try_search_rev(&input); assert_eq!(Err(expected), got); } } <file_sep>/regex-cli/cmd/find/mod.rs use lexopt::{Arg, Parser}; use crate::args::{self, Configurable, Usage}; mod capture; mod half; mod r#match; mod which; pub fn run(p: &mut Parser) -> anyhow::Result<()> { const USAGE: &'static str = "\ Executes a search. The sub-command determines what kind of search to execute. The kind of the search to execute also determines which regex engines are available. For example, a lazy DFA cannot report the match positions of capture groups, so there is no 'regex-cli find capture hybrid' command. USAGE: regex-cli find <command> COMMANDS: capture Search for regexes with capture groups. half Search for half matches. match Search for full matches. which Search for which patterns match in a set. "; let cmd = args::next_as_command(USAGE, p)?; match &*cmd { "capture" => capture::run(p), "half" => half::run(p), "match" => r#match::run(p), "which" => which::run(p), unk => anyhow::bail!("unrecognized command '{}'", unk), } } #[derive(Debug, Default)] struct Config { count: bool, repeat: Option<u32>, } impl Config { fn repeat(&self) -> u32 { self.repeat.unwrap_or(1) } } impl Configurable for Config { fn configure( &mut self, p: &mut Parser, arg: &mut Arg, ) -> anyhow::Result<bool> { match *arg { Arg::Short('c') | Arg::Long("count") => { self.count = true; } Arg::Long("repeat") => { self.repeat = Some(args::parse(p, "--repeat")?); } _ => return Ok(false), } Ok(true) } fn usage(&self) -> &[Usage] { const USAGES: &[Usage] = &[ Usage::new( "-c, --count", "Show a count of all matches.", r#" Prints a count of all matches instead of printing the matches themselves. 
For the 'capture' command, this prints the number of times each group matched. For the 'which' command, this just prints whether each pattern matched or not. "#, ), Usage::new( "--repeat", "Repeat the search this many times.", r#" Repeat the search this many times. By default, this is set to 1. This is useful when you want the search time to dominate the runtime of the program, or if the search is otherwise too short/fast to measure reliably. Note that this will print the matches repeatedly by default as well. For this reason, it's usually best to use this option in combination with -c/--count. "#, ), ]; USAGES } } <file_sep>/regex-cli/cmd/generate/serialize/dfa.rs // The code in this module honestly sucks. I did at one point try and make it a // little more composable, particularly with respect to the stuff that writes // the Rust code, but it became an unintelligble mess. Instead, I squashed // it down into four functions: dense DFAs, dense regexes, sparse DFAs and // sparse regexes. And each of those functions handles the 'regex-automata', // 'once-cell' and 'lazy-static' variants. So that's 12 different variants. // There's *some* sharing within each function at least... // // With that said, I don't expect this code generation task to expand much. // We'll probably support std's OnceCell once that stabilizes, but otherwise, // I think we'll be stuck with just full DFAs for the time being. If and when // the code generation task expands to other objects (NFAs?), maybe we should // reconsider how this code is structured. use std::{ io::Write, path::{Path, PathBuf}, }; use { anyhow::Context, lexopt::{Arg, Parser, ValueExt}, regex_automata::dfa::{dense, sparse}, }; use crate::{ args::{self, Usage}, util, }; pub fn run_dense(p: &mut Parser) -> anyhow::Result<()> { const USAGE: &'static str = "\ Serializes a dense fully compiled DFA (or regex DFA) to disk. USAGE: regex-cli generate serialize dense <command> ENGINES: dfa Serialize a fully compiled dense DFA. regex Serialize a pair of fully compiled dense DFAs as a regex. "; match &*args::next_as_command(USAGE, p)? { "dfa" => run_dense_dfa(p), "regex" => run_dense_regex(p), unk => anyhow::bail!("unrecognized command '{}'", unk), } } fn run_dense_dfa(p: &mut Parser) -> anyhow::Result<()> { const USAGE: &'static str = "\ Serializes a dense fully compiled DFA to disk. USAGE: regex-cli generate serialize dense dfa <name> <outdir> [<pattern> ...] TIP: use -h for short docs and --help for long docs OPTIONS: %options% "; let mut config = Config::default(); let mut patterns = args::patterns::Config::positional(); let mut syntax = args::syntax::Config::default(); let mut thompson = args::thompson::Config::default(); let mut dfa = args::dfa::Config::default(); args::configure( p, USAGE, &mut [ // This needs to come first, as it greedily parses the first // two positional parameters, and then 'patterns' takes the rest. &mut config, &mut patterns, &mut syntax, &mut thompson, &mut dfa, ], )?; let pats = patterns.get()?; let asts = syntax.asts(&pats)?; let hirs = syntax.hirs(&pats, &asts)?; let nfa = thompson.from_hirs(&hirs)?; let dfa = dfa.from_nfa(&nfa)?; let wtr = config.writer()?; wtr.write_dfa_dense_bytes(&dfa, "")?; wtr.write_dfa_dense_rust()?; Ok(()) } fn run_dense_regex(p: &mut Parser) -> anyhow::Result<()> { const USAGE: &'static str = "\ Serializes a dense fully compiled DFA regex to disk. USAGE: regex-cli generate serialize dense regex <name> <outdir> [<pattern> ...] 
TIP: use -h for short docs and --help for long docs OPTIONS: %options% "; let mut config = Config::default(); let mut patterns = args::patterns::Config::positional(); let mut syntax = args::syntax::Config::default(); let mut thompson = args::thompson::Config::default(); let mut dfa = args::dfa::Config::default(); args::configure( p, USAGE, &mut [ // This needs to come first, as it greedily parses the first // two positional parameters, and then 'patterns' takes the rest. &mut config, &mut patterns, &mut syntax, &mut thompson, &mut dfa, ], )?; let pats = patterns.get()?; let asts = syntax.asts(&pats)?; let hirs = syntax.hirs(&pats, &asts)?; let nfafwd = thompson.from_hirs(&hirs)?; let nfarev = thompson.reversed().from_hirs(&hirs)?; let dfafwd = dfa.from_nfa(&nfafwd)?; let dfarev = dfa.reversed().from_nfa(&nfarev)?; let wtr = config.writer()?; wtr.write_dfa_dense_bytes(&dfafwd, "_fwd")?; wtr.write_dfa_dense_bytes(&dfarev, "_rev")?; wtr.write_regex_dense_rust()?; Ok(()) } pub fn run_sparse(p: &mut Parser) -> anyhow::Result<()> { const USAGE: &'static str = "\ Serializes a sparse fully compiled DFA (or regex DFA) to disk. USAGE: regex-cli generate serialize sparse <command> ENGINES: dfa Serialize a fully compiled sparse DFA. regex Serialize a pair of fully compiled sparse DFAs as a regex. "; match &*args::next_as_command(USAGE, p)? { "dfa" => run_sparse_dfa(p), "regex" => run_sparse_regex(p), unk => anyhow::bail!("unrecognized command '{}'", unk), } } fn run_sparse_dfa(p: &mut Parser) -> anyhow::Result<()> { const USAGE: &'static str = "\ Serializes a sparse fully compiled DFA to disk. USAGE: regex-cli generate serialize sparse dfa <name> <outdir> [<pattern> ...] TIP: use -h for short docs and --help for long docs OPTIONS: %options% "; let mut config = Config::default(); let mut patterns = args::patterns::Config::positional(); let mut syntax = args::syntax::Config::default(); let mut thompson = args::thompson::Config::default(); let mut dfa = args::dfa::Config::default(); args::configure( p, USAGE, &mut [ // This needs to come first, as it greedily parses the first // two positional parameters, and then 'patterns' takes the rest. &mut config, &mut patterns, &mut syntax, &mut thompson, &mut dfa, ], )?; let pats = patterns.get()?; let asts = syntax.asts(&pats)?; let hirs = syntax.hirs(&pats, &asts)?; let nfa = thompson.from_hirs(&hirs)?; let dfa = dfa.from_nfa_sparse(&nfa)?; let wtr = config.writer()?; wtr.write_dfa_sparse_bytes(&dfa, "")?; wtr.write_dfa_sparse_rust()?; Ok(()) } fn run_sparse_regex(p: &mut Parser) -> anyhow::Result<()> { const USAGE: &'static str = "\ Serializes a sparse fully compiled DFA regex to disk. USAGE: regex-cli generate serialize sparse regex <name> <outdir> [<pattern> ...] TIP: use -h for short docs and --help for long docs OPTIONS: %options% "; let mut config = Config::default(); let mut patterns = args::patterns::Config::positional(); let mut syntax = args::syntax::Config::default(); let mut thompson = args::thompson::Config::default(); let mut dfa = args::dfa::Config::default(); args::configure( p, USAGE, &mut [ // This needs to come first, as it greedily parses the first // two positional parameters, and then 'patterns' takes the rest. 
&mut config, &mut patterns, &mut syntax, &mut thompson, &mut dfa, ], )?; let pats = patterns.get()?; let asts = syntax.asts(&pats)?; let hirs = syntax.hirs(&pats, &asts)?; let nfafwd = thompson.from_hirs(&hirs)?; let nfarev = thompson.reversed().from_hirs(&hirs)?; let dfafwd = dfa.from_nfa_sparse(&nfafwd)?; let dfarev = dfa.reversed().from_nfa_sparse(&nfarev)?; let wtr = config.writer()?; wtr.write_dfa_sparse_bytes(&dfafwd, "_fwd")?; wtr.write_dfa_sparse_bytes(&dfarev, "_rev")?; wtr.write_regex_sparse_rust()?; Ok(()) } #[derive(Debug, Default)] struct Config { name: Option<String>, outdir: Option<PathBuf>, safe: bool, rust_kind: RustKind, rustfmt: bool, } impl Config { fn writer(&self) -> anyhow::Result<Writer> { let varname = self.name()?.to_string(); let modname = rust_module_name(&varname); Ok(Writer { varname, modname, outdir: self.outdir()?.to_path_buf(), safe: self.safe, rust_kind: self.rust_kind, rustfmt: self.rustfmt, }) } fn name(&self) -> anyhow::Result<&str> { self.name.as_deref().ok_or_else(|| anyhow::anyhow!("missing <name>")) } fn outdir(&self) -> anyhow::Result<&Path> { self.outdir .as_deref() .ok_or_else(|| anyhow::anyhow!("missing <outdir>")) } } impl args::Configurable for Config { fn configure( &mut self, p: &mut Parser, arg: &mut Arg, ) -> anyhow::Result<bool> { match *arg { Arg::Long("safe") => { self.safe = true; } Arg::Long("rust") => { self.rust_kind = args::parse(p, "--rust")?; } Arg::Long("rustfmt") => { self.rustfmt = true; } Arg::Value(ref mut value) => { if self.name.is_none() { let v = std::mem::take(value); self.name = Some( v.string().context("<name> must be valid UTF-8")?, ); } else if self.outdir.is_none() { self.outdir = Some(PathBuf::from(std::mem::take(value))); } else { return Ok(false); } } _ => return Ok(false), } Ok(true) } fn usage(&self) -> &[Usage] { const USAGES: &'static [Usage] = &[ Usage::new( "--safe", "If possible, only use safe Rust code to deserialize DFAs.", r#" If possible, only use safe Rust code to deserialize DFAs. Generally speaking, this requires a more expensive deserialization process, as it needs to verify that every transition in the DFA points to a valid state. In other words, it does work that is proportional to the size of every transition in the DFA. Where as using deserialization that isn't safe will execute in constant time. Whether the extra checks take too much time depends. They probably don't. With that said, it is always correct to *not* use this flag, as the serialized DFA is generated by this crate. That is, the only way undefined behavior can occur is if there is a bug in the implementation of serialization or deserialization. Therefore, cases where --safe should be used are somewhat limited. For example, if the risk of a bug in the implementation is too high or if you're just paranoid. "#, ), Usage::new( "--rust <kind>", "Choose from: regex-automata, once-cell, lazy-static, none.", r#" This flag permits one to specify how the DFA is initially loaded. The default is regex-automata, which will use its 'util::lazy::Lazy' type. The 'Lazy' type works in no-std and no-alloc contexts. Otherwise, 'once-cell' will use the 'Lazy' type from the once_cell crate. 'lazy-static' will use the 'lazy_static!' macro from the lazy_static crate. And 'none' will not generate any Rust source code at all. "#, ), Usage::new( "--rustfmt", "Run rustfmt on the generated code.", r#" When set, rustfmt is run on the generated code. Without rustfmt, the code formatting may be arbitrarily bad. 
"#, ), ]; USAGES } } #[derive(Clone, Copy, Debug)] enum RustKind { RegexAutomata, OnceCell, LazyStatic, None, } impl Default for RustKind { fn default() -> RustKind { RustKind::RegexAutomata } } impl std::str::FromStr for RustKind { type Err = anyhow::Error; fn from_str(s: &str) -> anyhow::Result<RustKind> { match s { "regex-automata" => Ok(RustKind::RegexAutomata), "once-cell" => Ok(RustKind::OnceCell), "lazy-static" => Ok(RustKind::LazyStatic), "none" => Ok(RustKind::None), unk => anyhow::bail!("unrecognized rust output kind: '{}'", unk), } } } #[derive(Debug)] struct Writer { outdir: PathBuf, varname: String, modname: String, safe: bool, rust_kind: RustKind, rustfmt: bool, } impl Writer { fn write_dfa_dense_bytes( &self, dfa: &dense::DFA<Vec<u32>>, name_suffix: &str, ) -> anyhow::Result<()> { let (big_bytes, _) = dfa.to_bytes_big_endian(); let (lil_bytes, _) = dfa.to_bytes_little_endian(); let big_path = self.outdir.join(self.big_name(name_suffix)); let lil_path = self.outdir.join(self.lil_name(name_suffix)); std::fs::write(&big_path, &big_bytes) .with_context(|| format!("{}", big_path.display()))?; std::fs::write(&lil_path, &lil_bytes) .with_context(|| format!("{}", lil_path.display()))?; Ok(()) } fn write_dfa_sparse_bytes( &self, dfa: &sparse::DFA<Vec<u8>>, name_suffix: &str, ) -> anyhow::Result<()> { let big_bytes = dfa.to_bytes_big_endian(); let lil_bytes = dfa.to_bytes_little_endian(); let big_path = self.outdir.join(self.big_name(name_suffix)); let lil_path = self.outdir.join(self.lil_name(name_suffix)); std::fs::write(&big_path, &big_bytes) .with_context(|| format!("{}", big_path.display()))?; std::fs::write(&lil_path, &lil_bytes) .with_context(|| format!("{}", lil_path.display()))?; Ok(()) } fn write_dfa_dense_rust(&self) -> anyhow::Result<()> { if matches!(self.rust_kind, RustKind::None) { return Ok(()); } let outpath = self.outdir.join(format!("{}.rs", self.modname)); let mut wtr = std::fs::File::create(&outpath) .with_context(|| outpath.display().to_string())?; let auto_gen_message = self.auto_gen_message().trim().to_string(); let name = &self.varname; let bigname = self.big_name(""); let lilname = self.lil_name(""); let from_bytes = if self.safe { "DFA::from_bytes(&ALIGNED.bytes)" } else { "unsafe { DFA::from_bytes_unchecked(&ALIGNED.bytes) }" }; let deserialize = format!( r##" static ALIGNED: &AlignAs<[u8], u32> = &AlignAs {{ _align: [], #[cfg(target_endian = "big")] bytes: *include_bytes!("{bigname}"), #[cfg(target_endian = "little")] bytes: *include_bytes!("{lilname}"), }}; let (dfa, _) = {from_bytes}.expect("serialized DFA should be valid"); dfa "##, ); match self.rust_kind { RustKind::RegexAutomata => { writeln!( wtr, r##" {auto_gen_message} use regex_automata::{{ dfa::dense::DFA, util::{{lazy::Lazy, wire::AlignAs}}, }}; pub static {name}: Lazy<DFA<&'static [u32]>> = Lazy::new(|| {{ {deserialize} }}); "##, )?; } RustKind::OnceCell => { writeln!( wtr, r##" {auto_gen_message} use {{ once_cell::sync::Lazy, regex_automata::{{ dfa::dense::DFA, util::wire::AlignAs, }}, }}; pub static {name}: Lazy<DFA<&'static [u32]>> = Lazy::new(|| {{ {deserialize} }}); "##, )?; } RustKind::LazyStatic => { writeln!( wtr, r##" {auto_gen_message} use regex_automata::{{ dfa::dense::DFA, util::wire::AlignAs, }}; lazy_static::lazy_static! 
{{ pub static ref {name}: DFA<&'static [u32]> = {{ {deserialize} }}; }} "##, )?; } RustKind::None => unreachable!(), } if self.rustfmt { util::rustfmt(&outpath)?; } Ok(()) } fn write_dfa_sparse_rust(&self) -> anyhow::Result<()> { if matches!(self.rust_kind, RustKind::None) { return Ok(()); } let outpath = self.outdir.join(format!("{}.rs", self.modname)); let mut wtr = std::fs::File::create(&outpath) .with_context(|| outpath.display().to_string())?; let auto_gen_message = self.auto_gen_message().trim().to_string(); let name = &self.varname; let bigname = self.big_name(""); let lilname = self.lil_name(""); let from_bytes = if self.safe { "DFA::from_bytes(BYTES)" } else { "unsafe { DFA::from_bytes_unchecked(BYTES) }" }; let deserialize = format!( r##" #[cfg(target_endian = "big")] static BYTES: &'static [u8] = include_bytes!("{bigname}"); #[cfg(target_endian = "little")] static BYTES: &'static [u8] = include_bytes!("{lilname}"); let (dfa, _) = {from_bytes}.expect("serialized DFA should be valid"); dfa "##, ); match self.rust_kind { RustKind::RegexAutomata => { writeln!( wtr, r##" {auto_gen_message} use regex_automata::{{ dfa::sparse::DFA, util::lazy::Lazy, }}; pub static {name}: Lazy<DFA<&'static [u8]>> = Lazy::new(|| {{ {deserialize} }}); "##, )?; } RustKind::OnceCell => { writeln!( wtr, r##" {auto_gen_message} use {{ once_cell::sync::Lazy, regex_automata::dfa::sparse::DFA, }}; pub static {name}: Lazy<DFA<&'static [u8]>> = Lazy::new(|| {{ {deserialize} }}); "##, )?; } RustKind::LazyStatic => { writeln!( wtr, r##" {auto_gen_message} use regex_automata::dfa::sparse::DFA; lazy_static::lazy_static! {{ pub static ref {name}: DFA<&'static [u8]> = {{ {deserialize} }}; }} "##, )?; } RustKind::None => unreachable!(), } if self.rustfmt { util::rustfmt(&outpath)?; } Ok(()) } fn write_regex_dense_rust(&self) -> anyhow::Result<()> { if matches!(self.rust_kind, RustKind::None) { return Ok(()); } let outpath = self.outdir.join(format!("{}.rs", self.modname)); let mut wtr = std::fs::File::create(&outpath) .with_context(|| outpath.display().to_string())?; let auto_gen_message = self.auto_gen_message().trim().to_string(); let name = &self.varname; let fwdbigname = self.big_name("_fwd"); let fwdlilname = self.lil_name("_fwd"); let revbigname = self.big_name("_rev"); let revlilname = self.lil_name("_rev"); let from_bytes = if self.safe { "DFA::from_bytes(&ALIGNED.bytes)" } else { "unsafe { DFA::from_bytes_unchecked(&ALIGNED.bytes) }" }; let deserialize = format!( r##" let dfafwd = {{ static ALIGNED: &AlignAs<[u8], u32> = &AlignAs {{ _align: [], #[cfg(target_endian = "big")] bytes: *include_bytes!("{fwdbigname}"), #[cfg(target_endian = "little")] bytes: *include_bytes!("{fwdlilname}"), }}; {from_bytes}.expect("serialized forward DFA should be valid").0 }}; let dfarev = {{ static ALIGNED: &AlignAs<[u8], u32> = &AlignAs {{ _align: [], #[cfg(target_endian = "big")] bytes: *include_bytes!("{revbigname}"), #[cfg(target_endian = "little")] bytes: *include_bytes!("{revlilname}"), }}; {from_bytes}.expect("serialized reverse DFA should be valid").0 }}; Regex::builder().build_from_dfas(dfafwd, dfarev) "##, ); match self.rust_kind { RustKind::RegexAutomata => { writeln!( wtr, r##" {auto_gen_message} use regex_automata::{{ dfa::{{dense::DFA, regex::Regex}}, util::{{lazy::Lazy, wire::AlignAs}}, }}; pub static {name}: Lazy<Regex<DFA<&'static [u32]>>> = Lazy::new(|| {{ {deserialize} }}); "##, )?; } RustKind::OnceCell => { writeln!( wtr, r##" {auto_gen_message} use {{ once_cell::sync::Lazy, regex_automata::{{ dfa::{{dense::DFA, 
regex::Regex}}, util::wire::AlignAs, }}, }}; pub static {name}: Lazy<Regex<DFA<&'static [u32]>>> = Lazy::new(|| {{ {deserialize} }}); "##, )?; } RustKind::LazyStatic => { writeln!( wtr, r##" {auto_gen_message} use regex_automata::{{ dfa::{{dense::DFA, regex::Regex}}, util::wire::AlignAs, }}; lazy_static::lazy_static! {{ pub static ref {name}: Regex<DFA<&'static [u32]>> = {{ {deserialize} }}; }} "##, )?; } RustKind::None => unreachable!(), } if self.rustfmt { util::rustfmt(&outpath)?; } Ok(()) } fn write_regex_sparse_rust(&self) -> anyhow::Result<()> { if matches!(self.rust_kind, RustKind::None) { return Ok(()); } let outpath = self.outdir.join(format!("{}.rs", self.modname)); let mut wtr = std::fs::File::create(&outpath) .with_context(|| outpath.display().to_string())?; let auto_gen_message = self.auto_gen_message().trim().to_string(); let name = &self.varname; let fwdbigname = self.big_name("_fwd"); let fwdlilname = self.lil_name("_fwd"); let revbigname = self.big_name("_rev"); let revlilname = self.lil_name("_rev"); let from_bytes = if self.safe { "DFA::from_bytes(BYTES)" } else { "unsafe { DFA::from_bytes_unchecked(BYTES) }" }; let deserialize = format!( r##" let dfafwd = {{ #[cfg(target_endian = "big")] static BYTES: &'static [u8] = include_bytes!("{fwdbigname}"); #[cfg(target_endian = "little")] static BYTES: &'static [u8] = include_bytes!("{fwdlilname}"); {from_bytes}.expect("serialized forward DFA should be valid").0 }}; let dfarev = {{ #[cfg(target_endian = "big")] static BYTES: &'static [u8] = include_bytes!("{revbigname}"); #[cfg(target_endian = "little")] static BYTES: &'static [u8] = include_bytes!("{revlilname}"); {from_bytes}.expect("serialized reverse DFA should be valid").0 }}; Regex::builder().build_from_dfas(dfafwd, dfarev) "##, ); match self.rust_kind { RustKind::RegexAutomata => { writeln!( wtr, r##" {auto_gen_message} use regex_automata::{{ dfa::{{regex::Regex, sparse::DFA}}, util::lazy::Lazy, }}; pub static {name}: Lazy<Regex<DFA<&'static [u8]>>> = Lazy::new(|| {{ {deserialize} }}); "##, )?; } RustKind::OnceCell => { writeln!( wtr, r##" {auto_gen_message} use {{ once_cell::sync::Lazy, regex_automata::dfa::{{regex::Regex, sparse::DFA}}, }}; pub static {name}: Lazy<Regex<DFA<&'static [u8]>>> = Lazy::new(|| {{ {deserialize} }}); "##, )?; } RustKind::LazyStatic => { writeln!( wtr, r##" {auto_gen_message} use regex_automata::dfa::{{regex::Regex, sparse::DFA}}; lazy_static::lazy_static! {{ pub static ref {name}: Regex<DFA<&'static [u8]>> = {{ {deserialize} }}; }} "##, )?; } RustKind::None => unreachable!(), } if self.rustfmt { util::rustfmt(&outpath)?; } Ok(()) } fn auto_gen_message(&self) -> String { let version = env!("CARGO_PKG_VERSION"); let cmd = std::env::args_os() .map(|a| a.to_string_lossy().into_owned()) .map(|a| { if a.contains('\n') { "<snip: arg too long>".to_string() } else { a } }) .collect::<Vec<String>>() .join(" "); format!( r#" // DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: // // {cmd} // // regex-cli {version} is available on crates.io. 
"# ) } fn big_name(&self, name_suffix: &str) -> String { format!("{}{}.bigendian.dfa", self.modname, name_suffix) } fn lil_name(&self, name_suffix: &str) -> String { format!("{}{}.littleendian.dfa", self.modname, name_suffix) } } fn rust_module_name(var_name: &str) -> String { var_name.to_ascii_lowercase() } <file_sep>/regex-automata/tests/nfa/thompson/mod.rs #[cfg(feature = "nfa-backtrack")] mod backtrack; #[cfg(feature = "nfa-pikevm")] mod pikevm; <file_sep>/testdata/regex-lite.toml # These tests are specifically written to test the regex-lite crate. While it # largely has the same semantics as the regex crate, there are some differences # around Unicode support and UTF-8. # # To be clear, regex-lite supports far fewer patterns because of its lack of # Unicode support, nested character classes and character class set operations. # What we're talking about here are the patterns that both crates support but # where the semantics might differ. # regex-lite uses ASCII definitions for Perl character classes. [[test]] name = "perl-class-decimal" regex = '\d' haystack = '᠕' matches = [] unicode = true # regex-lite uses ASCII definitions for Perl character classes. [[test]] name = "perl-class-space" regex = '\s' haystack = "\u2000" matches = [] unicode = true # regex-lite uses ASCII definitions for Perl character classes. [[test]] name = "perl-class-word" regex = '\w' haystack = 'δ' matches = [] unicode = true # regex-lite uses the ASCII definition of word for word boundary assertions. [[test]] name = "word-boundary" regex = '\b' haystack = 'δ' matches = [] unicode = true # regex-lite uses the ASCII definition of word for negated word boundary # assertions. But note that it should still not split codepoints! [[test]] name = "word-boundary-negated" regex = '\B' haystack = 'δ' matches = [[0, 0], [2, 2]] unicode = true # While we're here, the empty regex---which matches at every # position---shouldn't split a codepoint either. [[test]] name = "empty-no-split-codepoint" regex = '' haystack = '💩' matches = [[0, 0], [4, 4]] unicode = true # A dot always matches a full codepoint. [[test]] name = "dot-always-matches-codepoint" regex = '.' haystack = '💩' matches = [[0, 4]] unicode = false # A negated character class also always matches a full codepoint. [[test]] name = "negated-class-always-matches-codepoint" regex = '[^a]' haystack = '💩' matches = [[0, 4]] unicode = false # regex-lite only supports ASCII-aware case insensitive matching. [[test]] name = "case-insensitive-is-ascii-only" regex = 's' haystack = 'ſ' matches = [] unicode = true case-insensitive = true # Negated word boundaries shouldn't split a codepoint, but they will match # between invalid UTF-8. # # This test is only valid for a 'bytes' API, but that doesn't (yet) exist in # regex-lite. This can't happen in the main API because &str can't contain # invalid UTF-8. # [[test]] # name = "word-boundary-invalid-utf8" # regex = '\B' # haystack = '\xFF\xFF\xFF\xFF' # unescape = true # matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] # unicode = true # utf8 = false <file_sep>/tests/suite_string_set.rs use { anyhow::Result, regex::{RegexSet, RegexSetBuilder}, regex_test::{CompiledRegex, RegexTest, TestResult, TestRunner}, }; /// Tests the default configuration of the hybrid NFA/DFA. 
#[test]
fn default() -> Result<()> {
    let mut runner = TestRunner::new()?;
    runner
        .expand(&["is_match", "which"], |test| test.compiles())
        .blacklist_iter(super::BLACKLIST)
        .test_iter(crate::suite()?.iter(), compiler)
        .assert();
    Ok(())
}

fn run_test(re: &RegexSet, test: &RegexTest) -> TestResult {
    let hay = match std::str::from_utf8(test.haystack()) {
        Ok(hay) => hay,
        Err(err) => {
            return TestResult::fail(&format!(
                "haystack is not valid UTF-8: {}",
                err
            ));
        }
    };
    match test.additional_name() {
        "is_match" => TestResult::matched(re.is_match(hay)),
        "which" => TestResult::which(re.matches(hay).iter()),
        name => TestResult::fail(&format!("unrecognized test name: {}", name)),
    }
}

/// Converts the given regex test to a closure that searches with a
/// `RegexSet`. If the test configuration is unsupported, then a
/// `CompiledRegex` that skips the test is returned.
fn compiler(
    test: &RegexTest,
    _patterns: &[String],
) -> anyhow::Result<CompiledRegex> {
    let skip = Ok(CompiledRegex::skip());
    // The top-level RegexSet API only supports "overlapping" semantics.
    if !matches!(test.search_kind(), regex_test::SearchKind::Overlapping) {
        return skip;
    }
    // The top-level RegexSet API only supports "all" semantics.
    if !matches!(test.match_kind(), regex_test::MatchKind::All) {
        return skip;
    }
    // The top-level RegexSet API always runs unanchored searches.
    if test.anchored() {
        return skip;
    }
    // We don't support tests with explicit search bounds.
    let bounds = test.bounds();
    if !(bounds.start == 0 && bounds.end == test.haystack().len()) {
        return skip;
    }
    // The Regex API specifically does not support disabling UTF-8 mode because
    // it can only search &str which is always valid UTF-8.
    if !test.utf8() {
        return skip;
    }
    // If the test requires Unicode but the Unicode feature isn't enabled,
    // skip it. This is a little aggressive, but the test suite doesn't
    // have any easy way of communicating which Unicode features are needed.
    if test.unicode() && !cfg!(feature = "unicode") {
        return skip;
    }
    let re = RegexSetBuilder::new(test.regexes())
        .case_insensitive(test.case_insensitive())
        .unicode(test.unicode())
        .line_terminator(test.line_terminator())
        .build()?;
    Ok(CompiledRegex::compiled(move |test| run_test(&re, test)))
}
<file_sep>/regex-capi/examples/compile
#!/bin/sh

set -ex

# N.B. Add `--release` flag to `cargo build` to make the example run faster.
cargo build --manifest-path ../Cargo.toml
gcc -O3 -DDEBUG -o iter iter.c -ansi -Wall -I../include -L../../target/debug -lrure

# If you're using librure.a, then you'll need to link other stuff:
# -lutil -ldl -lpthread -lgcc_s -lc -lm -lrt -lutil -lrure
<file_sep>/testdata/expensive.toml
# This file represents tests that may be expensive to run on some regex
# engines. For example, tests that build a full DFA ahead of time and minimize
# it can take a horrendously long time on regexes that are large (or result in
# an explosion in the number of states). We group these tests together so that
# such engines can simply skip these tests.

# See: https://github.com/rust-lang/regex/issues/98
[[test]]
name = "regression-many-repeat-no-stack-overflow"
regex = '^.{1,2500}'
haystack = "a"
matches = [[0, 1]]

# This test is meant to blow the bounded backtracker's visited capacity. In
# order to do that, we need a somewhat sizeable regex. The purpose of this
# is to make sure there's at least one test that exercises this path in the
# backtracker. All other tests (at time of writing) are small enough that the
# backtracker can handle them fine.
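# Roughly speaking, the visited set grows with the number of NFA states
# multiplied by the number of haystack positions, so a large Unicode class
# repeated 50 times over a haystack this long should be enough to blow past
# the capacity.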
[[test]] name = "backtrack-blow-visited-capacity" regex = '\pL{50}' haystack = "abcdefghijklmnopqrstuvwxyabcdefghijklmnopqrstuvwxyabcdefghijklmnopqrstuvwxyabcdefghijklmnopqrstuvwxyabcdefghijklmnopqrstuvwxyabcdefghijklmnopqrstuvwxyZZ" matches = [[0, 50], [50, 100], [100, 150]] <file_sep>/tests/fuzz/mod.rs // This set of tests is different from regression_fuzz in that the tests start // from the fuzzer data directly. The test essentially duplicates the fuzz // target. I wonder if there's a better way to set this up... Hmmm. I bet // `cargo fuzz` has something where it can run a target against crash files and // verify that they pass. // This case found by the fuzzer causes the meta engine to use the "reverse // inner" literal strategy. That in turn uses a specialized search routine // for the lazy DFA in order to avoid worst case quadratic behavior. That // specialized search routine had a bug where it assumed that start state // specialization was disabled. But this is indeed not the case, since it // reuses the "general" lazy DFA for the full regex created as part of the core // strategy, which might very well have start states specialized due to the // existence of a prefilter. // // This is a somewhat weird case because if the core engine has a prefilter, // then it's usually the case that the "reverse inner" optimization won't be // pursued in that case. But there are some heuristics that try to detect // whether a prefilter is "fast" or not. If it's not, then the meta engine will // attempt the reverse inner optimization. And indeed, that's what happens // here. So the reverse inner optimization ends up with a lazy DFA that has // start states specialized. Ideally this wouldn't happen because specializing // start states without a prefilter inside the DFA can be disastrous for // performance by causing the DFA to ping-pong in and out of the special state // handling. In this case, it's probably not a huge deal because the lazy // DFA is only used for part of the matching where as the work horse is the // prefilter found by the reverse inner optimization. // // We could maybe fix this by refactoring the meta engine to be a little more // careful. For example, by attempting the optimizations before building the // core engine. But this is perhaps a little tricky. #[test] fn meta_stopat_specialize_start_states() { let data = include_bytes!( "testdata/crash-8760b19b25d74e3603d4c643e9c7404fdd3631f9", ); let _ = run(data); } // Same bug as meta_stopat_specialize_start_states, but minimized by the // fuzzer. #[test] fn meta_stopat_specialize_start_states_min() { let data = include_bytes!( "testdata/minimized-from-8760b19b25d74e3603d4c643e9c7404fdd3631f9", ); let _ = run(data); } // This input generated a pattern with a fail state (e.g., \P{any}, [^\s\S] // or [a&&b]). But the fail state was in a branch, where a subsequent branch // should have led to an overall match, but handling of the fail state // prevented it from doing so. A hand-minimized version of this is '[^\s\S]A|B' // on the haystack 'B'. That should yield a match of 'B'. // // The underlying cause was an issue in how DFA determinization handled fail // states. The bug didn't impact the PikeVM or the bounded backtracker. 
#[test]
fn fail_branch_prevents_match() {
    let data = include_bytes!(
        "testdata/crash-cd33b13df59ea9d74503986f9d32a270dd43cc04",
    );
    let _ = run(data);
}

// This input generated a pattern that contained a sub-expression like this:
//
//     a{0}{50000}
//
// This turned out to provoke quadratic behavior in the NFA compiler.
// Basically, the NFA compiler works in two phases. The first phase builds
// a more complicated-but-simpler-to-construct sequence of NFA states that
// includes unconditional epsilon transitions. As part of converting this
// sequence to the "final" NFA, we remove those unconditional epsilon
// transitions. The code responsible for doing this follows every chain of
// these transitions and remaps the state IDs. The way we were doing this
// before resulted in re-following every subsequent part of the chain for each
// state in the chain, which ended up being quadratic behavior. We effectively
// memoized this, which fixed the performance bug.
#[test]
fn slow_big_empty_chain() {
    let data = include_bytes!(
        "testdata/slow-unit-9ca9cc9929fee1fcbb847a78384effb8b98ea18a",
    );
    let _ = run(data);
}

// A different case of slow_big_empty_chain.
#[test]
fn slow_big_empty_chain2() {
    let data = include_bytes!(
        "testdata/slow-unit-3ab758ea520027fefd3f00e1384d9aeef155739e",
    );
    let _ = run(data);
}

// A different case of slow_big_empty_chain.
#[test]
fn slow_big_empty_chain3() {
    let data = include_bytes!(
        "testdata/slow-unit-b8a052f4254802edbe5f569b6ce6e9b6c927e9d6",
    );
    let _ = run(data);
}

// A different case of slow_big_empty_chain.
#[test]
fn slow_big_empty_chain4() {
    let data = include_bytes!(
        "testdata/slow-unit-93c73a43581f205f9aaffd9c17e52b34b17becd0",
    );
    let _ = run(data);
}

// A different case of slow_big_empty_chain.
#[test]
fn slow_big_empty_chain5() {
    let data = include_bytes!(
        "testdata/slow-unit-5345fccadf3812c53c3ccc7af5aa2741b7b2106c",
    );
    let _ = run(data);
}

// A different case of slow_big_empty_chain.
#[test]
fn slow_big_empty_chain6() {
    let data = include_bytes!(
        "testdata/slow-unit-6bd643eec330166e4ada91da2d3f284268481085",
    );
    let _ = run(data);
}

// This fuzz input generated a pattern with a large repetition that would fail
// NFA compilation, but its HIR was small. (HIR doesn't expand repetitions.)
// But, the bounds were high enough that the minimum length calculation
// overflowed. We fixed this by using saturating arithmetic (and also checked
// arithmetic for the maximum length calculation).
//
// Incidentally, this was the only unguarded arithmetic operation performed in
// the HIR smart constructors. And the fuzzer found it. Hah. Nice.
#[test]
fn minimum_len_overflow() {
    let data = include_bytes!(
        "testdata/crash-7eb3351f0965e5d6c1cb98aa8585949ef96531ff",
    );
    let _ = run(data);
}

// This is the fuzz target function. We duplicate it here since this is the
// thing we use to interpret the data. It is ultimately what we want to
// succeed.
fn run(data: &[u8]) -> Option<()> {
    if data.len() < 2 {
        return None;
    }
    let mut split_at = usize::from(data[0]);
    let data = std::str::from_utf8(&data[1..]).ok()?;
    // Split data into a regex and haystack to search.
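    // The first input byte picks the split point: clamp it to at least 1,
    // wrap it modulo the number of chars so it is always in bounds, and then
    // convert that char index into a byte offset before splitting.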
    let len = usize::try_from(data.chars().count()).ok()?;
    split_at = std::cmp::max(split_at, 1) % len;
    let char_index = data.char_indices().nth(split_at)?.0;
    let (pattern, input) = data.split_at(char_index);

    let re = regex::Regex::new(pattern).ok()?;
    re.is_match(input);
    Some(())
}
<file_sep>/regex-automata/tests/nfa/thompson/pikevm/suite.rs
use {
    anyhow::Result,
    regex_automata::{
        nfa::thompson::{
            self,
            pikevm::{self, PikeVM},
        },
        util::{prefilter::Prefilter, syntax},
        PatternSet,
    },
    regex_test::{
        CompiledRegex, Match, RegexTest, SearchKind, Span, TestResult,
        TestRunner,
    },
};

use crate::{create_input, suite, testify_captures, untestify_kind};

/// Tests the default configuration of the PikeVM.
#[test]
fn default() -> Result<()> {
    let builder = PikeVM::builder();
    let mut runner = TestRunner::new()?;
    runner.expand(&["is_match", "find", "captures"], |test| test.compiles());
    runner.test_iter(suite()?.iter(), compiler(builder)).assert();
    Ok(())
}

/// Tests the PikeVM with prefilters enabled.
#[test]
fn prefilter() -> Result<()> {
    let my_compiler = |test: &RegexTest, regexes: &[String]| {
        // Parse regexes as HIRs so we can get literals to build a prefilter.
        let mut hirs = vec![];
        for pattern in regexes.iter() {
            hirs.push(syntax::parse_with(pattern, &config_syntax(test))?);
        }
        let kind = match untestify_kind(test.match_kind()) {
            None => return Ok(CompiledRegex::skip()),
            Some(kind) => kind,
        };
        let pre = Prefilter::from_hirs_prefix(kind, &hirs);
        let mut builder = PikeVM::builder();
        builder.configure(PikeVM::config().prefilter(pre));
        compiler(builder)(test, regexes)
    };
    let mut runner = TestRunner::new()?;
    runner.expand(&["is_match", "find", "captures"], |test| test.compiles());
    runner.test_iter(suite()?.iter(), my_compiler).assert();
    Ok(())
}

fn compiler(
    mut builder: pikevm::Builder,
) -> impl FnMut(&RegexTest, &[String]) -> Result<CompiledRegex> {
    move |test, regexes| {
        if !configure_pikevm_builder(test, &mut builder) {
            return Ok(CompiledRegex::skip());
        }
        let re = builder.build_many(&regexes)?;
        let mut cache = re.create_cache();
        Ok(CompiledRegex::compiled(move |test| -> TestResult {
            run_test(&re, &mut cache, test)
        }))
    }
}

fn run_test(
    re: &PikeVM,
    cache: &mut pikevm::Cache,
    test: &RegexTest,
) -> TestResult {
    let input = create_input(test);
    match test.additional_name() {
        "is_match" => TestResult::matched(re.is_match(cache, input)),
        "find" => match test.search_kind() {
            SearchKind::Earliest => {
                let it = re
                    .find_iter(cache, input.earliest(true))
                    .take(test.match_limit().unwrap_or(std::usize::MAX))
                    .map(|m| Match {
                        id: m.pattern().as_usize(),
                        span: Span { start: m.start(), end: m.end() },
                    });
                TestResult::matches(it)
            }
            SearchKind::Leftmost => {
                let it = re
                    .find_iter(cache, input)
                    .take(test.match_limit().unwrap_or(std::usize::MAX))
                    .map(|m| Match {
                        id: m.pattern().as_usize(),
                        span: Span { start: m.start(), end: m.end() },
                    });
                TestResult::matches(it)
            }
            SearchKind::Overlapping => {
                let mut patset = PatternSet::new(re.get_nfa().pattern_len());
                re.which_overlapping_matches(cache, &input, &mut patset);
                TestResult::which(patset.iter().map(|p| p.as_usize()))
            }
        },
        "captures" => match test.search_kind() {
            SearchKind::Earliest => {
                let it = re
                    .captures_iter(cache, input.earliest(true))
                    .take(test.match_limit().unwrap_or(std::usize::MAX))
                    .map(|caps| testify_captures(&caps));
                TestResult::captures(it)
            }
            SearchKind::Leftmost => {
                let it = re
                    .captures_iter(cache, input)
                    .take(test.match_limit().unwrap_or(std::usize::MAX))
                    .map(|caps| testify_captures(&caps));
                TestResult::captures(it)
            }
            SearchKind::Overlapping
=> { // There is no overlapping PikeVM API that supports captures. TestResult::skip() } }, name => TestResult::fail(&format!("unrecognized test name: {}", name)), } } /// Configures the given regex builder with all relevant settings on the given /// regex test. /// /// If the regex test has a setting that is unsupported, then this returns /// false (implying the test should be skipped). fn configure_pikevm_builder( test: &RegexTest, builder: &mut pikevm::Builder, ) -> bool { let match_kind = match untestify_kind(test.match_kind()) { None => return false, Some(k) => k, }; let pikevm_config = PikeVM::config().match_kind(match_kind); builder .configure(pikevm_config) .syntax(config_syntax(test)) .thompson(config_thompson(test)); true } /// Configuration of a Thompson NFA compiler from a regex test. fn config_thompson(test: &RegexTest) -> thompson::Config { let mut lookm = regex_automata::util::look::LookMatcher::new(); lookm.set_line_terminator(test.line_terminator()); thompson::Config::new().utf8(test.utf8()).look_matcher(lookm) } /// Configuration of the regex parser from a regex test. fn config_syntax(test: &RegexTest) -> syntax::Config { syntax::Config::new() .case_insensitive(test.case_insensitive()) .unicode(test.unicode()) .utf8(test.utf8()) .line_terminator(test.line_terminator()) } <file_sep>/fuzz/oss-fuzz-build.sh #!/bin/bash -eu cd $SRC/regex cargo fuzz build -O --debug-assertions targets=( fuzz_regex_match fuzz_regex_lite_match fuzz_regex_automata_deserialize_dense_dfa fuzz_regex_automata_deserialize_sparse_dfa ast_roundtrip ast_fuzz_match ast_fuzz_regex ast_fuzz_match_bytes ) for target in "${targets[@]}"; do cp fuzz/target/x86_64-unknown-linux-gnu/release/$target $OUT/ done <file_sep>/testdata/unicode.toml # Basic Unicode literal support. [[test]] name = "literal1" regex = '☃' haystack = "☃" matches = [[0, 3]] [[test]] name = "literal2" regex = '☃+' haystack = "☃" matches = [[0, 3]] [[test]] name = "literal3" regex = '☃+' haystack = "☃" matches = [[0, 3]] case-insensitive = true [[test]] name = "literal4" regex = 'Δ' haystack = "δ" matches = [[0, 2]] case-insensitive = true # Unicode word boundaries. [[test]] name = "wb-100" regex = '\d\b' haystack = "6δ" matches = [] [[test]] name = "wb-200" regex = '\d\b' haystack = "6 " matches = [[0, 1]] [[test]] name = "wb-300" regex = '\d\B' haystack = "6δ" matches = [[0, 1]] [[test]] name = "wb-400" regex = '\d\B' haystack = "6 " matches = [] # Unicode character class support. [[test]] name = "class1" regex = '[☃Ⅰ]+' haystack = "☃" matches = [[0, 3]] [[test]] name = "class2" regex = '\pN' haystack = "Ⅰ" matches = [[0, 3]] [[test]] name = "class3" regex = '\pN+' haystack = "Ⅰ1Ⅱ2" matches = [[0, 8]] [[test]] name = "class4" regex = '\PN+' haystack = "abⅠ" matches = [[0, 2]] [[test]] name = "class5" regex = '[\PN]+' haystack = "abⅠ" matches = [[0, 2]] [[test]] name = "class6" regex = '[^\PN]+' haystack = "abⅠ" matches = [[2, 5]] [[test]] name = "class7" regex = '\p{Lu}+' haystack = "ΛΘΓΔα" matches = [[0, 8]] [[test]] name = "class8" regex = '\p{Lu}+' haystack = "ΛΘΓΔα" matches = [[0, 10]] case-insensitive = true [[test]] name = "class9" regex = '\pL+' haystack = "ΛΘΓΔα" matches = [[0, 10]] [[test]] name = "class10" regex = '\p{Ll}+' haystack = "ΛΘΓΔα" matches = [[8, 10]] # Unicode aware "Perl" character classes. 
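# Note that the expected spans are byte offsets, so multi-byte UTF-8
# characters (e.g., 'δ' is two bytes) widen the ranges below accordingly.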
[[test]] name = "perl1" regex = '\w+' haystack = "dδd" matches = [[0, 4]] [[test]] name = "perl2" regex = '\w+' haystack = "⥡" matches = [] [[test]] name = "perl3" regex = '\W+' haystack = "⥡" matches = [[0, 3]] [[test]] name = "perl4" regex = '\d+' haystack = "1२३9" matches = [[0, 8]] [[test]] name = "perl5" regex = '\d+' haystack = "Ⅱ" matches = [] [[test]] name = "perl6" regex = '\D+' haystack = "Ⅱ" matches = [[0, 3]] [[test]] name = "perl7" regex = '\s+' haystack = " " matches = [[0, 3]] [[test]] name = "perl8" regex = '\s+' haystack = "☃" matches = [] [[test]] name = "perl9" regex = '\S+' haystack = "☃" matches = [[0, 3]] # Specific tests for Unicode general category classes. [[test]] name = "class-gencat1" regex = '\p{Cased_Letter}' haystack = "A" matches = [[0, 3]] [[test]] name = "class-gencat2" regex = '\p{Close_Punctuation}' haystack = "❯" matches = [[0, 3]] [[test]] name = "class-gencat3" regex = '\p{Connector_Punctuation}' haystack = "⁀" matches = [[0, 3]] [[test]] name = "class-gencat4" regex = '\p{Control}' haystack = "\u009F" matches = [[0, 2]] [[test]] name = "class-gencat5" regex = '\p{Currency_Symbol}' haystack = "£" matches = [[0, 3]] [[test]] name = "class-gencat6" regex = '\p{Dash_Punctuation}' haystack = "〰" matches = [[0, 3]] [[test]] name = "class-gencat7" regex = '\p{Decimal_Number}' haystack = "𑓙" matches = [[0, 4]] [[test]] name = "class-gencat8" regex = '\p{Enclosing_Mark}' haystack = "\uA672" matches = [[0, 3]] [[test]] name = "class-gencat9" regex = '\p{Final_Punctuation}' haystack = "⸡" matches = [[0, 3]] [[test]] name = "class-gencat10" regex = '\p{Format}' haystack = "\U000E007F" matches = [[0, 4]] [[test]] name = "class-gencat11" regex = '\p{Initial_Punctuation}' haystack = "⸜" matches = [[0, 3]] [[test]] name = "class-gencat12" regex = '\p{Letter}' haystack = "Έ" matches = [[0, 2]] [[test]] name = "class-gencat13" regex = '\p{Letter_Number}' haystack = "ↂ" matches = [[0, 3]] [[test]] name = "class-gencat14" regex = '\p{Line_Separator}' haystack = "\u2028" matches = [[0, 3]] [[test]] name = "class-gencat15" regex = '\p{Lowercase_Letter}' haystack = "ϛ" matches = [[0, 2]] [[test]] name = "class-gencat16" regex = '\p{Mark}' haystack = "\U000E01EF" matches = [[0, 4]] [[test]] name = "class-gencat17" regex = '\p{Math}' haystack = "⋿" matches = [[0, 3]] [[test]] name = "class-gencat18" regex = '\p{Modifier_Letter}' haystack = "𖭃" matches = [[0, 4]] [[test]] name = "class-gencat19" regex = '\p{Modifier_Symbol}' haystack = "🏿" matches = [[0, 4]] [[test]] name = "class-gencat20" regex = '\p{Nonspacing_Mark}' haystack = "\U0001E94A" matches = [[0, 4]] [[test]] name = "class-gencat21" regex = '\p{Number}' haystack = "⓿" matches = [[0, 3]] [[test]] name = "class-gencat22" regex = '\p{Open_Punctuation}' haystack = "⦅" matches = [[0, 3]] [[test]] name = "class-gencat23" regex = '\p{Other}' haystack = "\u0BC9" matches = [[0, 3]] [[test]] name = "class-gencat24" regex = '\p{Other_Letter}' haystack = "ꓷ" matches = [[0, 3]] [[test]] name = "class-gencat25" regex = '\p{Other_Number}' haystack = "㉏" matches = [[0, 3]] [[test]] name = "class-gencat26" regex = '\p{Other_Punctuation}' haystack = "𞥞" matches = [[0, 4]] [[test]] name = "class-gencat27" regex = '\p{Other_Symbol}' haystack = "⅌" matches = [[0, 3]] [[test]] name = "class-gencat28" regex = '\p{Paragraph_Separator}' haystack = "\u2029" matches = [[0, 3]] [[test]] name = "class-gencat29" regex = '\p{Private_Use}' haystack = "\U0010FFFD" matches = [[0, 4]] [[test]] name = "class-gencat30" regex = '\p{Punctuation}' 
haystack = "𑁍" matches = [[0, 4]] [[test]] name = "class-gencat31" regex = '\p{Separator}' haystack = "\u3000" matches = [[0, 3]] [[test]] name = "class-gencat32" regex = '\p{Space_Separator}' haystack = "\u205F" matches = [[0, 3]] [[test]] name = "class-gencat33" regex = '\p{Spacing_Mark}' haystack = "\U00016F7E" matches = [[0, 4]] [[test]] name = "class-gencat34" regex = '\p{Symbol}' haystack = "⯈" matches = [[0, 3]] [[test]] name = "class-gencat35" regex = '\p{Titlecase_Letter}' haystack = "ῼ" matches = [[0, 3]] [[test]] name = "class-gencat36" regex = '\p{Unassigned}' haystack = "\U0010FFFF" matches = [[0, 4]] [[test]] name = "class-gencat37" regex = '\p{Uppercase_Letter}' haystack = "Ꝋ" matches = [[0, 3]] # Tests for Unicode emoji properties. [[test]] name = "class-emoji1" regex = '\p{Emoji}' haystack = "\u23E9" matches = [[0, 3]] [[test]] name = "class-emoji2" regex = '\p{emoji}' haystack = "\U0001F21A" matches = [[0, 4]] [[test]] name = "class-emoji3" regex = '\p{extendedpictographic}' haystack = "\U0001FA6E" matches = [[0, 4]] [[test]] name = "class-emoji4" regex = '\p{extendedpictographic}' haystack = "\U0001FFFD" matches = [[0, 4]] # Tests for Unicode grapheme cluster properties. [[test]] name = "class-gcb1" regex = '\p{grapheme_cluster_break=prepend}' haystack = "\U00011D46" matches = [[0, 4]] [[test]] name = "class-gcb2" regex = '\p{gcb=regional_indicator}' haystack = "\U0001F1E6" matches = [[0, 4]] [[test]] name = "class-gcb3" regex = '\p{gcb=ri}' haystack = "\U0001F1E7" matches = [[0, 4]] [[test]] name = "class-gcb4" regex = '\p{regionalindicator}' haystack = "\U0001F1FF" matches = [[0, 4]] [[test]] name = "class-gcb5" regex = '\p{gcb=lvt}' haystack = "\uC989" matches = [[0, 3]] [[test]] name = "class-gcb6" regex = '\p{gcb=zwj}' haystack = "\u200D" matches = [[0, 3]] # Tests for Unicode word boundary properties. [[test]] name = "class-word-break1" regex = '\p{word_break=Hebrew_Letter}' haystack = "\uFB46" matches = [[0, 3]] [[test]] name = "class-word-break2" regex = '\p{wb=hebrewletter}' haystack = "\uFB46" matches = [[0, 3]] [[test]] name = "class-word-break3" regex = '\p{wb=ExtendNumLet}' haystack = "\uFF3F" matches = [[0, 3]] [[test]] name = "class-word-break4" regex = '\p{wb=WSegSpace}' haystack = "\u3000" matches = [[0, 3]] [[test]] name = "class-word-break5" regex = '\p{wb=numeric}' haystack = "\U0001E950" matches = [[0, 4]] # Tests for Unicode sentence boundary properties. [[test]] name = "class-sentence-break1" regex = '\p{sentence_break=Lower}' haystack = "\u0469" matches = [[0, 2]] [[test]] name = "class-sentence-break2" regex = '\p{sb=lower}' haystack = "\u0469" matches = [[0, 2]] [[test]] name = "class-sentence-break3" regex = '\p{sb=Close}' haystack = "\uFF60" matches = [[0, 3]] [[test]] name = "class-sentence-break4" regex = '\p{sb=Close}' haystack = "\U0001F677" matches = [[0, 4]] [[test]] name = "class-sentence-break5" regex = '\p{sb=SContinue}' haystack = "\uFF64" matches = [[0, 3]] <file_sep>/testdata/set.toml # Basic multi-regex tests. 
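#
# Each test lists its patterns under `regex`; an expected match records the
# index of the matching pattern in `id` next to its span, while `match-kind`
# and `search-kind` pick the match semantics being exercised.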
[[test]] name = "basic10" regex = ["a", "a"] haystack = "a" matches = [ { id = 0, span = [0, 1] }, { id = 1, span = [0, 1] }, ] match-kind = "all" search-kind = "overlapping" [[test]] name = "basic10-leftmost-first" regex = ["a", "a"] haystack = "a" matches = [ { id = 0, span = [0, 1] }, ] match-kind = "leftmost-first" search-kind = "leftmost" [[test]] name = "basic20" regex = ["a", "a"] haystack = "ba" matches = [ { id = 0, span = [1, 2] }, { id = 1, span = [1, 2] }, ] match-kind = "all" search-kind = "overlapping" [[test]] name = "basic30" regex = ["a", "b"] haystack = "a" matches = [ { id = 0, span = [0, 1] }, ] match-kind = "all" search-kind = "overlapping" [[test]] name = "basic40" regex = ["a", "b"] haystack = "b" matches = [ { id = 1, span = [0, 1] }, ] match-kind = "all" search-kind = "overlapping" [[test]] name = "basic50" regex = ["a|b", "b|a"] haystack = "b" matches = [ { id = 0, span = [0, 1] }, { id = 1, span = [0, 1] }, ] match-kind = "all" search-kind = "overlapping" [[test]] name = "basic60" regex = ["foo", "oo"] haystack = "foo" matches = [ { id = 0, span = [0, 3] }, { id = 1, span = [1, 3] }, ] match-kind = "all" search-kind = "overlapping" [[test]] name = "basic60-leftmost-first" regex = ["foo", "oo"] haystack = "foo" matches = [ { id = 0, span = [0, 3] }, ] match-kind = "leftmost-first" search-kind = "leftmost" [[test]] name = "basic61" regex = ["oo", "foo"] haystack = "foo" matches = [ { id = 1, span = [0, 3] }, { id = 0, span = [1, 3] }, ] match-kind = "all" search-kind = "overlapping" [[test]] name = "basic61-leftmost-first" regex = ["oo", "foo"] haystack = "foo" matches = [ { id = 1, span = [0, 3] }, ] match-kind = "leftmost-first" search-kind = "leftmost" [[test]] name = "basic70" regex = ["abcd", "bcd", "cd", "d"] haystack = "abcd" matches = [ { id = 0, span = [0, 4] }, { id = 1, span = [1, 4] }, { id = 2, span = [2, 4] }, { id = 3, span = [3, 4] }, ] match-kind = "all" search-kind = "overlapping" [[test]] name = "basic71" regex = ["bcd", "cd", "d", "abcd"] haystack = "abcd" matches = [ { id = 3, span = [0, 4] }, ] match-kind = "leftmost-first" search-kind = "leftmost" [[test]] name = "basic80" regex = ["^foo", "bar$"] haystack = "foo" matches = [ { id = 0, span = [0, 3] }, ] match-kind = "all" search-kind = "overlapping" [[test]] name = "basic81" regex = ["^foo", "bar$"] haystack = "foo bar" matches = [ { id = 0, span = [0, 3] }, { id = 1, span = [4, 7] }, ] match-kind = "all" search-kind = "overlapping" [[test]] name = "basic82" regex = ["^foo", "bar$"] haystack = "bar" matches = [ { id = 1, span = [0, 3] }, ] match-kind = "all" search-kind = "overlapping" [[test]] name = "basic90" regex = ["[a-z]+$", "foo"] haystack = "01234 foo" matches = [ { id = 0, span = [8, 9] }, { id = 0, span = [7, 9] }, { id = 0, span = [6, 9] }, { id = 1, span = [6, 9] }, ] match-kind = "all" search-kind = "overlapping" [[test]] name = "basic91" regex = ["[a-z]+$", "foo"] haystack = "foo 01234" matches = [ { id = 1, span = [0, 3] }, ] match-kind = "all" search-kind = "overlapping" [[test]] name = "basic100" regex = [".*?", "a"] haystack = "zzza" matches = [ { id = 0, span = [0, 0] }, { id = 0, span = [1, 1] }, { id = 0, span = [0, 1] }, { id = 0, span = [2, 2] }, { id = 0, span = [1, 2] }, { id = 0, span = [0, 2] }, { id = 0, span = [3, 3] }, { id = 0, span = [2, 3] }, { id = 0, span = [1, 3] }, { id = 0, span = [0, 3] }, { id = 0, span = [4, 4] }, { id = 0, span = [3, 4] }, { id = 0, span = [2, 4] }, { id = 0, span = [1, 4] }, { id = 0, span = [0, 4] }, { id = 1, span = [3, 4] }, ] 
match-kind = "all" search-kind = "overlapping" [[test]] name = "basic101" regex = [".*", "a"] haystack = "zzza" matches = [ { id = 0, span = [0, 0] }, { id = 0, span = [1, 1] }, { id = 0, span = [0, 1] }, { id = 0, span = [2, 2] }, { id = 0, span = [1, 2] }, { id = 0, span = [0, 2] }, { id = 0, span = [3, 3] }, { id = 0, span = [2, 3] }, { id = 0, span = [1, 3] }, { id = 0, span = [0, 3] }, { id = 0, span = [4, 4] }, { id = 0, span = [3, 4] }, { id = 0, span = [2, 4] }, { id = 0, span = [1, 4] }, { id = 0, span = [0, 4] }, { id = 1, span = [3, 4] }, ] match-kind = "all" search-kind = "overlapping" [[test]] name = "basic102" regex = [".*", "a"] haystack = "zzz" matches = [ { id = 0, span = [0, 0] }, { id = 0, span = [1, 1] }, { id = 0, span = [0, 1] }, { id = 0, span = [2, 2] }, { id = 0, span = [1, 2] }, { id = 0, span = [0, 2] }, { id = 0, span = [3, 3] }, { id = 0, span = [2, 3] }, { id = 0, span = [1, 3] }, { id = 0, span = [0, 3] }, ] match-kind = "all" search-kind = "overlapping" [[test]] name = "basic110" regex = ['\ba\b'] haystack = "hello a bye" matches = [ { id = 0, span = [6, 7] }, ] match-kind = "all" search-kind = "overlapping" [[test]] name = "basic111" regex = ['\ba\b', '\be\b'] haystack = "hello a bye e" matches = [ { id = 0, span = [6, 7] }, { id = 1, span = [12, 13] }, ] match-kind = "all" search-kind = "overlapping" [[test]] name = "basic120" regex = ["a"] haystack = "a" matches = [ { id = 0, span = [0, 1] }, ] match-kind = "all" search-kind = "overlapping" [[test]] name = "basic121" regex = [".*a"] haystack = "a" matches = [ { id = 0, span = [0, 1] }, ] match-kind = "all" search-kind = "overlapping" [[test]] name = "basic122" regex = [".*a", "β"] haystack = "β" matches = [ { id = 1, span = [0, 2] }, ] match-kind = "all" search-kind = "overlapping" [[test]] name = "basic130" regex = ["ab", "b"] haystack = "ba" matches = [ { id = 1, span = [0, 1] }, ] match-kind = "all" search-kind = "overlapping" # These test cases where one of the regexes matches the empty string. 
[[test]] name = "empty10" regex = ["", "a"] haystack = "abc" matches = [ { id = 0, span = [0, 0] }, { id = 1, span = [0, 1] }, { id = 0, span = [1, 1] }, { id = 0, span = [2, 2] }, { id = 0, span = [3, 3] }, ] match-kind = "all" search-kind = "overlapping" [[test]] name = "empty10-leftmost-first" regex = ["", "a"] haystack = "abc" matches = [ { id = 0, span = [0, 0] }, { id = 0, span = [1, 1] }, { id = 0, span = [2, 2] }, { id = 0, span = [3, 3] }, ] match-kind = "leftmost-first" search-kind = "leftmost" [[test]] name = "empty11" regex = ["a", ""] haystack = "abc" matches = [ { id = 1, span = [0, 0] }, { id = 0, span = [0, 1] }, { id = 1, span = [1, 1] }, { id = 1, span = [2, 2] }, { id = 1, span = [3, 3] }, ] match-kind = "all" search-kind = "overlapping" [[test]] name = "empty11-leftmost-first" regex = ["a", ""] haystack = "abc" matches = [ { id = 0, span = [0, 1] }, { id = 1, span = [2, 2] }, { id = 1, span = [3, 3] }, ] match-kind = "leftmost-first" search-kind = "leftmost" [[test]] name = "empty20" regex = ["", "b"] haystack = "abc" matches = [ { id = 0, span = [0, 0] }, { id = 0, span = [1, 1] }, { id = 1, span = [1, 2] }, { id = 0, span = [2, 2] }, { id = 0, span = [3, 3] }, ] match-kind = "all" search-kind = "overlapping" [[test]] name = "empty20-leftmost-first" regex = ["", "b"] haystack = "abc" matches = [ { id = 0, span = [0, 0] }, { id = 0, span = [1, 1] }, { id = 0, span = [2, 2] }, { id = 0, span = [3, 3] }, ] match-kind = "leftmost-first" search-kind = "leftmost" [[test]] name = "empty21" regex = ["b", ""] haystack = "abc" matches = [ { id = 1, span = [0, 0] }, { id = 1, span = [1, 1] }, { id = 0, span = [1, 2] }, { id = 1, span = [2, 2] }, { id = 1, span = [3, 3] }, ] match-kind = "all" search-kind = "overlapping" [[test]] name = "empty21-leftmost-first" regex = ["b", ""] haystack = "abc" matches = [ { id = 1, span = [0, 0] }, { id = 0, span = [1, 2] }, { id = 1, span = [3, 3] }, ] match-kind = "leftmost-first" search-kind = "leftmost" [[test]] name = "empty22" regex = ["(?:)", "b"] haystack = "abc" matches = [ { id = 0, span = [0, 0] }, { id = 0, span = [1, 1] }, { id = 1, span = [1, 2] }, { id = 0, span = [2, 2] }, { id = 0, span = [3, 3] }, ] match-kind = "all" search-kind = "overlapping" [[test]] name = "empty23" regex = ["b", "(?:)"] haystack = "abc" matches = [ { id = 1, span = [0, 0] }, { id = 1, span = [1, 1] }, { id = 0, span = [1, 2] }, { id = 1, span = [2, 2] }, { id = 1, span = [3, 3] }, ] match-kind = "all" search-kind = "overlapping" [[test]] name = "empty30" regex = ["", "z"] haystack = "abc" matches = [ { id = 0, span = [0, 0] }, { id = 0, span = [1, 1] }, { id = 0, span = [2, 2] }, { id = 0, span = [3, 3] }, ] match-kind = "all" search-kind = "overlapping" [[test]] name = "empty30-leftmost-first" regex = ["", "z"] haystack = "abc" matches = [ { id = 0, span = [0, 0] }, { id = 0, span = [1, 1] }, { id = 0, span = [2, 2] }, { id = 0, span = [3, 3] }, ] match-kind = "leftmost-first" search-kind = "leftmost" [[test]] name = "empty31" regex = ["z", ""] haystack = "abc" matches = [ { id = 1, span = [0, 0] }, { id = 1, span = [1, 1] }, { id = 1, span = [2, 2] }, { id = 1, span = [3, 3] }, ] match-kind = "all" search-kind = "overlapping" [[test]] name = "empty31-leftmost-first" regex = ["z", ""] haystack = "abc" matches = [ { id = 1, span = [0, 0] }, { id = 1, span = [1, 1] }, { id = 1, span = [2, 2] }, { id = 1, span = [3, 3] }, ] match-kind = "leftmost-first" search-kind = "leftmost" [[test]] name = "empty40" regex = ["c(?:)", "b"] haystack = "abc" matches = [ { 
id = 1, span = [1, 2] }, { id = 0, span = [2, 3] }, ] match-kind = "all" search-kind = "overlapping" [[test]] name = "empty40-leftmost-first" regex = ["c(?:)", "b"] haystack = "abc" matches = [ { id = 1, span = [1, 2] }, { id = 0, span = [2, 3] }, ] match-kind = "leftmost-first" search-kind = "leftmost" # These test cases where there are no matches. [[test]] name = "nomatch10" regex = ["a", "a"] haystack = "b" matches = [] match-kind = "all" search-kind = "overlapping" [[test]] name = "nomatch20" regex = ["^foo", "bar$"] haystack = "bar foo" matches = [] match-kind = "all" search-kind = "overlapping" [[test]] name = "nomatch30" regex = [] haystack = "a" matches = [] match-kind = "all" search-kind = "overlapping" [[test]] name = "nomatch40" regex = ["^rooted$", '\.log$'] haystack = "notrooted" matches = [] match-kind = "all" search-kind = "overlapping" # These test multi-regex searches with capture groups. # # NOTE: I wrote these tests in the course of developing a first class API for # overlapping capturing group matches, but ultimately removed that API because # the semantics for overlapping matches aren't totally clear. However, I've # left the tests because I believe the semantics for these patterns are clear # and because we can still test our "which patterns matched" APIs with them. [[test]] name = "caps-010" regex = ['^(\w+) (\w+)$', '^(\S+) (\S+)$'] haystack = "Bruce Springsteen" matches = [ { id = 0, spans = [[0, 17], [0, 5], [6, 17]] }, { id = 1, spans = [[0, 17], [0, 5], [6, 17]] }, ] match-kind = "all" search-kind = "overlapping" unicode = false utf8 = false [[test]] name = "caps-020" regex = ['^(\w+) (\w+)$', '^[A-Z](\S+) [A-Z](\S+)$'] haystack = "Bruce Springsteen" matches = [ { id = 0, spans = [[0, 17], [0, 5], [6, 17]] }, { id = 1, spans = [[0, 17], [1, 5], [7, 17]] }, ] match-kind = "all" search-kind = "overlapping" unicode = false utf8 = false [[test]] name = "caps-030" regex = ['^(\w+) (\w+)$', '^([A-Z])(\S+) ([A-Z])(\S+)$'] haystack = "Bruce Springsteen" matches = [ { id = 0, spans = [[0, 17], [0, 5], [6, 17]] }, { id = 1, spans = [[0, 17], [0, 1], [1, 5], [6, 7], [7, 17]] }, ] match-kind = "all" search-kind = "overlapping" unicode = false utf8 = false [[test]] name = "caps-110" regex = ['(\w+) (\w+)', '(\S+) (\S+)'] haystack = "Bruce Springsteen" matches = [ { id = 0, spans = [[0, 17], [0, 5], [6, 17]] }, ] match-kind = "leftmost-first" search-kind = "leftmost" unicode = false utf8 = false [[test]] name = "caps-120" regex = ['(\w+) (\w+)', '(\S+) (\S+)'] haystack = "&ruce $pringsteen" matches = [ { id = 1, spans = [[0, 17], [0, 5], [6, 17]] }, ] match-kind = "leftmost-first" search-kind = "leftmost" unicode = false utf8 = false [[test]] name = "caps-121" regex = ['(\w+) (\w+)', '(\S+) (\S+)'] haystack = "&ruce $pringsteen Foo Bar" matches = [ { id = 1, spans = [[0, 17], [0, 5], [6, 17]] }, { id = 0, spans = [[18, 25], [18, 21], [22, 25]] }, ] match-kind = "leftmost-first" search-kind = "leftmost" unicode = false utf8 = false <file_sep>/regex-automata/src/dfa/search.rs use crate::{ dfa::{ accel, automaton::{Automaton, OverlappingState}, }, util::{ prefilter::Prefilter, primitives::StateID, search::{Anchored, HalfMatch, Input, Span}, }, MatchError, }; #[inline(never)] pub fn find_fwd<A: Automaton + ?Sized>( dfa: &A, input: &Input<'_>, ) -> Result<Option<HalfMatch>, MatchError> { if input.is_done() { return Ok(None); } let pre = if input.get_anchored().is_anchored() { None } else { dfa.get_prefilter() }; // Searching with a pattern ID is always anchored, so we should never 
use // a prefilter. if pre.is_some() { if input.get_earliest() { find_fwd_imp(dfa, input, pre, true) } else { find_fwd_imp(dfa, input, pre, false) } } else { if input.get_earliest() { find_fwd_imp(dfa, input, None, true) } else { find_fwd_imp(dfa, input, None, false) } } } #[cfg_attr(feature = "perf-inline", inline(always))] fn find_fwd_imp<A: Automaton + ?Sized>( dfa: &A, input: &Input<'_>, pre: Option<&'_ Prefilter>, earliest: bool, ) -> Result<Option<HalfMatch>, MatchError> { // See 'prefilter_restart' docs for explanation. let universal_start = dfa.universal_start_state(Anchored::No).is_some(); let mut mat = None; let mut sid = init_fwd(dfa, input)?; let mut at = input.start(); // This could just be a closure, but then I think it would be unsound // because it would need to be safe to invoke. This way, the lack of safety // is clearer in the code below. macro_rules! next_unchecked { ($sid:expr, $at:expr) => {{ let byte = *input.haystack().get_unchecked($at); dfa.next_state_unchecked($sid, byte) }}; } if let Some(ref pre) = pre { let span = Span::from(at..input.end()); // If a prefilter doesn't report false positives, then we don't need to // touch the DFA at all. However, since all matches include the pattern // ID, and the prefilter infrastructure doesn't report pattern IDs, we // limit this optimization to cases where there is exactly one pattern. // In that case, any match must be the 0th pattern. match pre.find(input.haystack(), span) { None => return Ok(mat), Some(ref span) => { at = span.start; if !universal_start { sid = prefilter_restart(dfa, &input, at)?; } } } } while at < input.end() { // SAFETY: There are two safety invariants we need to uphold here in // the loops below: that 'sid' and 'prev_sid' are valid state IDs // for this DFA, and that 'at' is a valid index into 'haystack'. // For the former, we rely on the invariant that next_state* and // start_state_forward always returns a valid state ID (given a valid // state ID in the former case). For the latter safety invariant, we // always guard unchecked access with a check that 'at' is less than // 'end', where 'end <= haystack.len()'. In the unrolled loop below, we // ensure that 'at' is always in bounds. // // PERF: See a similar comment in src/hybrid/search.rs that justifies // this extra work to make the search loop fast. The same reasoning and // benchmarks apply here. let mut prev_sid; while at < input.end() { prev_sid = unsafe { next_unchecked!(sid, at) }; if dfa.is_special_state(prev_sid) || at + 3 >= input.end() { core::mem::swap(&mut prev_sid, &mut sid); break; } at += 1; sid = unsafe { next_unchecked!(prev_sid, at) }; if dfa.is_special_state(sid) { break; } at += 1; prev_sid = unsafe { next_unchecked!(sid, at) }; if dfa.is_special_state(prev_sid) { core::mem::swap(&mut prev_sid, &mut sid); break; } at += 1; sid = unsafe { next_unchecked!(prev_sid, at) }; if dfa.is_special_state(sid) { break; } at += 1; } if dfa.is_special_state(sid) { if dfa.is_start_state(sid) { if let Some(ref pre) = pre { let span = Span::from(at..input.end()); match pre.find(input.haystack(), span) { None => return Ok(mat), Some(ref span) => { // We want to skip any update to 'at' below // at the end of this iteration and just // jump immediately back to the next state // transition at the leading position of the // candidate match. // // ... but only if we actually made progress // with our prefilter, otherwise if the start // state has a self-loop, we can get stuck. 
if span.start > at { at = span.start; if !universal_start { sid = prefilter_restart(dfa, &input, at)?; } continue; } } } } else if dfa.is_accel_state(sid) { let needles = dfa.accelerator(sid); at = accel::find_fwd(needles, input.haystack(), at + 1) .unwrap_or(input.end()); continue; } } else if dfa.is_match_state(sid) { let pattern = dfa.match_pattern(sid, 0); mat = Some(HalfMatch::new(pattern, at)); if earliest { return Ok(mat); } if dfa.is_accel_state(sid) { let needles = dfa.accelerator(sid); at = accel::find_fwd(needles, input.haystack(), at + 1) .unwrap_or(input.end()); continue; } } else if dfa.is_accel_state(sid) { let needs = dfa.accelerator(sid); at = accel::find_fwd(needs, input.haystack(), at + 1) .unwrap_or(input.end()); continue; } else if dfa.is_dead_state(sid) { return Ok(mat); } else { // It's important that this is a debug_assert, since this can // actually be tripped even if DFA::from_bytes succeeds and // returns a supposedly valid DFA. debug_assert!(dfa.is_quit_state(sid)); return Err(MatchError::quit(input.haystack()[at], at)); } } at += 1; } eoi_fwd(dfa, input, &mut sid, &mut mat)?; Ok(mat) } #[inline(never)] pub fn find_rev<A: Automaton + ?Sized>( dfa: &A, input: &Input<'_>, ) -> Result<Option<HalfMatch>, MatchError> { if input.is_done() { return Ok(None); } if input.get_earliest() { find_rev_imp(dfa, input, true) } else { find_rev_imp(dfa, input, false) } } #[cfg_attr(feature = "perf-inline", inline(always))] fn find_rev_imp<A: Automaton + ?Sized>( dfa: &A, input: &Input<'_>, earliest: bool, ) -> Result<Option<HalfMatch>, MatchError> { let mut mat = None; let mut sid = init_rev(dfa, input)?; // In reverse search, the loop below can't handle the case of searching an // empty slice. Ideally we could write something congruent to the forward // search, i.e., 'while at >= start', but 'start' might be 0. Since we use // an unsigned offset, 'at >= 0' is trivially always true. We could avoid // this extra case handling by using a signed offset, but Rust makes it // annoying to do. So... We just handle the empty case separately. if input.start() == input.end() { eoi_rev(dfa, input, &mut sid, &mut mat)?; return Ok(mat); } let mut at = input.end() - 1; macro_rules! next_unchecked { ($sid:expr, $at:expr) => {{ let byte = *input.haystack().get_unchecked($at); dfa.next_state_unchecked($sid, byte) }}; } loop { // SAFETY: See comments in 'find_fwd' for a safety argument. let mut prev_sid; while at >= input.start() { prev_sid = unsafe { next_unchecked!(sid, at) }; if dfa.is_special_state(prev_sid) || at <= input.start().saturating_add(3) { core::mem::swap(&mut prev_sid, &mut sid); break; } at -= 1; sid = unsafe { next_unchecked!(prev_sid, at) }; if dfa.is_special_state(sid) { break; } at -= 1; prev_sid = unsafe { next_unchecked!(sid, at) }; if dfa.is_special_state(prev_sid) { core::mem::swap(&mut prev_sid, &mut sid); break; } at -= 1; sid = unsafe { next_unchecked!(prev_sid, at) }; if dfa.is_special_state(sid) { break; } at -= 1; } if dfa.is_special_state(sid) { if dfa.is_start_state(sid) { if dfa.is_accel_state(sid) { let needles = dfa.accelerator(sid); at = accel::find_rev(needles, input.haystack(), at) .map(|i| i + 1) .unwrap_or(input.start()); } } else if dfa.is_match_state(sid) { let pattern = dfa.match_pattern(sid, 0); // Since reverse searches report the beginning of a match // and the beginning is inclusive (not exclusive like the // end of a match), we add 1 to make it inclusive. 
mat = Some(HalfMatch::new(pattern, at + 1)); if earliest { return Ok(mat); } if dfa.is_accel_state(sid) { let needles = dfa.accelerator(sid); at = accel::find_rev(needles, input.haystack(), at) .map(|i| i + 1) .unwrap_or(input.start()); } } else if dfa.is_accel_state(sid) { let needles = dfa.accelerator(sid); // If the accelerator returns nothing, why don't we quit the // search? Well, if the accelerator doesn't find anything, that // doesn't mean we don't have a match. It just means that we // can't leave the current state given one of the 255 possible // byte values. However, there might be an EOI transition. So // we set 'at' to the end of the haystack, which will cause // this loop to stop and fall down into the EOI transition. at = accel::find_rev(needles, input.haystack(), at) .map(|i| i + 1) .unwrap_or(input.start()); } else if dfa.is_dead_state(sid) { return Ok(mat); } else { debug_assert!(dfa.is_quit_state(sid)); return Err(MatchError::quit(input.haystack()[at], at)); } } if at == input.start() { break; } at -= 1; } eoi_rev(dfa, input, &mut sid, &mut mat)?; Ok(mat) } #[inline(never)] pub fn find_overlapping_fwd<A: Automaton + ?Sized>( dfa: &A, input: &Input<'_>, state: &mut OverlappingState, ) -> Result<(), MatchError> { state.mat = None; if input.is_done() { return Ok(()); } let pre = if input.get_anchored().is_anchored() { None } else { dfa.get_prefilter() }; if pre.is_some() { find_overlapping_fwd_imp(dfa, input, pre, state) } else { find_overlapping_fwd_imp(dfa, input, None, state) } } #[cfg_attr(feature = "perf-inline", inline(always))] fn find_overlapping_fwd_imp<A: Automaton + ?Sized>( dfa: &A, input: &Input<'_>, pre: Option<&'_ Prefilter>, state: &mut OverlappingState, ) -> Result<(), MatchError> { // See 'prefilter_restart' docs for explanation. let universal_start = dfa.universal_start_state(Anchored::No).is_some(); let mut sid = match state.id { None => { state.at = input.start(); init_fwd(dfa, input)? } Some(sid) => { if let Some(match_index) = state.next_match_index { let match_len = dfa.match_len(sid); if match_index < match_len { state.next_match_index = Some(match_index + 1); let pattern = dfa.match_pattern(sid, match_index); state.mat = Some(HalfMatch::new(pattern, state.at)); return Ok(()); } } // Once we've reported all matches at a given position, we need to // advance the search to the next position. state.at += 1; if state.at > input.end() { return Ok(()); } sid } }; // NOTE: We don't optimize the crap out of this routine primarily because // it seems like most find_overlapping searches will have higher match // counts, and thus, throughput is perhaps not as important. But if you // have a use case for something faster, feel free to file an issue. 
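    //
    // The loop below advances one position at a time (except where an
    // accelerated state lets it skip ahead), restarts after each prefilter
    // candidate, and returns as soon as a match state is seen so the caller
    // can resume the search for further overlapping matches.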
while state.at < input.end() { sid = dfa.next_state(sid, input.haystack()[state.at]); if dfa.is_special_state(sid) { state.id = Some(sid); if dfa.is_start_state(sid) { if let Some(ref pre) = pre { let span = Span::from(state.at..input.end()); match pre.find(input.haystack(), span) { None => return Ok(()), Some(ref span) => { if span.start > state.at { state.at = span.start; if !universal_start { sid = prefilter_restart( dfa, &input, state.at, )?; } continue; } } } } else if dfa.is_accel_state(sid) { let needles = dfa.accelerator(sid); state.at = accel::find_fwd( needles, input.haystack(), state.at + 1, ) .unwrap_or(input.end()); continue; } } else if dfa.is_match_state(sid) { state.next_match_index = Some(1); let pattern = dfa.match_pattern(sid, 0); state.mat = Some(HalfMatch::new(pattern, state.at)); return Ok(()); } else if dfa.is_accel_state(sid) { let needs = dfa.accelerator(sid); // If the accelerator returns nothing, why don't we quit the // search? Well, if the accelerator doesn't find anything, that // doesn't mean we don't have a match. It just means that we // can't leave the current state given one of the 255 possible // byte values. However, there might be an EOI transition. So // we set 'at' to the end of the haystack, which will cause // this loop to stop and fall down into the EOI transition. state.at = accel::find_fwd(needs, input.haystack(), state.at + 1) .unwrap_or(input.end()); continue; } else if dfa.is_dead_state(sid) { return Ok(()); } else { debug_assert!(dfa.is_quit_state(sid)); return Err(MatchError::quit( input.haystack()[state.at], state.at, )); } } state.at += 1; } let result = eoi_fwd(dfa, input, &mut sid, &mut state.mat); state.id = Some(sid); if state.mat.is_some() { // '1' is always correct here since if we get to this point, this // always corresponds to the first (index '0') match discovered at // this position. So the next match to report at this position (if // it exists) is at index '1'. state.next_match_index = Some(1); } result } #[inline(never)] pub(crate) fn find_overlapping_rev<A: Automaton + ?Sized>( dfa: &A, input: &Input<'_>, state: &mut OverlappingState, ) -> Result<(), MatchError> { state.mat = None; if input.is_done() { return Ok(()); } let mut sid = match state.id { None => { let sid = init_rev(dfa, input)?; state.id = Some(sid); if input.start() == input.end() { state.rev_eoi = true; } else { state.at = input.end() - 1; } sid } Some(sid) => { if let Some(match_index) = state.next_match_index { let match_len = dfa.match_len(sid); if match_index < match_len { state.next_match_index = Some(match_index + 1); let pattern = dfa.match_pattern(sid, match_index); state.mat = Some(HalfMatch::new(pattern, state.at)); return Ok(()); } } // Once we've reported all matches at a given position, we need // to advance the search to the next position. However, if we've // already followed the EOI transition, then we know we're done // with the search and there cannot be any more matches to report. if state.rev_eoi { return Ok(()); } else if state.at == input.start() { // At this point, we should follow the EOI transition. This // will cause us the skip the main loop below and fall through // to the final 'eoi_rev' transition. state.rev_eoi = true; } else { // We haven't hit the end of the search yet, so move on. 
state.at -= 1; } sid } }; while !state.rev_eoi { sid = dfa.next_state(sid, input.haystack()[state.at]); if dfa.is_special_state(sid) { state.id = Some(sid); if dfa.is_start_state(sid) { if dfa.is_accel_state(sid) { let needles = dfa.accelerator(sid); state.at = accel::find_rev(needles, input.haystack(), state.at) .map(|i| i + 1) .unwrap_or(input.start()); } } else if dfa.is_match_state(sid) { state.next_match_index = Some(1); let pattern = dfa.match_pattern(sid, 0); state.mat = Some(HalfMatch::new(pattern, state.at + 1)); return Ok(()); } else if dfa.is_accel_state(sid) { let needles = dfa.accelerator(sid); // If the accelerator returns nothing, why don't we quit the // search? Well, if the accelerator doesn't find anything, that // doesn't mean we don't have a match. It just means that we // can't leave the current state given one of the 255 possible // byte values. However, there might be an EOI transition. So // we set 'at' to the end of the haystack, which will cause // this loop to stop and fall down into the EOI transition. state.at = accel::find_rev(needles, input.haystack(), state.at) .map(|i| i + 1) .unwrap_or(input.start()); } else if dfa.is_dead_state(sid) { return Ok(()); } else { debug_assert!(dfa.is_quit_state(sid)); return Err(MatchError::quit( input.haystack()[state.at], state.at, )); } } if state.at == input.start() { break; } state.at -= 1; } let result = eoi_rev(dfa, input, &mut sid, &mut state.mat); state.rev_eoi = true; state.id = Some(sid); if state.mat.is_some() { // '1' is always correct here since if we get to this point, this // always corresponds to the first (index '0') match discovered at // this position. So the next match to report at this position (if // it exists) is at index '1'. state.next_match_index = Some(1); } result } #[cfg_attr(feature = "perf-inline", inline(always))] fn init_fwd<A: Automaton + ?Sized>( dfa: &A, input: &Input<'_>, ) -> Result<StateID, MatchError> { let sid = dfa.start_state_forward(input)?; // Start states can never be match states, since all matches are delayed // by 1 byte. debug_assert!(!dfa.is_match_state(sid)); Ok(sid) } #[cfg_attr(feature = "perf-inline", inline(always))] fn init_rev<A: Automaton + ?Sized>( dfa: &A, input: &Input<'_>, ) -> Result<StateID, MatchError> { let sid = dfa.start_state_reverse(input)?; // Start states can never be match states, since all matches are delayed // by 1 byte. debug_assert!(!dfa.is_match_state(sid)); Ok(sid) } #[cfg_attr(feature = "perf-inline", inline(always))] fn eoi_fwd<A: Automaton + ?Sized>( dfa: &A, input: &Input<'_>, sid: &mut StateID, mat: &mut Option<HalfMatch>, ) -> Result<(), MatchError> { let sp = input.get_span(); match input.haystack().get(sp.end) { Some(&b) => { *sid = dfa.next_state(*sid, b); if dfa.is_match_state(*sid) { let pattern = dfa.match_pattern(*sid, 0); *mat = Some(HalfMatch::new(pattern, sp.end)); } else if dfa.is_quit_state(*sid) { return Err(MatchError::quit(b, sp.end)); } } None => { *sid = dfa.next_eoi_state(*sid); if dfa.is_match_state(*sid) { let pattern = dfa.match_pattern(*sid, 0); *mat = Some(HalfMatch::new(pattern, input.haystack().len())); } // N.B. We don't have to check 'is_quit' here because the EOI // transition can never lead to a quit state. 
debug_assert!(!dfa.is_quit_state(*sid)); } } Ok(()) } #[cfg_attr(feature = "perf-inline", inline(always))] fn eoi_rev<A: Automaton + ?Sized>( dfa: &A, input: &Input<'_>, sid: &mut StateID, mat: &mut Option<HalfMatch>, ) -> Result<(), MatchError> { let sp = input.get_span(); if sp.start > 0 { let byte = input.haystack()[sp.start - 1]; *sid = dfa.next_state(*sid, byte); if dfa.is_match_state(*sid) { let pattern = dfa.match_pattern(*sid, 0); *mat = Some(HalfMatch::new(pattern, sp.start)); } else if dfa.is_quit_state(*sid) { return Err(MatchError::quit(byte, sp.start - 1)); } } else { *sid = dfa.next_eoi_state(*sid); if dfa.is_match_state(*sid) { let pattern = dfa.match_pattern(*sid, 0); *mat = Some(HalfMatch::new(pattern, 0)); } // N.B. We don't have to check 'is_quit' here because the EOI // transition can never lead to a quit state. debug_assert!(!dfa.is_quit_state(*sid)); } Ok(()) } /// Re-compute the starting state that a DFA should be in after finding a /// prefilter candidate match at the position `at`. /// /// The function with the same name has a bit more docs in hybrid/search.rs. #[cfg_attr(feature = "perf-inline", inline(always))] fn prefilter_restart<A: Automaton + ?Sized>( dfa: &A, input: &Input<'_>, at: usize, ) -> Result<StateID, MatchError> { let mut input = input.clone(); input.set_start(at); init_fwd(dfa, &input) } <file_sep>/regex-automata/tests/gen/dense/mod.rs use regex_automata::{Input, Match}; mod multi_pattern_v2; #[test] fn multi_pattern_v2() { use multi_pattern_v2::MULTI_PATTERN_V2 as RE; assert_eq!(Some(Match::must(0, 0..4)), RE.find("abcd")); assert_eq!(Some(Match::must(0, 2..6)), RE.find("@ abcd @")); assert_eq!(Some(Match::must(1, 0..6)), RE.find("@abcd@")); assert_eq!(Some(Match::must(0, 1..5)), RE.find("\nabcd\n")); assert_eq!(Some(Match::must(0, 1..5)), RE.find("\nabcd wxyz\n")); assert_eq!(Some(Match::must(1, 1..7)), RE.find("\n@abcd@\n")); assert_eq!(Some(Match::must(2, 0..6)), RE.find("@abcd@\r\n")); assert_eq!(Some(Match::must(1, 2..8)), RE.find("\r\n@abcd@")); assert_eq!(Some(Match::must(2, 2..8)), RE.find("\r\n@abcd@\r\n")); // Fails because we have heuristic support for Unicode word boundaries // enabled. assert!(RE.try_search(&Input::new(b"\xFF@abcd@\xFF")).is_err()); } <file_sep>/regex-automata/tests/meta/suite.rs use { anyhow::Result, regex_automata::{ meta::{self, Regex}, util::syntax, MatchKind, PatternSet, }, regex_test::{ CompiledRegex, Match, RegexTest, SearchKind, Span, TestResult, TestRunner, }, }; use crate::{create_input, suite, testify_captures}; const BLACKLIST: &[&str] = &[ // These 'earliest' tests are blacklisted because the meta searcher doesn't // give the same offsets that the test expects. This is legal because the // 'earliest' routines don't guarantee a particular match offset other // than "the earliest the regex engine can report a match." Some regex // engines will quit earlier than others. The backtracker, for example, // can't really quit before finding the full leftmost-first match. Many of // the literal searchers also don't have the ability to quit fully or it's // otherwise not worth doing. (A literal searcher not quitting as early as // possible usually means looking at a few more bytes. That's no biggie.) "earliest/", ]; /// Tests the default configuration of the meta regex engine. 
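/// The suite is expanded into `is_match`, `find` and `captures` variants, and
/// the blacklisted `earliest/` tests described above are skipped.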
#[test] fn default() -> Result<()> { let builder = Regex::builder(); let mut runner = TestRunner::new()?; runner .expand(&["is_match", "find", "captures"], |test| test.compiles()) .blacklist_iter(BLACKLIST) .test_iter(suite()?.iter(), compiler(builder)) .assert(); Ok(()) } /// Tests the default configuration minus the full DFA. #[test] fn no_dfa() -> Result<()> { let mut builder = Regex::builder(); builder.configure(Regex::config().dfa(false)); let mut runner = TestRunner::new()?; runner .expand(&["is_match", "find", "captures"], |test| test.compiles()) .blacklist_iter(BLACKLIST) .test_iter(suite()?.iter(), compiler(builder)) .assert(); Ok(()) } /// Tests the default configuration minus the full DFA and lazy DFA. #[test] fn no_dfa_hybrid() -> Result<()> { let mut builder = Regex::builder(); builder.configure(Regex::config().dfa(false).hybrid(false)); let mut runner = TestRunner::new()?; runner .expand(&["is_match", "find", "captures"], |test| test.compiles()) .blacklist_iter(BLACKLIST) .test_iter(suite()?.iter(), compiler(builder)) .assert(); Ok(()) } /// Tests the default configuration minus the full DFA, lazy DFA and one-pass /// DFA. #[test] fn no_dfa_hybrid_onepass() -> Result<()> { let mut builder = Regex::builder(); builder.configure(Regex::config().dfa(false).hybrid(false).onepass(false)); let mut runner = TestRunner::new()?; runner .expand(&["is_match", "find", "captures"], |test| test.compiles()) .blacklist_iter(BLACKLIST) .test_iter(suite()?.iter(), compiler(builder)) .assert(); Ok(()) } /// Tests the default configuration minus the full DFA, lazy DFA, one-pass /// DFA and backtracker. #[test] fn no_dfa_hybrid_onepass_backtrack() -> Result<()> { let mut builder = Regex::builder(); builder.configure( Regex::config() .dfa(false) .hybrid(false) .onepass(false) .backtrack(false), ); let mut runner = TestRunner::new()?; runner .expand(&["is_match", "find", "captures"], |test| test.compiles()) .blacklist_iter(BLACKLIST) .test_iter(suite()?.iter(), compiler(builder)) .assert(); Ok(()) } fn compiler( mut builder: meta::Builder, ) -> impl FnMut(&RegexTest, &[String]) -> Result<CompiledRegex> { move |test, regexes| { if !configure_meta_builder(test, &mut builder) { return Ok(CompiledRegex::skip()); } let re = builder.build_many(&regexes)?; Ok(CompiledRegex::compiled(move |test| -> TestResult { run_test(&re, test) })) } } fn run_test(re: &Regex, test: &RegexTest) -> TestResult { let input = create_input(test); match test.additional_name() { "is_match" => TestResult::matched(re.is_match(input)), "find" => match test.search_kind() { SearchKind::Earliest => TestResult::matches( re.find_iter(input.earliest(true)) .take(test.match_limit().unwrap_or(std::usize::MAX)) .map(|m| Match { id: m.pattern().as_usize(), span: Span { start: m.start(), end: m.end() }, }), ), SearchKind::Leftmost => TestResult::matches( re.find_iter(input) .take(test.match_limit().unwrap_or(std::usize::MAX)) .map(|m| Match { id: m.pattern().as_usize(), span: Span { start: m.start(), end: m.end() }, }), ), SearchKind::Overlapping => { let mut patset = PatternSet::new(re.pattern_len()); re.which_overlapping_matches(&input, &mut patset); TestResult::which(patset.iter().map(|p| p.as_usize())) } }, "captures" => match test.search_kind() { SearchKind::Earliest => { let it = re .captures_iter(input.earliest(true)) .take(test.match_limit().unwrap_or(std::usize::MAX)) .map(|caps| testify_captures(&caps)); TestResult::captures(it) } SearchKind::Leftmost => { let it = re .captures_iter(input) 
.take(test.match_limit().unwrap_or(std::usize::MAX)) .map(|caps| testify_captures(&caps)); TestResult::captures(it) } SearchKind::Overlapping => { // There is no overlapping regex API that supports captures. TestResult::skip() } }, name => TestResult::fail(&format!("unrecognized test name: {}", name)), } } /// Configures the given regex builder with all relevant settings on the given /// regex test. /// /// If the regex test has a setting that is unsupported, then this returns /// false (implying the test should be skipped). fn configure_meta_builder( test: &RegexTest, builder: &mut meta::Builder, ) -> bool { let match_kind = match test.match_kind() { regex_test::MatchKind::All => MatchKind::All, regex_test::MatchKind::LeftmostFirst => MatchKind::LeftmostFirst, regex_test::MatchKind::LeftmostLongest => return false, }; let meta_config = Regex::config() .match_kind(match_kind) .utf8_empty(test.utf8()) .line_terminator(test.line_terminator()); builder.configure(meta_config).syntax(config_syntax(test)); true } /// Configuration of the regex parser from a regex test. fn config_syntax(test: &RegexTest) -> syntax::Config { syntax::Config::new() .case_insensitive(test.case_insensitive()) .unicode(test.unicode()) .utf8(test.utf8()) .line_terminator(test.line_terminator()) } <file_sep>/regex-automata/src/hybrid/error.rs use crate::{hybrid::id::LazyStateIDError, nfa}; /// An error that occurs when initial construction of a lazy DFA fails. /// /// A build error can occur when insufficient cache capacity is configured or /// if something about the NFA is unsupported. (For example, if one attempts /// to build a lazy DFA without heuristic Unicode support but with an NFA that /// contains a Unicode word boundary.) /// /// This error does not provide many introspection capabilities. There are /// generally only two things you can do with it: /// /// * Obtain a human readable message via its `std::fmt::Display` impl. /// * Access an underlying /// [`nfa::thompson::BuildError`](crate::nfa::thompson::BuildError) /// type from its `source` method via the `std::error::Error` trait. This error /// only occurs when using convenience routines for building a lazy DFA /// directly from a pattern string. /// /// When the `std` feature is enabled, this implements the `std::error::Error` /// trait. 
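///
/// # Example
///
/// A minimal, hedged sketch of how callers typically interact with this
/// error: build a lazy DFA from a pattern string via the convenience
/// constructor (assumed here to be `DFA::new`) and fall back to the human
/// readable message on failure. Whether this particular pattern builds
/// successfully depends on the configuration; the point is only that a
/// failure is reported through `Display`.
///
/// ```
/// use regex_automata::hybrid::dfa::DFA;
///
/// match DFA::new(r"\bfoo\b") {
///     Ok(_dfa) => { /* run searches with a `Cache` */ }
///     Err(err) => println!("could not build lazy DFA: {}", err),
/// }
/// ```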
#[derive(Clone, Debug)] pub struct BuildError { kind: BuildErrorKind, } #[derive(Clone, Debug)] enum BuildErrorKind { NFA(nfa::thompson::BuildError), InsufficientCacheCapacity { minimum: usize, given: usize }, InsufficientStateIDCapacity { err: LazyStateIDError }, Unsupported(&'static str), } impl BuildError { pub(crate) fn nfa(err: nfa::thompson::BuildError) -> BuildError { BuildError { kind: BuildErrorKind::NFA(err) } } pub(crate) fn insufficient_cache_capacity( minimum: usize, given: usize, ) -> BuildError { BuildError { kind: BuildErrorKind::InsufficientCacheCapacity { minimum, given }, } } pub(crate) fn insufficient_state_id_capacity( err: LazyStateIDError, ) -> BuildError { BuildError { kind: BuildErrorKind::InsufficientStateIDCapacity { err }, } } pub(crate) fn unsupported_dfa_word_boundary_unicode() -> BuildError { let msg = "cannot build lazy DFAs for regexes with Unicode word \ boundaries; switch to ASCII word boundaries, or \ heuristically enable Unicode word boundaries or use a \ different regex engine"; BuildError { kind: BuildErrorKind::Unsupported(msg) } } } #[cfg(feature = "std")] impl std::error::Error for BuildError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match self.kind { BuildErrorKind::NFA(ref err) => Some(err), _ => None, } } } impl core::fmt::Display for BuildError { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { match self.kind { BuildErrorKind::NFA(_) => write!(f, "error building NFA"), BuildErrorKind::InsufficientCacheCapacity { minimum, given } => { write!( f, "given cache capacity ({}) is smaller than \ minimum required ({})", given, minimum, ) } BuildErrorKind::InsufficientStateIDCapacity { ref err } => { err.fmt(f) } BuildErrorKind::Unsupported(ref msg) => { write!(f, "unsupported regex feature for DFAs: {}", msg) } } } } /// An error that occurs when cache usage has become inefficient. /// /// One of the weaknesses of a lazy DFA is that it may need to clear its /// cache repeatedly if it's not big enough. If this happens too much, then it /// can slow searching down significantly. A mitigation to this is to use /// heuristics to detect whether the cache is being used efficiently or not. /// If not, then a lazy DFA can return a `CacheError`. /// /// The default configuration of a lazy DFA in this crate is /// set such that a `CacheError` will never occur. Instead, /// callers must opt into this behavior with settings like /// [`dfa::Config::minimum_cache_clear_count`](crate::hybrid::dfa::Config::minimum_cache_clear_count) /// and /// [`dfa::Config::minimum_bytes_per_state`](crate::hybrid::dfa::Config::minimum_bytes_per_state). /// /// When the `std` feature is enabled, this implements the `std::error::Error` /// trait. 
#[derive(Clone, Debug)] pub struct CacheError(()); impl CacheError { pub(crate) fn too_many_cache_clears() -> CacheError { CacheError(()) } pub(crate) fn bad_efficiency() -> CacheError { CacheError(()) } } #[cfg(feature = "std")] impl std::error::Error for CacheError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { None } } impl core::fmt::Display for CacheError { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { write!(f, "lazy DFA cache has been cleared too many times") } } <file_sep>/regex-cli/main.rs use std::{env, io::Write}; mod args; mod cmd; mod logger; mod util; fn main() -> anyhow::Result<()> { let rustlog = env::var("RUST_LOG").unwrap_or_else(|_| String::new()); let level = match &*rustlog { "" | "off" => log::LevelFilter::Off, "error" => log::LevelFilter::Error, "warn" => log::LevelFilter::Warn, "info" => log::LevelFilter::Info, "debug" => log::LevelFilter::Debug, "trace" => log::LevelFilter::Trace, unk => anyhow::bail!("unrecognized log level '{}'", unk), }; logger::Logger::init()?; log::set_max_level(level); if let Err(err) = cmd::run(&mut lexopt::Parser::from_env()) { if std::env::var("RUST_BACKTRACE").map_or(false, |v| v == "1") { writeln!(&mut std::io::stderr(), "{:?}", err).unwrap(); } else { writeln!(&mut std::io::stderr(), "{:#}", err).unwrap(); } std::process::exit(1); } Ok(()) } <file_sep>/testdata/bytes.toml # These are tests specifically crafted for regexes that can match arbitrary # bytes. In some cases, we also test the Unicode variant as well, just because # it's good sense to do so. But also, these tests aren't really about Unicode, # but whether matches are only reported at valid UTF-8 boundaries. For most # tests in this entire collection, utf8 = true. But for these tests, we use # utf8 = false. [[test]] name = "word-boundary-ascii" regex = ' \b' haystack = " δ" matches = [] unicode = false utf8 = false [[test]] name = "word-boundary-unicode" regex = ' \b' haystack = " δ" matches = [[0, 1]] unicode = true utf8 = false [[test]] name = "word-boundary-ascii-not" regex = ' \B' haystack = " δ" matches = [[0, 1]] unicode = false utf8 = false [[test]] name = "word-boundary-unicode-not" regex = ' \B' haystack = " δ" matches = [] unicode = true utf8 = false [[test]] name = "perl-word-ascii" regex = '\w+' haystack = "aδ" matches = [[0, 1]] unicode = false utf8 = false [[test]] name = "perl-word-unicode" regex = '\w+' haystack = "aδ" matches = [[0, 3]] unicode = true utf8 = false [[test]] name = "perl-decimal-ascii" regex = '\d+' haystack = "1२३9" matches = [[0, 1], [7, 8]] unicode = false utf8 = false [[test]] name = "perl-decimal-unicode" regex = '\d+' haystack = "1२३9" matches = [[0, 8]] unicode = true utf8 = false [[test]] name = "perl-whitespace-ascii" regex = '\s+' haystack = " \u1680" matches = [[0, 1]] unicode = false utf8 = false [[test]] name = "perl-whitespace-unicode" regex = '\s+' haystack = " \u1680" matches = [[0, 4]] unicode = true utf8 = false # The first `(.+)` matches two Unicode codepoints, but can't match the 5th # byte, which isn't valid UTF-8. The second (byte based) `(.+)` takes over and # matches. 
[[test]] name = "mixed-dot" regex = '(.+)(?-u)(.+)' haystack = '\xCE\x93\xCE\x94\xFF' matches = [ [[0, 5], [0, 4], [4, 5]], ] unescape = true unicode = true utf8 = false [[test]] name = "case-one-ascii" regex = 'a' haystack = "A" matches = [[0, 1]] case-insensitive = true unicode = false utf8 = false [[test]] name = "case-one-unicode" regex = 'a' haystack = "A" matches = [[0, 1]] case-insensitive = true unicode = true utf8 = false [[test]] name = "case-class-simple-ascii" regex = '[a-z]+' haystack = "AaAaA" matches = [[0, 5]] case-insensitive = true unicode = false utf8 = false [[test]] name = "case-class-ascii" regex = '[a-z]+' haystack = "aA\u212AaA" matches = [[0, 2], [5, 7]] case-insensitive = true unicode = false utf8 = false [[test]] name = "case-class-unicode" regex = '[a-z]+' haystack = "aA\u212AaA" matches = [[0, 7]] case-insensitive = true unicode = true utf8 = false [[test]] name = "negate-ascii" regex = '[^a]' haystack = "δ" matches = [[0, 1], [1, 2]] unicode = false utf8 = false [[test]] name = "negate-unicode" regex = '[^a]' haystack = "δ" matches = [[0, 2]] unicode = true utf8 = false # When utf8=true, this won't match, because the implicit '.*?' prefix is # Unicode aware and will refuse to match through invalid UTF-8 bytes. [[test]] name = "dotstar-prefix-ascii" regex = 'a' haystack = '\xFFa' matches = [[1, 2]] unescape = true unicode = false utf8 = false [[test]] name = "dotstar-prefix-unicode" regex = 'a' haystack = '\xFFa' matches = [[1, 2]] unescape = true unicode = true utf8 = false [[test]] name = "null-bytes" regex = '(?P<cstr>[^\x00]+)\x00' haystack = 'foo\x00' matches = [ [[0, 4], [0, 3]], ] unescape = true unicode = false utf8 = false [[test]] name = "invalid-utf8-anchor-100" regex = '\xCC?^' haystack = '\x8d#;\x1a\xa4s3\x05foobarX\\\x0f0t\xe4\x9b\xa4' matches = [[0, 0]] unescape = true unicode = false utf8 = false [[test]] name = "invalid-utf8-anchor-200" regex = '^\xf7|4\xff\d\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a##########[] d\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a##########\[] #####\x80\S7|$' haystack = '\x8d#;\x1a\xa4s3\x05foobarX\\\x0f0t\xe4\x9b\xa4' matches = [[22, 22]] unescape = true unicode = false utf8 = false [[test]] name = "invalid-utf8-anchor-300" regex = '^|ddp\xff\xffdddddlQd@\x80' haystack = '\x8d#;\x1a\xa4s3\x05foobarX\\\x0f0t\xe4\x9b\xa4' matches = [[0, 0]] unescape = true unicode = false utf8 = false [[test]] name = "word-boundary-ascii-100" regex = '\Bx\B' haystack = "áxβ" matches = [] unicode = false utf8 = false [[test]] name = "word-boundary-ascii-200" regex = '\B' haystack = "0\U0007EF5E" matches = [[2, 2], [3, 3], [4, 4], [5, 5]] unicode = false utf8 = false <file_sep>/regex-automata/src/meta/literal.rs use alloc::{vec, vec::Vec}; use regex_syntax::hir::Hir; use crate::{meta::regex::RegexInfo, util::search::MatchKind}; /// Pull out an alternation of literals from the given sequence of HIR /// expressions. /// /// There are numerous ways for this to fail. Generally, this only applies /// to regexes of the form 'foo|bar|baz|...|quux'. It can also fail if there /// are "too few" alternates, in which case, the regex engine is likely faster. /// /// And currently, this only returns something when 'hirs.len() == 1'. pub(crate) fn alternation_literals( info: &RegexInfo, hirs: &[&Hir], ) -> Option<Vec<Vec<u8>>> { use regex_syntax::hir::{HirKind, Literal}; // Might as well skip the work below if we know we can't build an // Aho-Corasick searcher. 
if !cfg!(feature = "perf-literal-multisubstring") { return None; } // This is pretty hacky, but basically, if `is_alternation_literal` is // true, then we can make several assumptions about the structure of our // HIR. This is what justifies the `unreachable!` statements below. if hirs.len() != 1 || !info.props()[0].look_set().is_empty() || info.props()[0].explicit_captures_len() > 0 || !info.props()[0].is_alternation_literal() || info.config().get_match_kind() != MatchKind::LeftmostFirst { return None; } let hir = &hirs[0]; let alts = match *hir.kind() { HirKind::Alternation(ref alts) => alts, _ => return None, // one literal isn't worth it }; let mut lits = vec![]; for alt in alts { let mut lit = vec![]; match *alt.kind() { HirKind::Literal(Literal(ref bytes)) => { lit.extend_from_slice(bytes) } HirKind::Concat(ref exprs) => { for e in exprs { match *e.kind() { HirKind::Literal(Literal(ref bytes)) => { lit.extend_from_slice(bytes); } _ => unreachable!("expected literal, got {:?}", e), } } } _ => unreachable!("expected literal or concat, got {:?}", alt), } lits.push(lit); } // Why do this? Well, when the number of literals is small, it's likely // that we'll use the lazy DFA which is in turn likely to be faster than // Aho-Corasick in such cases. Primarily because Aho-Corasick doesn't have // a "lazy DFA" but either a contiguous NFA or a full DFA. We rarely use // the latter because it is so hungry (in time and space), and the former // is decently fast, but not as fast as a well oiled lazy DFA. // // However, once the number starts getting large, the lazy DFA is likely // to start thrashing because of the modest default cache size. When // exactly does this happen? Dunno. But at whatever point that is (we make // a guess below based on ad hoc benchmarking), we'll want to cut over to // Aho-Corasick, where even the contiguous NFA is likely to do much better. if lits.len() < 3000 { debug!("skipping Aho-Corasick because there are too few literals"); return None; } Some(lits) } <file_sep>/regex-automata/src/util/unicode_data/mod.rs // This cfg should match the one in src/util/look.rs that uses perl_word. #[cfg(all( // We have to explicitly want to support Unicode word boundaries. feature = "unicode-word-boundary", not(all( // If we don't have regex-syntax at all, then we definitely need to // bring our own \w data table. feature = "syntax", // If unicode-perl is enabled, then regex-syntax/unicode-perl is // also enabled, which in turn means we can use regex-syntax's // is_word_character routine (and thus use its data tables). But if // unicode-perl is not enabled, even if syntax is, then we need to // bring our own. feature = "unicode-perl", )), ))] pub(crate) mod perl_word; <file_sep>/regex-lite/src/error.rs /// An error that occurred during parsing or compiling a regular expression. /// /// A parse error occurs when the syntax of the regex pattern is not /// valid. Otherwise, a regex can still fail to build if it would /// result in a machine that exceeds the configured size limit, via /// [`RegexBuilder::size_limit`](crate::RegexBuilder::size_limit). /// /// This error type provides no introspection capabilities. The only thing you /// can do with it is convert it to a string as a human readable error message. 
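///
/// # Example
///
/// A minimal sketch (using the crate's top-level `Regex::new` constructor)
/// showing how an invalid pattern surfaces as this error and how the message
/// is the only thing available from it:
///
/// ```
/// use regex_lite::Regex;
///
/// // An unclosed group is a syntax error, so construction fails.
/// assert!(Regex::new(r"(foo").is_err());
///
/// // The only supported introspection is the human readable message.
/// if let Err(err) = Regex::new(r"(foo") {
///     assert!(!err.to_string().is_empty());
/// }
/// ```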
#[derive(Clone, Debug, Eq, PartialEq)] pub struct Error { msg: &'static str, } impl Error { pub(crate) fn new(msg: &'static str) -> Error { Error { msg } } } #[cfg(feature = "std")] impl std::error::Error for Error {} impl core::fmt::Display for Error { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { write!(f, "{}", self.msg) } } <file_sep>/regex-capi/ctest/compile #!/bin/sh set -ex cargo build --manifest-path ../Cargo.toml gcc -DDEBUG -o test test.c -ansi -Wall -I../include -L../../target/debug -lrure # If you're using librure.a, then you'll need to link other stuff: # -lutil -ldl -lpthread -lgcc_s -lc -lm -lrt -lutil -lrure <file_sep>/regex-capi/src/rure.rs use std::collections::HashMap; use std::ffi::{CStr, CString}; use std::ops::Deref; use std::ptr; use std::slice; use std::str; use libc::{c_char, size_t}; use regex::bytes; use crate::error::{Error, ErrorKind}; const RURE_FLAG_CASEI: u32 = 1 << 0; const RURE_FLAG_MULTI: u32 = 1 << 1; const RURE_FLAG_DOTNL: u32 = 1 << 2; const RURE_FLAG_SWAP_GREED: u32 = 1 << 3; const RURE_FLAG_SPACE: u32 = 1 << 4; const RURE_FLAG_UNICODE: u32 = 1 << 5; const RURE_DEFAULT_FLAGS: u32 = RURE_FLAG_UNICODE; pub struct Regex { re: bytes::Regex, capture_names: HashMap<String, i32>, } pub struct Options { size_limit: usize, dfa_size_limit: usize, } // The `RegexSet` is not exposed with option support or matching at an // arbitrary position with a crate just yet. To circumvent this, we use // the `Exec` structure directly. pub struct RegexSet { re: bytes::RegexSet, } #[repr(C)] pub struct rure_match { pub start: size_t, pub end: size_t, } pub struct Captures(bytes::CaptureLocations); pub struct Iter { re: *const Regex, last_end: usize, last_match: Option<usize>, } pub struct IterCaptureNames { capture_names: bytes::CaptureNames<'static>, name_ptrs: Vec<*mut c_char>, } impl Deref for Regex { type Target = bytes::Regex; fn deref(&self) -> &bytes::Regex { &self.re } } impl Deref for RegexSet { type Target = bytes::RegexSet; fn deref(&self) -> &bytes::RegexSet { &self.re } } impl Default for Options { fn default() -> Options { Options { size_limit: 10 * (1 << 20), dfa_size_limit: 2 * (1 << 20) } } } ffi_fn! { fn rure_compile_must(pattern: *const c_char) -> *const Regex { let len = unsafe { CStr::from_ptr(pattern).to_bytes().len() }; let pat = pattern as *const u8; let mut err = Error::new(ErrorKind::None); let re = rure_compile( pat, len, RURE_DEFAULT_FLAGS, ptr::null(), &mut err); if err.is_err() { let _ = writeln!(&mut io::stderr(), "{}", err); let _ = writeln!( &mut io::stderr(), "aborting from rure_compile_must"); unsafe { abort() } } re } } ffi_fn! 
{ fn rure_compile( pattern: *const u8, length: size_t, flags: u32, options: *const Options, error: *mut Error, ) -> *const Regex { let pat = unsafe { slice::from_raw_parts(pattern, length) }; let pat = match str::from_utf8(pat) { Ok(pat) => pat, Err(err) => { unsafe { if !error.is_null() { *error = Error::new(ErrorKind::Str(err)); } return ptr::null(); } } }; let mut builder = bytes::RegexBuilder::new(pat); if !options.is_null() { let options = unsafe { &*options }; builder.size_limit(options.size_limit); builder.dfa_size_limit(options.dfa_size_limit); } builder.case_insensitive(flags & RURE_FLAG_CASEI > 0); builder.multi_line(flags & RURE_FLAG_MULTI > 0); builder.dot_matches_new_line(flags & RURE_FLAG_DOTNL > 0); builder.swap_greed(flags & RURE_FLAG_SWAP_GREED > 0); builder.ignore_whitespace(flags & RURE_FLAG_SPACE > 0); builder.unicode(flags & RURE_FLAG_UNICODE > 0); match builder.build() { Ok(re) => { let mut capture_names = HashMap::new(); for (i, name) in re.capture_names().enumerate() { if let Some(name) = name { capture_names.insert(name.to_owned(), i as i32); } } let re = Regex { re: re, capture_names: capture_names, }; Box::into_raw(Box::new(re)) } Err(err) => { unsafe { if !error.is_null() { *error = Error::new(ErrorKind::Regex(err)); } ptr::null() } } } } } ffi_fn! { fn rure_free(re: *const Regex) { unsafe { drop(Box::from_raw(re as *mut Regex)); } } } ffi_fn! { fn rure_is_match( re: *const Regex, haystack: *const u8, len: size_t, start: size_t, ) -> bool { let re = unsafe { &*re }; let haystack = unsafe { slice::from_raw_parts(haystack, len) }; re.is_match_at(haystack, start) } } ffi_fn! { fn rure_find( re: *const Regex, haystack: *const u8, len: size_t, start: size_t, match_info: *mut rure_match, ) -> bool { let re = unsafe { &*re }; let haystack = unsafe { slice::from_raw_parts(haystack, len) }; re.find_at(haystack, start).map(|m| unsafe { if !match_info.is_null() { (*match_info).start = m.start(); (*match_info).end = m.end(); } }).is_some() } } ffi_fn! { fn rure_find_captures( re: *const Regex, haystack: *const u8, len: size_t, start: size_t, captures: *mut Captures, ) -> bool { let re = unsafe { &*re }; let haystack = unsafe { slice::from_raw_parts(haystack, len) }; let slots = unsafe { &mut (*captures).0 }; re.captures_read_at(slots, haystack, start).is_some() } } ffi_fn! { fn rure_shortest_match( re: *const Regex, haystack: *const u8, len: size_t, start: size_t, end: *mut usize, ) -> bool { let re = unsafe { &*re }; let haystack = unsafe { slice::from_raw_parts(haystack, len) }; match re.shortest_match_at(haystack, start) { None => false, Some(i) => { if !end.is_null() { unsafe { *end = i; } } true } } } } ffi_fn! { fn rure_capture_name_index( re: *const Regex, name: *const c_char, ) -> i32 { let re = unsafe { &*re }; let name = unsafe { CStr::from_ptr(name) }; let name = match name.to_str() { Err(_) => return -1, Ok(name) => name, }; re.capture_names.get(name).map(|&i|i).unwrap_or(-1) } } ffi_fn! { fn rure_iter_capture_names_new( re: *const Regex, ) -> *mut IterCaptureNames { let re = unsafe { &*re }; Box::into_raw(Box::new(IterCaptureNames { capture_names: re.re.capture_names(), name_ptrs: Vec::new(), })) } } ffi_fn! { fn rure_iter_capture_names_free(it: *mut IterCaptureNames) { unsafe { let it = &mut *it; while let Some(ptr) = it.name_ptrs.pop() { drop(CString::from_raw(ptr)); } drop(Box::from_raw(it)); } } } ffi_fn! 
{ fn rure_iter_capture_names_next( it: *mut IterCaptureNames, capture_name: *mut *mut c_char, ) -> bool { if capture_name.is_null() { return false; } let it = unsafe { &mut *it }; let cn = match it.capture_names.next() { // Top-level iterator ran out of capture groups None => return false, Some(val) => { let name = match val { // inner Option didn't have a name None => "", Some(name) => name }; name } }; unsafe { let cs = match CString::new(cn.as_bytes()) { Result::Ok(val) => val, Result::Err(_) => return false }; let ptr = cs.into_raw(); it.name_ptrs.push(ptr); *capture_name = ptr; } true } } ffi_fn! { fn rure_iter_new( re: *const Regex, ) -> *mut Iter { Box::into_raw(Box::new(Iter { re: re, last_end: 0, last_match: None, })) } } ffi_fn! { fn rure_iter_free(it: *mut Iter) { unsafe { drop(Box::from_raw(it)); } } } ffi_fn! { fn rure_iter_next( it: *mut Iter, haystack: *const u8, len: size_t, match_info: *mut rure_match, ) -> bool { let it = unsafe { &mut *it }; let re = unsafe { &*it.re }; let text = unsafe { slice::from_raw_parts(haystack, len) }; if it.last_end > text.len() { return false; } let (s, e) = match re.find_at(text, it.last_end) { None => return false, Some(m) => (m.start(), m.end()), }; if s == e { // This is an empty match. To ensure we make progress, start // the next search at the smallest possible starting position // of the next match following this one. it.last_end += 1; // Don't accept empty matches immediately following a match. // Just move on to the next match. if Some(e) == it.last_match { return rure_iter_next(it, haystack, len, match_info); } } else { it.last_end = e; } it.last_match = Some(e); if !match_info.is_null() { unsafe { (*match_info).start = s; (*match_info).end = e; } } true } } ffi_fn! { fn rure_iter_next_captures( it: *mut Iter, haystack: *const u8, len: size_t, captures: *mut Captures, ) -> bool { let it = unsafe { &mut *it }; let re = unsafe { &*it.re }; let slots = unsafe { &mut (*captures).0 }; let text = unsafe { slice::from_raw_parts(haystack, len) }; if it.last_end > text.len() { return false; } let (s, e) = match re.captures_read_at(slots, text, it.last_end) { None => return false, Some(m) => (m.start(), m.end()), }; if s == e { // This is an empty match. To ensure we make progress, start // the next search at the smallest possible starting position // of the next match following this one. it.last_end += 1; // Don't accept empty matches immediately following a match. // Just move on to the next match. if Some(e) == it.last_match { return rure_iter_next_captures(it, haystack, len, captures); } } else { it.last_end = e; } it.last_match = Some(e); true } } ffi_fn! { fn rure_captures_new(re: *const Regex) -> *mut Captures { let re = unsafe { &*re }; let captures = Captures(re.capture_locations()); Box::into_raw(Box::new(captures)) } } ffi_fn! { fn rure_captures_free(captures: *const Captures) { unsafe { drop(Box::from_raw(captures as *mut Captures)); } } } ffi_fn! { fn rure_captures_at( captures: *const Captures, i: size_t, match_info: *mut rure_match, ) -> bool { let locs = unsafe { &(*captures).0 }; match locs.get(i) { Some((start, end)) => { if !match_info.is_null() { unsafe { (*match_info).start = start; (*match_info).end = end; } } true } _ => false } } } ffi_fn! { fn rure_captures_len(captures: *const Captures) -> size_t { unsafe { (*captures).0.len() } } } ffi_fn! { fn rure_options_new() -> *mut Options { Box::into_raw(Box::new(Options::default())) } } ffi_fn! 
{ fn rure_options_free(options: *mut Options) { unsafe { drop(Box::from_raw(options)); } } } ffi_fn! { fn rure_options_size_limit(options: *mut Options, limit: size_t) { let options = unsafe { &mut *options }; options.size_limit = limit; } } ffi_fn! { fn rure_options_dfa_size_limit(options: *mut Options, limit: size_t) { let options = unsafe { &mut *options }; options.dfa_size_limit = limit; } } ffi_fn! { fn rure_compile_set( patterns: *const *const u8, patterns_lengths: *const size_t, patterns_count: size_t, flags: u32, options: *const Options, error: *mut Error ) -> *const RegexSet { let (raw_pats, raw_patsl) = unsafe { ( slice::from_raw_parts(patterns, patterns_count), slice::from_raw_parts(patterns_lengths, patterns_count) ) }; let mut pats = Vec::with_capacity(patterns_count); for (&raw_pat, &raw_patl) in raw_pats.iter().zip(raw_patsl) { let pat = unsafe { slice::from_raw_parts(raw_pat, raw_patl) }; pats.push(match str::from_utf8(pat) { Ok(pat) => pat, Err(err) => { unsafe { if !error.is_null() { *error = Error::new(ErrorKind::Str(err)); } return ptr::null(); } } }); } let mut builder = bytes::RegexSetBuilder::new(pats); if !options.is_null() { let options = unsafe { &*options }; builder.size_limit(options.size_limit); builder.dfa_size_limit(options.dfa_size_limit); } builder.case_insensitive(flags & RURE_FLAG_CASEI > 0); builder.multi_line(flags & RURE_FLAG_MULTI > 0); builder.dot_matches_new_line(flags & RURE_FLAG_DOTNL > 0); builder.swap_greed(flags & RURE_FLAG_SWAP_GREED > 0); builder.ignore_whitespace(flags & RURE_FLAG_SPACE > 0); builder.unicode(flags & RURE_FLAG_UNICODE > 0); match builder.build() { Ok(re) => { Box::into_raw(Box::new(RegexSet { re: re })) } Err(err) => { unsafe { if !error.is_null() { *error = Error::new(ErrorKind::Regex(err)) } ptr::null() } } } } } ffi_fn! { fn rure_set_free(re: *const RegexSet) { unsafe { drop(Box::from_raw(re as *mut RegexSet)); } } } ffi_fn! { fn rure_set_is_match( re: *const RegexSet, haystack: *const u8, len: size_t, start: size_t ) -> bool { let re = unsafe { &*re }; let haystack = unsafe { slice::from_raw_parts(haystack, len) }; re.is_match_at(haystack, start) } } ffi_fn! { fn rure_set_matches( re: *const RegexSet, haystack: *const u8, len: size_t, start: size_t, matches: *mut bool ) -> bool { let re = unsafe { &*re }; let mut matches = unsafe { slice::from_raw_parts_mut(matches, re.len()) }; let haystack = unsafe { slice::from_raw_parts(haystack, len) }; // read_matches_at isn't guaranteed to set non-matches to false for item in matches.iter_mut() { *item = false; } re.matches_read_at(&mut matches, haystack, start) } } ffi_fn! { fn rure_set_len(re: *const RegexSet) -> size_t { unsafe { (*re).len() } } } ffi_fn! { fn rure_escape_must(pattern: *const c_char) -> *const c_char { let len = unsafe { CStr::from_ptr(pattern).to_bytes().len() }; let pat = pattern as *const u8; let mut err = Error::new(ErrorKind::None); let esc = rure_escape(pat, len, &mut err); if err.is_err() { let _ = writeln!(&mut io::stderr(), "{}", err); let _ = writeln!( &mut io::stderr(), "aborting from rure_escape_must"); unsafe { abort() } } esc } } /// A helper function that implements fallible escaping in a way that returns /// an error if escaping failed. /// /// This should ideally be exposed, but it needs API design work. In /// particular, this should not return a C string, but a `const uint8_t *` /// instead, since it may contain a NUL byte. 
fn rure_escape( pattern: *const u8, length: size_t, error: *mut Error, ) -> *const c_char { let pat: &[u8] = unsafe { slice::from_raw_parts(pattern, length) }; let str_pat = match str::from_utf8(pat) { Ok(val) => val, Err(err) => unsafe { if !error.is_null() { *error = Error::new(ErrorKind::Str(err)); } return ptr::null(); }, }; let esc_pat = regex::escape(str_pat); let c_esc_pat = match CString::new(esc_pat) { Ok(val) => val, Err(err) => unsafe { if !error.is_null() { *error = Error::new(ErrorKind::Nul(err)); } return ptr::null(); }, }; c_esc_pat.into_raw() as *const c_char } ffi_fn! { fn rure_cstring_free(s: *mut c_char) { unsafe { drop(CString::from_raw(s)); } } } <file_sep>/testdata/leftmost-all.toml [[test]] name = "alt" regex = 'foo|foobar' haystack = "foobar" matches = [[0, 6]] match-kind = "all" search-kind = "leftmost" [[test]] name = "multi" regex = ['foo', 'foobar'] haystack = "foobar" matches = [ { id = 1, span = [0, 6] }, ] match-kind = "all" search-kind = "leftmost" [[test]] name = "dotall" regex = '(?s:.)' haystack = "foobar" matches = [[5, 6]] match-kind = "all" search-kind = "leftmost" <file_sep>/regex-automata/tests/dfa/suite.rs use { anyhow::Result, regex_automata::{ dfa::{ self, dense, regex::Regex, sparse, Automaton, OverlappingState, StartKind, }, nfa::thompson, util::{prefilter::Prefilter, syntax}, Anchored, Input, PatternSet, }, regex_syntax::hir, regex_test::{ CompiledRegex, Match, RegexTest, SearchKind, Span, TestResult, TestRunner, }, }; use crate::{create_input, suite, untestify_kind}; const EXPANSIONS: &[&str] = &["is_match", "find", "which"]; /// Runs the test suite with the default configuration. #[test] fn unminimized_default() -> Result<()> { let builder = Regex::builder(); TestRunner::new()? .expand(EXPANSIONS, |t| t.compiles()) .blacklist("expensive") .test_iter(suite()?.iter(), dense_compiler(builder)) .assert(); Ok(()) } /// Runs the test suite with the default configuration and a prefilter enabled, /// if one can be built. #[test] fn unminimized_prefilter() -> Result<()> { let my_compiler = |test: &RegexTest, regexes: &[String]| { // Parse regexes as HIRs so we can get literals to build a prefilter. let mut hirs = vec![]; for pattern in regexes.iter() { hirs.push(syntax::parse_with(pattern, &config_syntax(test))?); } let kind = match untestify_kind(test.match_kind()) { None => return Ok(CompiledRegex::skip()), Some(kind) => kind, }; let pre = Prefilter::from_hirs_prefix(kind, &hirs); let mut builder = Regex::builder(); builder.dense(dense::DFA::config().prefilter(pre)); compiler(builder, |_, _, re| { Ok(CompiledRegex::compiled(move |test| -> TestResult { run_test(&re, test) })) })(test, regexes) }; TestRunner::new()? .expand(EXPANSIONS, |t| t.compiles()) .blacklist("expensive") .test_iter(suite()?.iter(), my_compiler) .assert(); Ok(()) } /// Runs the test suite with start states specialized. #[test] fn unminimized_specialized_start_states() -> Result<()> { let mut builder = Regex::builder(); builder.dense(dense::Config::new().specialize_start_states(true)); TestRunner::new()? .expand(EXPANSIONS, |t| t.compiles()) .blacklist("expensive") .test_iter(suite()?.iter(), dense_compiler(builder)) .assert(); Ok(()) } /// Runs the test suite with byte classes disabled. #[test] fn unminimized_no_byte_class() -> Result<()> { let mut builder = Regex::builder(); builder.dense(dense::Config::new().byte_classes(false)); TestRunner::new()? 
.expand(EXPANSIONS, |t| t.compiles()) .blacklist("expensive") .test_iter(suite()?.iter(), dense_compiler(builder)) .assert(); Ok(()) } /// Runs the test suite with NFA shrinking enabled. #[test] fn unminimized_nfa_shrink() -> Result<()> { let mut builder = Regex::builder(); builder.thompson(thompson::Config::new().shrink(true)); TestRunner::new()? .expand(EXPANSIONS, |t| t.compiles()) .blacklist("expensive") .test_iter(suite()?.iter(), dense_compiler(builder)) .assert(); Ok(()) } /// Runs the test suite on a minimized DFA with an otherwise default /// configuration. #[test] fn minimized_default() -> Result<()> { let mut builder = Regex::builder(); builder.dense(dense::Config::new().minimize(true)); TestRunner::new()? .expand(EXPANSIONS, |t| t.compiles()) .blacklist("expensive") .test_iter(suite()?.iter(), dense_compiler(builder)) .assert(); Ok(()) } /// Runs the test suite on a minimized DFA with byte classes disabled. #[test] fn minimized_no_byte_class() -> Result<()> { let mut builder = Regex::builder(); builder.dense(dense::Config::new().minimize(true).byte_classes(false)); TestRunner::new()? .expand(EXPANSIONS, |t| t.compiles()) .blacklist("expensive") .test_iter(suite()?.iter(), dense_compiler(builder)) .assert(); Ok(()) } /// Runs the test suite on a sparse unminimized DFA. #[test] fn sparse_unminimized_default() -> Result<()> { let builder = Regex::builder(); TestRunner::new()? .expand(EXPANSIONS, |t| t.compiles()) .blacklist("expensive") .test_iter(suite()?.iter(), sparse_compiler(builder)) .assert(); Ok(()) } /// Runs the test suite on a sparse unminimized DFA with prefilters enabled. #[test] fn sparse_unminimized_prefilter() -> Result<()> { let my_compiler = |test: &RegexTest, regexes: &[String]| { // Parse regexes as HIRs so we can get literals to build a prefilter. let mut hirs = vec![]; for pattern in regexes.iter() { hirs.push(syntax::parse_with(pattern, &config_syntax(test))?); } let kind = match untestify_kind(test.match_kind()) { None => return Ok(CompiledRegex::skip()), Some(kind) => kind, }; let pre = Prefilter::from_hirs_prefix(kind, &hirs); let mut builder = Regex::builder(); builder.dense(dense::DFA::config().prefilter(pre)); compiler(builder, |builder, _, re| { let fwd = re.forward().to_sparse()?; let rev = re.reverse().to_sparse()?; let re = builder.build_from_dfas(fwd, rev); Ok(CompiledRegex::compiled(move |test| -> TestResult { run_test(&re, test) })) })(test, regexes) }; TestRunner::new()? .expand(EXPANSIONS, |t| t.compiles()) .blacklist("expensive") .test_iter(suite()?.iter(), my_compiler) .assert(); Ok(()) } /// Another basic sanity test that checks we can serialize and then deserialize /// a regex, and that the resulting regex can be used for searching correctly. #[test] fn serialization_unminimized_default() -> Result<()> { let builder = Regex::builder(); let my_compiler = |builder| { compiler(builder, |builder, _, re| { let builder = builder.clone(); let (fwd_bytes, _) = re.forward().to_bytes_native_endian(); let (rev_bytes, _) = re.reverse().to_bytes_native_endian(); Ok(CompiledRegex::compiled(move |test| -> TestResult { let fwd: dense::DFA<&[u32]> = dense::DFA::from_bytes(&fwd_bytes).unwrap().0; let rev: dense::DFA<&[u32]> = dense::DFA::from_bytes(&rev_bytes).unwrap().0; let re = builder.build_from_dfas(fwd, rev); run_test(&re, test) })) }) }; TestRunner::new()? 
.expand(EXPANSIONS, |t| t.compiles()) .blacklist("expensive") .test_iter(suite()?.iter(), my_compiler(builder)) .assert(); Ok(()) } /// A basic sanity test that checks we can serialize and then deserialize a /// regex using sparse DFAs, and that the resulting regex can be used for /// searching correctly. #[test] fn sparse_serialization_unminimized_default() -> Result<()> { let builder = Regex::builder(); let my_compiler = |builder| { compiler(builder, |builder, _, re| { let builder = builder.clone(); let fwd_bytes = re.forward().to_sparse()?.to_bytes_native_endian(); let rev_bytes = re.reverse().to_sparse()?.to_bytes_native_endian(); Ok(CompiledRegex::compiled(move |test| -> TestResult { let fwd: sparse::DFA<&[u8]> = sparse::DFA::from_bytes(&fwd_bytes).unwrap().0; let rev: sparse::DFA<&[u8]> = sparse::DFA::from_bytes(&rev_bytes).unwrap().0; let re = builder.build_from_dfas(fwd, rev); run_test(&re, test) })) }) }; TestRunner::new()? .expand(EXPANSIONS, |t| t.compiles()) .blacklist("expensive") .test_iter(suite()?.iter(), my_compiler(builder)) .assert(); Ok(()) } fn dense_compiler( builder: dfa::regex::Builder, ) -> impl FnMut(&RegexTest, &[String]) -> Result<CompiledRegex> { compiler(builder, |_, _, re| { Ok(CompiledRegex::compiled(move |test| -> TestResult { run_test(&re, test) })) }) } fn sparse_compiler( builder: dfa::regex::Builder, ) -> impl FnMut(&RegexTest, &[String]) -> Result<CompiledRegex> { compiler(builder, |builder, _, re| { let fwd = re.forward().to_sparse()?; let rev = re.reverse().to_sparse()?; let re = builder.build_from_dfas(fwd, rev); Ok(CompiledRegex::compiled(move |test| -> TestResult { run_test(&re, test) })) }) } fn compiler( mut builder: dfa::regex::Builder, mut create_matcher: impl FnMut( &dfa::regex::Builder, Option<Prefilter>, Regex, ) -> Result<CompiledRegex>, ) -> impl FnMut(&RegexTest, &[String]) -> Result<CompiledRegex> { move |test, regexes| { // Parse regexes as HIRs for some analysis below. let mut hirs = vec![]; for pattern in regexes.iter() { hirs.push(syntax::parse_with(pattern, &config_syntax(test))?); } // Get a prefilter in case the test wants it. let kind = match untestify_kind(test.match_kind()) { None => return Ok(CompiledRegex::skip()), Some(kind) => kind, }; let pre = Prefilter::from_hirs_prefix(kind, &hirs); // Check if our regex contains things that aren't supported by DFAs. // That is, Unicode word boundaries when searching non-ASCII text. if !test.haystack().is_ascii() { for hir in hirs.iter() { let looks = hir.properties().look_set(); if looks.contains(hir::Look::WordUnicode) || looks.contains(hir::Look::WordUnicodeNegate) { return Ok(CompiledRegex::skip()); } } } if !configure_regex_builder(test, &mut builder) { return Ok(CompiledRegex::skip()); } create_matcher(&builder, pre, builder.build_many(&regexes)?) 
} } fn run_test<A: Automaton>(re: &Regex<A>, test: &RegexTest) -> TestResult { let input = create_input(test); match test.additional_name() { "is_match" => TestResult::matched(re.is_match(input.earliest(true))), "find" => match test.search_kind() { SearchKind::Earliest | SearchKind::Leftmost => { let input = input.earliest(test.search_kind() == SearchKind::Earliest); TestResult::matches( re.find_iter(input) .take(test.match_limit().unwrap_or(std::usize::MAX)) .map(|m| Match { id: m.pattern().as_usize(), span: Span { start: m.start(), end: m.end() }, }), ) } SearchKind::Overlapping => { try_search_overlapping(re, &input).unwrap() } }, "which" => match test.search_kind() { SearchKind::Earliest | SearchKind::Leftmost => { // There are no "which" APIs for standard searches. TestResult::skip() } SearchKind::Overlapping => { let dfa = re.forward(); let mut patset = PatternSet::new(dfa.pattern_len()); dfa.try_which_overlapping_matches(&input, &mut patset) .unwrap(); TestResult::which(patset.iter().map(|p| p.as_usize())) } }, name => TestResult::fail(&format!("unrecognized test name: {}", name)), } } /// Configures the given regex builder with all relevant settings on the given /// regex test. /// /// If the regex test has a setting that is unsupported, then this returns /// false (implying the test should be skipped). fn configure_regex_builder( test: &RegexTest, builder: &mut dfa::regex::Builder, ) -> bool { let match_kind = match untestify_kind(test.match_kind()) { None => return false, Some(k) => k, }; let starts = if test.anchored() { StartKind::Anchored } else { StartKind::Unanchored }; let mut dense_config = dense::Config::new() .start_kind(starts) .match_kind(match_kind) .unicode_word_boundary(true); // When doing an overlapping search, we might try to find the start of each // match with a custom search routine. In that case, we need to tell the // reverse search (for the start offset) which pattern to look for. The // only way that API works is when anchored starting states are compiled // for each pattern. This does technically also enable it for the forward // DFA, but we're okay with that. if test.search_kind() == SearchKind::Overlapping { dense_config = dense_config.starts_for_each_pattern(true); } builder .syntax(config_syntax(test)) .thompson(config_thompson(test)) .dense(dense_config); true } /// Configuration of a Thompson NFA compiler from a regex test. fn config_thompson(test: &RegexTest) -> thompson::Config { let mut lookm = regex_automata::util::look::LookMatcher::new(); lookm.set_line_terminator(test.line_terminator()); thompson::Config::new().utf8(test.utf8()).look_matcher(lookm) } /// Configuration of the regex syntax from a regex test. fn config_syntax(test: &RegexTest) -> syntax::Config { syntax::Config::new() .case_insensitive(test.case_insensitive()) .unicode(test.unicode()) .utf8(test.utf8()) .line_terminator(test.line_terminator()) } /// Execute an overlapping search, and for each match found, also find its /// overlapping starting positions. /// /// N.B. This routine used to be part of the crate API, but 1) it wasn't clear /// to me how useful it was and 2) it wasn't clear to me what its semantics /// should be. In particular, a potentially surprising footgun of this routine /// that it is worst case *quadratic* in the size of the haystack. Namely, it's /// possible to report a match at every position, and for every such position, /// scan all the way to the beginning of the haystack to find the starting /// position. 
Typical leftmost non-overlapping searches don't suffer from this /// because, well, matches can't overlap. So subsequent searches after a match /// is found don't revisit previously scanned parts of the haystack. /// /// Its semantics can be strange for other reasons too. For example, given /// the regex '.*' and the haystack 'zz', the full set of overlapping matches /// is: [0, 0], [1, 1], [0, 1], [2, 2], [1, 2], [0, 2]. The ordering of /// those matches is quite strange, but makes sense when you think about the /// implementation: an end offset is found left-to-right, and then one or more /// starting offsets are found right-to-left. /// /// Nevertheless, we provide this routine in our test suite because it's /// useful to test the low level DFA overlapping search and our test suite /// is written in a way that requires starting offsets. fn try_search_overlapping<A: Automaton>( re: &Regex<A>, input: &Input<'_>, ) -> Result<TestResult> { let mut matches = vec![]; let mut fwd_state = OverlappingState::start(); let (fwd_dfa, rev_dfa) = (re.forward(), re.reverse()); while let Some(end) = { fwd_dfa.try_search_overlapping_fwd(input, &mut fwd_state)?; fwd_state.get_match() } { let revsearch = input .clone() .range(input.start()..end.offset()) .anchored(Anchored::Pattern(end.pattern())) .earliest(false); let mut rev_state = OverlappingState::start(); while let Some(start) = { rev_dfa.try_search_overlapping_rev(&revsearch, &mut rev_state)?; rev_state.get_match() } { let span = Span { start: start.offset(), end: end.offset() }; let mat = Match { id: end.pattern().as_usize(), span }; matches.push(mat); } } Ok(TestResult::matches(matches)) } <file_sep>/testdata/iter.toml [[test]] name = "1" regex = "a" haystack = "aaa" matches = [[0, 1], [1, 2], [2, 3]] [[test]] name = "2" regex = "a" haystack = "aba" matches = [[0, 1], [2, 3]] [[test]] name = "empty1" regex = '' haystack = '' matches = [[0, 0]] [[test]] name = "empty2" regex = '' haystack = 'abc' matches = [[0, 0], [1, 1], [2, 2], [3, 3]] [[test]] name = "empty3" regex = '(?:)' haystack = 'abc' matches = [[0, 0], [1, 1], [2, 2], [3, 3]] [[test]] name = "empty4" regex = '(?:)*' haystack = 'abc' matches = [[0, 0], [1, 1], [2, 2], [3, 3]] [[test]] name = "empty5" regex = '(?:)+' haystack = 'abc' matches = [[0, 0], [1, 1], [2, 2], [3, 3]] [[test]] name = "empty6" regex = '(?:)?' haystack = 'abc' matches = [[0, 0], [1, 1], [2, 2], [3, 3]] [[test]] name = "empty7" regex = '(?:)(?:)' haystack = 'abc' matches = [[0, 0], [1, 1], [2, 2], [3, 3]] [[test]] name = "empty8" regex = '(?:)+|z' haystack = 'abc' matches = [[0, 0], [1, 1], [2, 2], [3, 3]] [[test]] name = "empty9" regex = 'z|(?:)+' haystack = 'abc' matches = [[0, 0], [1, 1], [2, 2], [3, 3]] [[test]] name = "empty10" regex = '(?:)+|b' haystack = 'abc' matches = [[0, 0], [1, 1], [2, 2], [3, 3]] [[test]] name = "empty11" regex = 'b|(?:)+' haystack = 'abc' matches = [[0, 0], [1, 2], [3, 3]] [[test]] name = "start1" regex = "^a" haystack = "a" matches = [[0, 1]] [[test]] name = "start2" regex = "^a" haystack = "aa" matches = [[0, 1]] [[test]] name = "anchored1" regex = "a" haystack = "a" matches = [[0, 1]] anchored = true # This test is pretty subtle. It demonstrates the crucial difference between # '^a' and 'a' compiled in 'anchored' mode. The former regex exclusively # matches at the start of a haystack and nowhere else. The latter regex has # no such restriction, but its automaton is constructed such that it lacks a # `.*?` prefix. So it can actually produce matches at multiple locations. 
# The anchored3 test drives this point home. [[test]] name = "anchored2" regex = "a" haystack = "aa" matches = [[0, 1], [1, 2]] anchored = true # Unlikely anchored2, this test stops matching anything after it sees `b` # since it lacks a `.*?` prefix. Since it is looking for 'a' but sees 'b', it # determines that there are no remaining matches. [[test]] name = "anchored3" regex = "a" haystack = "aaba" matches = [[0, 1], [1, 2]] anchored = true [[test]] name = "nonempty-followedby-empty" regex = 'abc|.*?' haystack = "abczzz" matches = [[0, 3], [4, 4], [5, 5], [6, 6]] [[test]] name = "nonempty-followedby-oneempty" regex = 'abc|.*?' haystack = "abcz" matches = [[0, 3], [4, 4]] [[test]] name = "nonempty-followedby-onemixed" regex = 'abc|.*?' haystack = "abczabc" matches = [[0, 3], [4, 7]] [[test]] name = "nonempty-followedby-twomixed" regex = 'abc|.*?' haystack = "abczzabc" matches = [[0, 3], [4, 4], [5, 8]] <file_sep>/regex-syntax/src/debug.rs /// A type that wraps a single byte with a convenient fmt::Debug impl that /// escapes the byte. pub(crate) struct Byte(pub(crate) u8); impl core::fmt::Debug for Byte { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { // Special case ASCII space. It's too hard to read otherwise, so // put quotes around it. I sometimes wonder whether just '\x20' would // be better... if self.0 == b' ' { return write!(f, "' '"); } // 10 bytes is enough to cover any output from ascii::escape_default. let mut bytes = [0u8; 10]; let mut len = 0; for (i, mut b) in core::ascii::escape_default(self.0).enumerate() { // capitalize \xab to \xAB if i >= 2 && b'a' <= b && b <= b'f' { b -= 32; } bytes[len] = b; len += 1; } write!(f, "{}", core::str::from_utf8(&bytes[..len]).unwrap()) } } /// A type that provides a human readable debug impl for arbitrary bytes. /// /// This generally works best when the bytes are presumed to be mostly UTF-8, /// but will work for anything. /// /// N.B. This is copied nearly verbatim from regex-automata. Sigh. pub(crate) struct Bytes<'a>(pub(crate) &'a [u8]); impl<'a> core::fmt::Debug for Bytes<'a> { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { write!(f, "\"")?; // This is a sad re-implementation of a similar impl found in bstr. let mut bytes = self.0; while let Some(result) = utf8_decode(bytes) { let ch = match result { Ok(ch) => ch, Err(byte) => { write!(f, r"\x{:02x}", byte)?; bytes = &bytes[1..]; continue; } }; bytes = &bytes[ch.len_utf8()..]; match ch { '\0' => write!(f, "\\0")?, // ASCII control characters except \0, \n, \r, \t '\x01'..='\x08' | '\x0b' | '\x0c' | '\x0e'..='\x19' | '\x7f' => { write!(f, "\\x{:02x}", u32::from(ch))?; } '\n' | '\r' | '\t' | _ => { write!(f, "{}", ch.escape_debug())?; } } } write!(f, "\"")?; Ok(()) } } /// Decodes the next UTF-8 encoded codepoint from the given byte slice. /// /// If no valid encoding of a codepoint exists at the beginning of the given /// byte slice, then the first byte is returned instead. /// /// This returns `None` if and only if `bytes` is empty. 
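///
/// A rough illustration of the contract described above, with values chosen
/// purely for illustration (kept as plain text rather than a doctest since
/// this helper is crate-private):
///
/// ```text
/// utf8_decode(b"")             == None
/// utf8_decode("δ".as_bytes())  == Some(Ok('δ'))   // valid two-byte sequence
/// utf8_decode(b"\xFFabc")      == Some(Err(0xFF)) // invalid leading byte
/// ```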
pub(crate) fn utf8_decode(bytes: &[u8]) -> Option<Result<char, u8>> { fn len(byte: u8) -> Option<usize> { if byte <= 0x7F { return Some(1); } else if byte & 0b1100_0000 == 0b1000_0000 { return None; } else if byte <= 0b1101_1111 { Some(2) } else if byte <= 0b1110_1111 { Some(3) } else if byte <= 0b1111_0111 { Some(4) } else { None } } if bytes.is_empty() { return None; } let len = match len(bytes[0]) { None => return Some(Err(bytes[0])), Some(len) if len > bytes.len() => return Some(Err(bytes[0])), Some(1) => return Some(Ok(char::from(bytes[0]))), Some(len) => len, }; match core::str::from_utf8(&bytes[..len]) { Ok(s) => Some(Ok(s.chars().next().unwrap())), Err(_) => Some(Err(bytes[0])), } } <file_sep>/regex-automata/tests/gen/README.md This directory contains tests for serialized objects from the regex-automata crate. Currently, there are only two supported such objects: dense and sparse DFAs. The idea behind these tests is to commit some serialized objects and run some basic tests by deserializing them and running searches and ensuring they are correct. We also make sure these are run under Miri, since deserialization is one of the biggest places where undefined behavior might occur in this crate (at the time of writing). The main thing we're testing is that the *current* code can still deserialize *old* objects correctly. Generally speaking, compatibility extends to semver compatible releases of this crate. Beyond that, no promises are made, although in practice callers can at least depend on errors occurring. (The serialized format always includes a version number, and incompatible changes increment that version number such that an error will occur if an unsupported version is detected.) To generate the dense DFAs, I used this command: ``` $ regex-cli generate serialize dense regex \ MULTI_PATTERN_V2 \ tests/gen/dense/ \ --rustfmt \ --safe \ --starts-for-each-pattern \ --specialize-start-states \ --start-kind both \ --unicode-word-boundary \ --minimize \ '\b[a-zA-Z]+\b' \ '(?m)^\S+$' \ '(?Rm)^\S+$' ``` And to generate the sparse DFAs, I used this command, which is the same as above, but with `s/dense/sparse/g`. ``` $ regex-cli generate serialize sparse regex \ MULTI_PATTERN_V2 \ tests/gen/sparse/ \ --rustfmt \ --safe \ --starts-for-each-pattern \ --specialize-start-states \ --start-kind both \ --unicode-word-boundary \ --minimize \ '\b[a-zA-Z]+\b' \ '(?m)^\S+$' \ '(?Rm)^\S+$' ``` The idea is to try to enable as many of the DFA's options as possible in order to test that serialization works for all of them. Arguably we should increase test coverage here, but this is a start. Note that in particular, this does not need to test that serialization and deserialization correctly roundtrips on its own. Indeed, the normal regex test suite has a test that does a serialization round trip for every test supported by DFAs. So that has very good coverage. What we're interested in testing here is our compatibility promise: do DFAs generated with an older revision of the code still deserialize correctly? <file_sep>/regex-automata/src/util/lazy.rs /*! A lazily initialized value for safe sharing between threads. The principal type in this module is `Lazy`, which makes it easy to construct values that are shared safely across multiple threads simultaneously. */ use core::fmt; /// A lazily initialized value that implements `Deref` for `T`. /// /// A `Lazy` takes an initialization function and permits callers from any /// thread to access the result of that initialization function in a safe /// manner. 
In effect, this permits one-time initialization of global resources
/// in a (possibly) multi-threaded program.
///
/// This type and its functionality are available even when neither the `alloc`
/// nor the `std` features are enabled. In exchange, a `Lazy` does **not**
/// guarantee that the given `create` function is called at most once. It
/// might be called multiple times. Moreover, a call to `Lazy::get` (either
/// explicitly or implicitly via `Lazy`'s `Deref` impl) may block until a `T`
/// is available.
///
/// This is very similar to `lazy_static` or `once_cell`, except it doesn't
/// guarantee that the initialization function will be run once and it works
/// in no-alloc no-std environments. With that said, if you need stronger
/// guarantees or a more flexible API, then it is recommended to use either
/// `lazy_static` or `once_cell`.
///
/// # Warning: may use a spin lock
///
/// When this crate is compiled _without_ the `alloc` feature, then this type
/// may use a spin lock internally. This can have subtle effects that may
/// be undesirable. See [Spinlocks Considered Harmful][spinharm] for a more
/// thorough treatment of this topic.
///
/// [spinharm]: https://matklad.github.io/2020/01/02/spinlocks-considered-harmful.html
///
/// # Example
///
/// This type is useful for creating regexes once, and then using them from
/// multiple threads simultaneously without worrying about synchronization.
///
/// ```
/// use regex_automata::{dfa::regex::Regex, util::lazy::Lazy, Match};
///
/// static RE: Lazy<Regex> = Lazy::new(|| Regex::new("foo[0-9]+bar").unwrap());
///
/// let expected = Some(Match::must(0, 3..14));
/// assert_eq!(expected, RE.find(b"zzzfoo12345barzzz"));
/// ```
pub struct Lazy<T, F = fn() -> T>(lazy::Lazy<T, F>);

impl<T, F> Lazy<T, F> {
    /// Create a new `Lazy` value that is initialized via the given function.
    ///
    /// The `T` type is automatically inferred from the return type of the
    /// `create` function given.
    pub const fn new(create: F) -> Lazy<T, F> {
        Lazy(lazy::Lazy::new(create))
    }
}

impl<T, F: Fn() -> T> Lazy<T, F> {
    /// Return a reference to the lazily initialized value.
    ///
    /// This routine may block if another thread is initializing a `T`.
    ///
    /// Note that given an `x` which has type `Lazy`, this must be called via
    /// `Lazy::get(x)` and not `x.get()`. This routine is defined this way
    /// because `Lazy` impls `Deref` with a target of `T`.
    ///
    /// # Panics
    ///
    /// This panics if the `create` function inside this lazy value panics.
    /// If the panic occurred in another thread, then this routine _may_ also
    /// panic (but is not guaranteed to do so).
    pub fn get(this: &Lazy<T, F>) -> &T {
        this.0.get()
    }
}

impl<T, F: Fn() -> T> core::ops::Deref for Lazy<T, F> {
    type Target = T;

    fn deref(&self) -> &T {
        Lazy::get(self)
    }
}

impl<T: fmt::Debug, F: Fn() -> T> fmt::Debug for Lazy<T, F> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.0.fmt(f)
    }
}

#[cfg(feature = "alloc")]
mod lazy {
    use core::{
        fmt,
        marker::PhantomData,
        sync::atomic::{AtomicPtr, Ordering},
    };

    use alloc::boxed::Box;

    /// A non-std lazy initialized value.
    ///
    /// This might run the initialization function more than once, but will
    /// never block.
    ///
    /// I wish I could get these semantics into the non-alloc non-std Lazy
    /// type below, but I'm not sure how to do it. If you can do an alloc,
    /// then the implementation becomes very simple if you don't care about
    /// redundant work precisely because a pointer can be atomically swapped.
    ///
    /// Perhaps making this approach work in the non-alloc non-std case
    /// requires asking the caller for a pointer? It would make the API less
    /// convenient I think.
    pub(super) struct Lazy<T, F> {
        data: AtomicPtr<T>,
        create: F,
        // This indicates to the compiler that this type can drop T. It's not
        // totally clear how the absence of this marker could lead to trouble,
        // but putting it here doesn't have any downsides so we hedge until
        // someone from the Unsafe Working Group can tell us definitively that
        // we don't need it.
        //
        // See: https://github.com/BurntSushi/regex-automata/issues/30
        owned: PhantomData<Box<T>>,
    }

    // SAFETY: So long as T and &T (and F and &F) can themselves be safely
    // shared among threads, so too can a Lazy<T, _>. Namely, the Lazy API
    // only permits accessing a &T and initialization is free of data races.
    // So if T is thread safe, then so too is Lazy<T, _>.
    //
    // We specifically require that T: Send in order for Lazy<T> to be Sync.
    // Without that requirement, it's possible to send a T from one thread to
    // another via Lazy's destructor.
    //
    // It's not clear whether we need F: Send+Sync for Lazy to be Sync. But
    // we're conservative for now and keep both.
    unsafe impl<T: Send + Sync, F: Send + Sync> Sync for Lazy<T, F> {}

    impl<T, F> Lazy<T, F> {
        /// Create a new alloc but non-std lazy value that is racily
        /// initialized. That is, the 'create' function may be called more than
        /// once.
        pub(super) const fn new(create: F) -> Lazy<T, F> {
            Lazy {
                data: AtomicPtr::new(core::ptr::null_mut()),
                create,
                owned: PhantomData,
            }
        }
    }

    impl<T, F: Fn() -> T> Lazy<T, F> {
        /// Get the underlying lazy value. If it hasn't been initialized
        /// yet, then always attempt to initialize it (even if some other
        /// thread is initializing it) and atomically attach it to this lazy
        /// value before returning it.
        pub(super) fn get(&self) -> &T {
            if let Some(data) = self.poll() {
                return data;
            }
            let data = (self.create)();
            let mut ptr = Box::into_raw(Box::new(data));
            // We attempt to stuff our initialized value into our atomic
            // pointer. Upon success, we don't need to do anything. But if
            // someone else beat us to the punch, then we need to make sure
            // our newly created value is dropped.
            let result = self.data.compare_exchange(
                core::ptr::null_mut(),
                ptr,
                Ordering::AcqRel,
                Ordering::Acquire,
            );
            if let Err(old) = result {
                // SAFETY: We created 'ptr' via Box::into_raw above, so turning
                // it back into a Box via from_raw is safe.
                drop(unsafe { Box::from_raw(ptr) });
                ptr = old;
            }
            // SAFETY: We just set the pointer above to a non-null value, even
            // in the error case, and set it to a fully initialized value
            // returned by 'create'.
            unsafe { &*ptr }
        }

        /// If this lazy value has been initialized successfully, then return
        /// that value. Otherwise return None immediately. This never attempts
        /// to run initialization itself.
        fn poll(&self) -> Option<&T> {
            let ptr = self.data.load(Ordering::Acquire);
            if ptr.is_null() {
                return None;
            }
            // SAFETY: We just checked that the pointer is not null. Since it's
            // not null, it must have been fully initialized by 'get' at some
            // point.
            Some(unsafe { &*ptr })
        }
    }

    impl<T: fmt::Debug, F: Fn() -> T> fmt::Debug for Lazy<T, F> {
        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
            f.debug_struct("Lazy").field("data", &self.poll()).finish()
        }
    }

    impl<T, F> Drop for Lazy<T, F> {
        fn drop(&mut self) {
            let ptr = *self.data.get_mut();
            if !ptr.is_null() {
                // SAFETY: We just checked that 'ptr' is not null. And since
                // we have exclusive access, there are no races to worry about.
                drop(unsafe { Box::from_raw(ptr) });
            }
        }
    }
}

#[cfg(not(feature = "alloc"))]
mod lazy {
    use core::{
        cell::Cell,
        fmt,
        mem::MaybeUninit,
        panic::{RefUnwindSafe, UnwindSafe},
        sync::atomic::{AtomicU8, Ordering},
    };

    /// Our 'Lazy' value can be in one of three states:
    ///
    /// * INIT is where it starts, and also ends up back here if the
    /// 'create' routine panics.
    /// * BUSY is where it sits while initialization is running in exactly
    /// one thread.
    /// * DONE is where it sits after 'create' has completed and 'data' has
    /// been fully initialized.
    const LAZY_STATE_INIT: u8 = 0;
    const LAZY_STATE_BUSY: u8 = 1;
    const LAZY_STATE_DONE: u8 = 2;

    /// A non-alloc non-std lazy initialized value.
    ///
    /// This guarantees initialization only happens once, but uses a spinlock
    /// to block in the case of simultaneous access. Blocking occurs so that
    /// one thread waits while another thread initializes the value.
    ///
    /// I would much rather have the semantics of the 'alloc' Lazy type above.
    /// Namely, that we might run the initialization function more than once,
    /// but we never otherwise block. However, I don't know how to do that in
    /// a non-alloc non-std context.
    pub(super) struct Lazy<T, F> {
        state: AtomicU8,
        create: Cell<Option<F>>,
        data: Cell<MaybeUninit<T>>,
    }

    // SAFETY: So long as T and &T (and F and &F) can themselves be safely
    // shared among threads, so too can a Lazy<T, _>. Namely, the Lazy API
    // only permits accessing a &T and initialization is free of data races.
    // So if T is thread safe, then so too is Lazy<T, _>.
    unsafe impl<T: Send + Sync, F: Send + Sync> Sync for Lazy<T, F> {}

    // A reference to a Lazy is unwind safe because we specifically take
    // precautions to poison all accesses to a Lazy if the caller-provided
    // 'create' function panics.
    impl<T: UnwindSafe, F: UnwindSafe + RefUnwindSafe> RefUnwindSafe
        for Lazy<T, F>
    {
    }

    impl<T, F> Lazy<T, F> {
        /// Create a new non-alloc non-std lazy value that is initialized
        /// exactly once on first use using the given function.
        pub(super) const fn new(create: F) -> Lazy<T, F> {
            Lazy {
                state: AtomicU8::new(LAZY_STATE_INIT),
                create: Cell::new(Some(create)),
                data: Cell::new(MaybeUninit::uninit()),
            }
        }
    }

    impl<T, F: FnOnce() -> T> Lazy<T, F> {
        /// Get the underlying lazy value. If it hasn't been initialized
        /// yet, then either initialize it or block until some other thread
        /// initializes it. If the 'create' function given to Lazy::new panics
        /// (even in another thread), then this panics too.
        pub(super) fn get(&self) -> &T {
            // This is effectively a spinlock. We loop until we enter a DONE
            // state, and if possible, initialize it ourselves. The only way
            // we exit the loop is if 'create' panics, we initialize 'data' or
            // some other thread initializes 'data'.
            //
            // Yes, I have read spinlocks considered harmful[1]. And that
            // article is why this spinlock is only active when 'alloc' isn't
            // enabled. I did this because I don't think there is really
            // another choice without 'alloc', other than not providing this at
            // all. But I think that's a big bummer.
            //
            // [1]: https://matklad.github.io/2020/01/02/spinlocks-considered-harmful.html
            while self.state.load(Ordering::Acquire) != LAZY_STATE_DONE {
                // Check if we're the first ones to get here. If so, we'll be
                // the ones who initialize.
                let result = self.state.compare_exchange(
                    LAZY_STATE_INIT,
                    LAZY_STATE_BUSY,
                    Ordering::AcqRel,
                    Ordering::Acquire,
                );
                // This means we saw the INIT state and nobody else can. So we
                // must take responsibility for initializing.
And by virtue of // observing INIT, we have also told anyone else trying to // get here that we are BUSY. If someone else sees BUSY, then // they will spin until we finish initialization. if let Ok(_) = result { // Since we are guaranteed to be the only ones here, we // know that 'create' is there... Unless someone else got // here before us and 'create' panicked. In which case, // 'self.create' is now 'None' and we forward the panic // to the caller. (i.e., We implement poisoning.) // // SAFETY: Our use of 'self.state' guarantees that we are // the only thread executing this line, and thus there are // no races. let create = unsafe { (*self.create.as_ptr()).take().expect( "Lazy's create function panicked, \ preventing initialization, poisoning current thread", ) }; let guard = Guard { state: &self.state }; // SAFETY: Our use of 'self.state' guarantees that we are // the only thread executing this line, and thus there are // no races. unsafe { (*self.data.as_ptr()).as_mut_ptr().write(create()); } // All is well. 'self.create' ran successfully, so we // forget the guard. core::mem::forget(guard); // Everything is initialized, so we can declare success. self.state.store(LAZY_STATE_DONE, Ordering::Release); break; } core::hint::spin_loop(); } // We only get here if data is fully initialized, and thus poll // will always return something. self.poll().unwrap() } /// If this lazy value has been initialized successfully, then return /// that value. Otherwise return None immediately. This never blocks. fn poll(&self) -> Option<&T> { if self.state.load(Ordering::Acquire) == LAZY_STATE_DONE { // SAFETY: The DONE state only occurs when data has been fully // initialized. Some(unsafe { &*(*self.data.as_ptr()).as_ptr() }) } else { None } } } impl<T: fmt::Debug, F: FnMut() -> T> fmt::Debug for Lazy<T, F> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("Lazy") .field("state", &self.state.load(Ordering::Acquire)) .field("create", &"<closure>") .field("data", &self.poll()) .finish() } } impl<T, F> Drop for Lazy<T, F> { fn drop(&mut self) { if *self.state.get_mut() == LAZY_STATE_DONE { // SAFETY: state is DONE if and only if data has been fully // initialized. At which point, it is safe to drop. unsafe { // MSRV(1.60): Use assume_init_drop. The below is how // assume_init_drop is implemented. core::ptr::drop_in_place( (*self.data.as_ptr()).as_mut_ptr(), ) } } } } /// A guard that will reset a Lazy's state back to INIT when dropped. The /// idea here is to 'forget' this guard on success. On failure (when a /// panic occurs), the Drop impl runs and causes all in-progress and future /// 'get' calls to panic. Without this guard, all in-progress and future /// 'get' calls would spin forever. Crashing is much better than getting /// stuck in an infinite loop. struct Guard<'a> { state: &'a AtomicU8, } impl<'a> Drop for Guard<'a> { fn drop(&mut self) { // We force ourselves back into an INIT state. This will in turn // cause any future 'get' calls to attempt calling 'self.create' // again which will in turn panic because 'self.create' will now // be 'None'. 
self.state.store(LAZY_STATE_INIT, Ordering::Release); } } } #[cfg(test)] mod tests { use super::*; fn assert_send<T: Send>() {} fn assert_sync<T: Sync>() {} fn assert_unwind<T: core::panic::UnwindSafe>() {} fn assert_refunwind<T: core::panic::RefUnwindSafe>() {} #[test] fn oibits() { assert_send::<Lazy<u64>>(); assert_sync::<Lazy<u64>>(); assert_unwind::<Lazy<u64>>(); assert_refunwind::<Lazy<u64>>(); } // This is a regression test because we used to rely on the inferred Sync // impl for the Lazy type defined above (for 'alloc' mode). In the // inferred impl, it only requires that T: Sync for Lazy<T>: Sync. But // if we have that, we can actually make use of the fact that Lazy<T> drops // T to create a value on one thread and drop it on another. This *should* // require T: Send, but our missing bounds before let it sneak by. // // Basically, this test should not compile, so we... comment it out. We // don't have a great way of testing compile-fail tests right now. // // See: https://github.com/BurntSushi/regex-automata/issues/30 /* #[test] fn sync_not_send() { #[allow(dead_code)] fn inner<T: Sync + Default>() { let lazy = Lazy::new(move || T::default()); std::thread::scope(|scope| { scope.spawn(|| { Lazy::get(&lazy); // We create T in this thread }); }); // And drop in this thread. drop(lazy); // So we have send a !Send type over threads. (with some more // legwork, its possible to even sneak the value out of drop // through thread local) } } */ } <file_sep>/testdata/misc.toml [[test]] name = "ascii-literal" regex = "a" haystack = "a" matches = [[0, 1]] [[test]] name = "ascii-literal-not" regex = "a" haystack = "z" matches = [] [[test]] name = "ascii-literal-anchored" regex = "a" haystack = "a" matches = [[0, 1]] anchored = true [[test]] name = "ascii-literal-anchored-not" regex = "a" haystack = "z" matches = [] anchored = true [[test]] name = "anchor-start-end-line" regex = '(?m)^bar$' haystack = "foo\nbar\nbaz" matches = [[4, 7]] [[test]] name = "prefix-literal-match" regex = '^abc' haystack = "abc" matches = [[0, 3]] [[test]] name = "prefix-literal-match-ascii" regex = '^abc' haystack = "abc" matches = [[0, 3]] unicode = false utf8 = false [[test]] name = "prefix-literal-no-match" regex = '^abc' haystack = "zabc" matches = [] [[test]] name = "one-literal-edge" regex = 'abc' haystack = "xxxxxab" matches = [] [[test]] name = "terminates" regex = 'a$' haystack = "a" matches = [[0, 1]] [[test]] name = "suffix-100" regex = '.*abcd' haystack = "abcd" matches = [[0, 4]] [[test]] name = "suffix-200" regex = '.*(?:abcd)+' haystack = "abcd" matches = [[0, 4]] [[test]] name = "suffix-300" regex = '.*(?:abcd)+' haystack = "abcdabcd" matches = [[0, 8]] [[test]] name = "suffix-400" regex = '.*(?:abcd)+' haystack = "abcdxabcd" matches = [[0, 9]] [[test]] name = "suffix-500" regex = '.*x(?:abcd)+' haystack = "abcdxabcd" matches = [[0, 9]] [[test]] name = "suffix-600" regex = '[^abcd]*x(?:abcd)+' haystack = "abcdxabcd" matches = [[4, 9]] <file_sep>/regex-automata/src/meta/limited.rs /*! This module defines two bespoke reverse DFA searching routines. (One for the lazy DFA and one for the fully compiled DFA.) These routines differ from the usual ones by permitting the caller to specify a minimum starting position. That is, the search will begin at `input.end()` and will usually stop at `input.start()`, unless `min_start > input.start()`, in which case, the search will stop at `min_start`. 
In other words, this lets you say, "no, the search must not extend past this point, even if it's within the bounds of the given `Input`." And if the search *does* want to go past that point, it stops and returns a "may be quadratic" error, which indicates that the caller should retry using some other technique. These routines specifically exist to protect against quadratic behavior when employing the "reverse suffix" and "reverse inner" optimizations. Without the backstop these routines provide, it is possible for parts of the haystack to get re-scanned over and over again. The backstop not only prevents this, but *tells you when it is happening* so that you can change the strategy. Why can't we just use the normal search routines? We could use the normal search routines and just set the start bound on the provided `Input` to our `min_start` position. The problem here is that it's impossible to distinguish between "no match because we reached the end of input" and "determined there was no match well before the end of input." The former case is what we care about with respect to quadratic behavior. The latter case is totally fine. Why don't we modify the normal search routines to report the position at which the search stops? I considered this, and I still wonder if it is indeed the right thing to do. However, I think the straight-forward thing to do there would be to complicate the return type signature of almost every search routine in this crate, which I really do not want to do. It therefore might make more sense to provide a richer way for search routines to report meta data, but that was beyond my bandwidth to work on at the time of writing. See the 'opt/reverse-inner' and 'opt/reverse-suffix' benchmarks in rebar for a real demonstration of how quadratic behavior is mitigated. */ use crate::{ meta::error::{RetryError, RetryQuadraticError}, HalfMatch, Input, MatchError, }; #[cfg(feature = "dfa-build")] pub(crate) fn dfa_try_search_half_rev( dfa: &crate::dfa::dense::DFA<alloc::vec::Vec<u32>>, input: &Input<'_>, min_start: usize, ) -> Result<Option<HalfMatch>, RetryError> { use crate::dfa::Automaton; let mut mat = None; let mut sid = dfa.start_state_reverse(input)?; if input.start() == input.end() { dfa_eoi_rev(dfa, input, &mut sid, &mut mat)?; return Ok(mat); } let mut at = input.end() - 1; loop { sid = dfa.next_state(sid, input.haystack()[at]); if dfa.is_special_state(sid) { if dfa.is_match_state(sid) { let pattern = dfa.match_pattern(sid, 0); // Since reverse searches report the beginning of a // match and the beginning is inclusive (not exclusive // like the end of a match), we add 1 to make it // inclusive. mat = Some(HalfMatch::new(pattern, at + 1)); } else if dfa.is_dead_state(sid) { return Ok(mat); } else if dfa.is_quit_state(sid) { if mat.is_some() { return Ok(mat); } return Err(MatchError::quit(input.haystack()[at], at).into()); } } if at == input.start() { break; } at -= 1; if at < min_start { trace!( "reached position {} which is before the previous literal \ match, quitting to avoid quadratic behavior", at, ); return Err(RetryError::Quadratic(RetryQuadraticError::new())); } } let was_dead = dfa.is_dead_state(sid); dfa_eoi_rev(dfa, input, &mut sid, &mut mat)?; // If we reach the beginning of the search and we could otherwise still // potentially keep matching if there was more to match, then we actually // return an error to indicate giving up on this optimization. Why? Because // we can't prove that the real match begins at where we would report it. 
// // This only happens when all of the following are true: // // 1) We reach the starting point of our search span. // 2) The match we found is before the starting point. // 3) The FSM reports we could possibly find a longer match. // // We need (1) because otherwise the search stopped before the starting // point and there is no possible way to find a more leftmost position. // // We need (2) because if the match found has an offset equal to the minimum // possible offset, then there is no possible more leftmost match. // // We need (3) because if the FSM couldn't continue anyway (i.e., it's in // a dead state), then we know we couldn't find anything more leftmost // than what we have. (We have to check the state we were in prior to the // EOI transition since the EOI transition will usually bring us to a dead // state by virtue of it represents the end-of-input.) if at == input.start() && mat.map_or(false, |m| m.offset() > input.start()) && !was_dead { trace!( "reached beginning of search at offset {} without hitting \ a dead state, quitting to avoid potential false positive match", at, ); return Err(RetryError::Quadratic(RetryQuadraticError::new())); } Ok(mat) } #[cfg(feature = "hybrid")] pub(crate) fn hybrid_try_search_half_rev( dfa: &crate::hybrid::dfa::DFA, cache: &mut crate::hybrid::dfa::Cache, input: &Input<'_>, min_start: usize, ) -> Result<Option<HalfMatch>, RetryError> { let mut mat = None; let mut sid = dfa.start_state_reverse(cache, input)?; if input.start() == input.end() { hybrid_eoi_rev(dfa, cache, input, &mut sid, &mut mat)?; return Ok(mat); } let mut at = input.end() - 1; loop { sid = dfa .next_state(cache, sid, input.haystack()[at]) .map_err(|_| MatchError::gave_up(at))?; if sid.is_tagged() { if sid.is_match() { let pattern = dfa.match_pattern(cache, sid, 0); // Since reverse searches report the beginning of a // match and the beginning is inclusive (not exclusive // like the end of a match), we add 1 to make it // inclusive. mat = Some(HalfMatch::new(pattern, at + 1)); } else if sid.is_dead() { return Ok(mat); } else if sid.is_quit() { if mat.is_some() { return Ok(mat); } return Err(MatchError::quit(input.haystack()[at], at).into()); } } if at == input.start() { break; } at -= 1; if at < min_start { trace!( "reached position {} which is before the previous literal \ match, quitting to avoid quadratic behavior", at, ); return Err(RetryError::Quadratic(RetryQuadraticError::new())); } } let was_dead = sid.is_dead(); hybrid_eoi_rev(dfa, cache, input, &mut sid, &mut mat)?; // See the comments in the full DFA routine above for why we need this. 
if at == input.start() && mat.map_or(false, |m| m.offset() > input.start()) && !was_dead { trace!( "reached beginning of search at offset {} without hitting \ a dead state, quitting to avoid potential false positive match", at, ); return Err(RetryError::Quadratic(RetryQuadraticError::new())); } Ok(mat) } #[cfg(feature = "dfa-build")] #[cfg_attr(feature = "perf-inline", inline(always))] fn dfa_eoi_rev( dfa: &crate::dfa::dense::DFA<alloc::vec::Vec<u32>>, input: &Input<'_>, sid: &mut crate::util::primitives::StateID, mat: &mut Option<HalfMatch>, ) -> Result<(), MatchError> { use crate::dfa::Automaton; let sp = input.get_span(); if sp.start > 0 { let byte = input.haystack()[sp.start - 1]; *sid = dfa.next_state(*sid, byte); if dfa.is_match_state(*sid) { let pattern = dfa.match_pattern(*sid, 0); *mat = Some(HalfMatch::new(pattern, sp.start)); } else if dfa.is_quit_state(*sid) { if mat.is_some() { return Ok(()); } return Err(MatchError::quit(byte, sp.start - 1)); } } else { *sid = dfa.next_eoi_state(*sid); if dfa.is_match_state(*sid) { let pattern = dfa.match_pattern(*sid, 0); *mat = Some(HalfMatch::new(pattern, 0)); } // N.B. We don't have to check 'is_quit' here because the EOI // transition can never lead to a quit state. debug_assert!(!dfa.is_quit_state(*sid)); } Ok(()) } #[cfg(feature = "hybrid")] #[cfg_attr(feature = "perf-inline", inline(always))] fn hybrid_eoi_rev( dfa: &crate::hybrid::dfa::DFA, cache: &mut crate::hybrid::dfa::Cache, input: &Input<'_>, sid: &mut crate::hybrid::LazyStateID, mat: &mut Option<HalfMatch>, ) -> Result<(), MatchError> { let sp = input.get_span(); if sp.start > 0 { let byte = input.haystack()[sp.start - 1]; *sid = dfa .next_state(cache, *sid, byte) .map_err(|_| MatchError::gave_up(sp.start))?; if sid.is_match() { let pattern = dfa.match_pattern(cache, *sid, 0); *mat = Some(HalfMatch::new(pattern, sp.start)); } else if sid.is_quit() { if mat.is_some() { return Ok(()); } return Err(MatchError::quit(byte, sp.start - 1)); } } else { *sid = dfa .next_eoi_state(cache, *sid) .map_err(|_| MatchError::gave_up(sp.start))?; if sid.is_match() { let pattern = dfa.match_pattern(cache, *sid, 0); *mat = Some(HalfMatch::new(pattern, 0)); } // N.B. We don't have to check 'is_quit' here because the EOI // transition can never lead to a quit state. debug_assert!(!sid.is_quit()); } Ok(()) } <file_sep>/regex-cli/cmd/debug/dfa.rs use std::io::{stdout, Write}; use crate::{ args, util::{self, Table}, }; use {lexopt, regex_automata::dfa::Automaton}; pub fn run_dense(p: &mut lexopt::Parser) -> anyhow::Result<()> { const USAGE: &'static str = "\ Prints the debug representation of a dense DFA or a dense DFA regex. A DFA regex contains two DFAs: a forward DFA for finding the end of a match, and a reverse DFA for finding the start of a match. These can be compiled independently using just 'regex-cli debug dense dfa', but using the 'regex' sub-command will handle it for you and print the debug representation of both the forward and reverse DFAs. USAGE: regex-cli debug dense <command> ... COMMANDS: dfa Print the debug representation of a single dense DFA. regex Print the debug representation of a forward and reverse DFA regex. "; let cmd = args::next_as_command(USAGE, p)?; match &*cmd { "dfa" => run_dense_dfa(p), "regex" => run_dense_regex(p), unk => anyhow::bail!("unrecognized command '{}'", unk), } } fn run_dense_dfa(p: &mut lexopt::Parser) -> anyhow::Result<()> { const USAGE: &'static str = "\ Prints the debug representation of a fully compiled dense DFA. 
USAGE: regex-cli debug dense dfa [<pattern> ...] TIP: use -h for short docs and --help for long docs OPTIONS: %options% "; let mut common = args::common::Config::default(); let mut patterns = args::patterns::Config::positional(); let mut syntax = args::syntax::Config::default(); let mut thompson = args::thompson::Config::default(); let mut dfa = args::dfa::Config::default(); args::configure( p, USAGE, &mut [ &mut common, &mut patterns, &mut syntax, &mut thompson, &mut dfa, ], )?; let pats = patterns.get()?; let mut table = Table::empty(); let (asts, time) = util::timeitr(|| syntax.asts(&pats))?; table.add("parse time", time); let (hirs, time) = util::timeitr(|| syntax.hirs(&pats, &asts))?; table.add("translate time", time); let (nfa, time) = util::timeitr(|| thompson.from_hirs(&hirs))?; table.add("compile nfa time", time); let (dfa, time) = util::timeitr(|| dfa.from_nfa(&nfa))?; table.add("compile dfa time", time); table.add("memory", dfa.memory_usage()); table.add("pattern len", dfa.pattern_len()); table.add("start kind", dfa.start_kind()); table.add("alphabet len", dfa.alphabet_len()); table.add("stride", dfa.stride()); table.add("has empty?", dfa.has_empty()); table.add("is utf8?", dfa.is_utf8()); if common.table() { table.print(stdout())?; } if !common.quiet { if common.table() { writeln!(stdout(), "")?; } writeln!(stdout(), "{:?}", dfa)?; } Ok(()) } fn run_dense_regex(p: &mut lexopt::Parser) -> anyhow::Result<()> { const USAGE: &'static str = "\ Prints the debug representation of a fully compiled dense DFA regex. This includes both the forward and reverse DFAs that make up a dense DFA regex. USAGE: regex-cli debug dense regex [<pattern> ...] TIP: use -h for short docs and --help for long docs OPTIONS: %options% "; let mut common = args::common::Config::default(); let mut patterns = args::patterns::Config::positional(); let mut syntax = args::syntax::Config::default(); let mut thompson = args::thompson::Config::default(); let mut dfa = args::dfa::Config::default(); args::configure( p, USAGE, &mut [ &mut common, &mut patterns, &mut syntax, &mut thompson, &mut dfa, ], )?; let pats = patterns.get()?; let mut table = Table::empty(); let (asts, time) = util::timeitr(|| syntax.asts(&pats))?; table.add("parse time", time); let (hirs, time) = util::timeitr(|| syntax.hirs(&pats, &asts))?; table.add("translate time", time); let (nfafwd, time) = util::timeitr(|| thompson.from_hirs(&hirs))?; table.add("compile forward nfa time", time); let (dfafwd, time) = util::timeitr(|| dfa.from_nfa(&nfafwd))?; table.add("compile forward dfa time", time); let (nfarev, time) = util::timeitr(|| thompson.reversed().from_hirs(&hirs))?; table.add("compile reverse nfa time", time); let (dfarev, time) = util::timeitr(|| dfa.reversed().from_nfa(&nfarev))?; table.add("compile reverse dfa time", time); let (re, time) = util::timeit(|| { regex_automata::dfa::regex::Builder::new() .build_from_dfas(dfafwd, dfarev) }); table.add("build regex time", time); table.add( "memory", re.forward().memory_usage() + re.reverse().memory_usage(), ); table.add("pattern len", re.pattern_len()); if common.table() { table.print(stdout())?; } if !common.quiet { if common.table() { writeln!(stdout(), "")?; } writeln!(stdout(), "{:?}", re)?; } Ok(()) } pub fn run_sparse(p: &mut lexopt::Parser) -> anyhow::Result<()> { const USAGE: &'static str = "\ Prints the debug representation of a sparse DFA or a sparse DFA regex. A DFA regex contains two DFAs: a forward DFA for finding the end of a match, and a reverse DFA for finding the start of a match. 
These can be compiled independently using just 'regex-cli debug dense dfa', but using the 'regex' sub-command will handle it for you and print the debug representation of both the forward and reverse DFAs. USAGE: regex-cli debug sparse <command> ... COMMANDS: dfa Print the debug representation of a single sparse DFA. regex Print the debug representation of a forward and reverse DFA regex. "; let cmd = args::next_as_command(USAGE, p)?; match &*cmd { "dfa" => run_sparse_dfa(p), "regex" => run_sparse_regex(p), unk => anyhow::bail!("unrecognized command '{}'", unk), } } fn run_sparse_dfa(p: &mut lexopt::Parser) -> anyhow::Result<()> { const USAGE: &'static str = "\ Prints the debug representation of a fully compiled sparse DFA. USAGE: regex-cli debug sparse dfa [<pattern> ...] TIP: use -h for short docs and --help for long docs OPTIONS: %options% "; let mut common = args::common::Config::default(); let mut patterns = args::patterns::Config::positional(); let mut syntax = args::syntax::Config::default(); let mut thompson = args::thompson::Config::default(); let mut dfa = args::dfa::Config::default(); args::configure( p, USAGE, &mut [ &mut common, &mut patterns, &mut syntax, &mut thompson, &mut dfa, ], )?; let pats = patterns.get()?; let mut table = Table::empty(); let (asts, time) = util::timeitr(|| syntax.asts(&pats))?; table.add("parse time", time); let (hirs, time) = util::timeitr(|| syntax.hirs(&pats, &asts))?; table.add("translate time", time); let (nfa, time) = util::timeitr(|| thompson.from_hirs(&hirs))?; table.add("compile nfa time", time); let (dfa, time) = util::timeitr(|| dfa.from_nfa_sparse(&nfa))?; table.add("compile dfa time", time); table.add("memory", dfa.memory_usage()); table.add("pattern len", dfa.pattern_len()); table.add("start kind", dfa.start_kind()); table.add("has empty?", dfa.has_empty()); table.add("is utf8?", dfa.is_utf8()); if common.table() { table.print(stdout())?; } if !common.quiet { if common.table() { writeln!(stdout(), "")?; } writeln!(stdout(), "{:?}", dfa)?; } Ok(()) } fn run_sparse_regex(p: &mut lexopt::Parser) -> anyhow::Result<()> { const USAGE: &'static str = "\ Prints the debug representation of a fully compiled sparse DFA regex. This includes both the forward and reverse DFAs that make up a sparse DFA regex. USAGE: regex-cli debug sparse regex [<pattern> ...] 
TIP: use -h for short docs and --help for long docs OPTIONS: %options% "; let mut common = args::common::Config::default(); let mut patterns = args::patterns::Config::positional(); let mut syntax = args::syntax::Config::default(); let mut thompson = args::thompson::Config::default(); let mut dfa = args::dfa::Config::default(); args::configure( p, USAGE, &mut [ &mut common, &mut patterns, &mut syntax, &mut thompson, &mut dfa, ], )?; let pats = patterns.get()?; let mut table = Table::empty(); let (asts, time) = util::timeitr(|| syntax.asts(&pats))?; table.add("parse time", time); let (hirs, time) = util::timeitr(|| syntax.hirs(&pats, &asts))?; table.add("translate time", time); let (nfafwd, time) = util::timeitr(|| thompson.from_hirs(&hirs))?; table.add("compile forward nfa time", time); let (dfafwd, time) = util::timeitr(|| dfa.from_nfa_sparse(&nfafwd))?; table.add("compile forward dfa time", time); let (nfarev, time) = util::timeitr(|| thompson.reversed().from_hirs(&hirs))?; table.add("compile reverse nfa time", time); let (dfarev, time) = util::timeitr(|| dfa.reversed().from_nfa_sparse(&nfarev))?; table.add("compile reverse dfa time", time); let (re, time) = util::timeit(|| { regex_automata::dfa::regex::Builder::new() .build_from_dfas(dfafwd, dfarev) }); table.add("build regex time", time); table.add( "memory", re.forward().memory_usage() + re.reverse().memory_usage(), ); table.add("pattern len", re.pattern_len()); if common.table() { table.print(stdout())?; } if !common.quiet { if common.table() { writeln!(stdout(), "")?; } writeln!(stdout(), "{:?}", re)?; } Ok(()) } <file_sep>/regex-automata/src/dfa/remapper.rs use alloc::vec::Vec; use crate::util::primitives::StateID; /// Remappable is a tightly coupled abstraction that facilitates remapping /// state identifiers in DFAs. /// /// The main idea behind remapping state IDs is that DFAs often need to check /// if a certain state is a "special" state of some kind (like a match state) /// during a search. Since this is extremely perf critical code, we want this /// check to be as fast as possible. Partitioning state IDs into, for example, /// into "non-match" and "match" states means one can tell if a state is a /// match state via a simple comparison of the state ID. /// /// The issue is that during the DFA construction process, it's not /// particularly easy to partition the states. Instead, the simplest thing is /// to often just do a pass over all of the states and shuffle them into their /// desired partitionings. To do that, we need a mechanism for swapping states. /// Hence, this abstraction. /// /// Normally, for such little code, I would just duplicate it. But this is a /// key optimization and the implementation is a bit subtle. So the abstraction /// is basically a ham-fisted attempt at DRY. The only place we use this is in /// the dense and one-pass DFAs. /// /// See also src/dfa/special.rs for a more detailed explanation of how dense /// DFAs are partitioned. pub(super) trait Remappable: core::fmt::Debug { /// Return the total number of states. fn state_len(&self) -> usize; /// Return the power-of-2 exponent that yields the stride. The pertinent /// laws here are, where N=stride2: 2^N=stride and len(alphabet) <= stride. fn stride2(&self) -> usize; /// Swap the states pointed to by the given IDs. 
The underlying finite
    /// state machine should be mutated such that all of the transitions in
    /// `id1` are now in the memory region where the transitions for `id2`
    /// were, and all of the transitions in `id2` are now in the memory region
    /// where the transitions for `id1` were.
    ///
    /// Essentially, this "moves" `id1` to `id2` and `id2` to `id1`.
    ///
    /// It is expected that, after calling this, the underlying value will be
    /// left in an inconsistent state, since any other transitions pointing
    /// to, e.g., `id1` need to be updated to point to `id2`, which is where
    /// `id1` moved to.
    ///
    /// In order to "fix" the underlying inconsistent state, a `Remapper`
    /// should be used to guarantee that `remap` is called at the appropriate
    /// time.
    fn swap_states(&mut self, id1: StateID, id2: StateID);

    /// This must remap every single state ID in the underlying value according
    /// to the function given. For example, in a DFA, this should remap every
    /// transition and every starting state ID.
    fn remap(&mut self, map: impl Fn(StateID) -> StateID);
}

/// Remapper is an abstraction that manages the remapping of state IDs in a
/// finite state machine. This is useful when one wants to shuffle states into
/// different positions in the machine.
///
/// One of the key complexities this manages is the ability to correctly move
/// one state multiple times.
///
/// Once shuffling is complete, `remap` must be called, which will rewrite
/// all pertinent transitions to updated state IDs. Neglecting to call `remap`
/// will almost certainly result in a corrupt machine.
#[derive(Debug)]
pub(super) struct Remapper {
    /// A map from the index of a state to its pre-multiplied identifier.
    ///
    /// When a state is swapped with another, then their corresponding
    /// locations in this map are also swapped. Thus, its new position will
    /// still point to its old pre-multiplied StateID.
    ///
    /// While there is a bit more to it, this then allows us to rewrite the
    /// state IDs in a DFA's transition table in a single pass. This is done
    /// by iterating over every ID in this map, then iterating over each
    /// transition for the state at that ID and re-mapping the transition from
    /// `old_id` to `map[dfa.to_index(old_id)]`. That is, we find the position
    /// in this map where `old_id` *started*, and set it to where it ended up
    /// after all swaps have been completed.
    map: Vec<StateID>,
    /// A mapper from state index to state ID (and back).
    idxmap: IndexMapper,
}

impl Remapper {
    /// Create a new remapper from the given remappable implementation. The
    /// remapper can then be used to swap states. The remappable value given
    /// here must be the same one given to `swap` and `remap`.
    pub(super) fn new(r: &impl Remappable) -> Remapper {
        let idxmap = IndexMapper { stride2: r.stride2() };
        let map = (0..r.state_len()).map(|i| idxmap.to_state_id(i)).collect();
        Remapper { map, idxmap }
    }

    /// Swap two states. Once this is called, callers must follow through to
    /// call `remap`, or else it's possible for the underlying remappable
    /// value to be in a corrupt state.
    pub(super) fn swap(
        &mut self,
        r: &mut impl Remappable,
        id1: StateID,
        id2: StateID,
    ) {
        if id1 == id2 {
            return;
        }
        r.swap_states(id1, id2);
        self.map.swap(self.idxmap.to_index(id1), self.idxmap.to_index(id2));
    }

    /// Complete the remapping process by rewriting all state IDs in the
    /// remappable value according to the swaps performed.
    pub(super) fn remap(mut self, r: &mut impl Remappable) {
        // Update the map to account for states that have been swapped
        // multiple times.
For example, if (A, C) and (C, G) are swapped, then // transitions previously pointing to A should now point to G. But if // we don't update our map, they will erroneously be set to C. All we // do is follow the swaps in our map until we see our original state // ID. // // The intuition here is to think about how changes are made to the // map: only through pairwise swaps. That means that starting at any // given state, it is always possible to find the loop back to that // state by following the swaps represented in the map (which might be // 0 swaps). // // We are also careful to clone the map before starting in order to // freeze it. We use the frozen map to find our loops, since we need to // update our map as well. Without freezing it, our updates could break // the loops referenced above and produce incorrect results. let oldmap = self.map.clone(); for i in 0..r.state_len() { let cur_id = self.idxmap.to_state_id(i); let mut new_id = oldmap[i]; if cur_id == new_id { continue; } loop { let id = oldmap[self.idxmap.to_index(new_id)]; if cur_id == id { self.map[i] = new_id; break; } new_id = id; } } r.remap(|next| self.map[self.idxmap.to_index(next)]); } } /// A simple type for mapping between state indices and state IDs. /// /// The reason why this exists is because state IDs are "premultiplied." That /// is, in order to get to the transitions for a particular state, one need /// only use the state ID as-is, instead of having to multiple it by transition /// table's stride. /// /// The downside of this is that it's inconvenient to map between state IDs /// using a dense map, e.g., Vec<StateID>. That's because state IDs look like /// `0`, `0+stride`, `0+2*stride`, `0+3*stride`, etc., instead of `0`, `1`, /// `2`, `3`, etc. /// /// Since our state IDs are premultiplied, we can convert back-and-forth /// between IDs and indices by simply unmultiplying the IDs and multiplying the /// indices. #[derive(Debug)] struct IndexMapper { /// The power of 2 corresponding to the stride of the corresponding /// transition table. 'id >> stride2' de-multiplies an ID while 'index << /// stride2' pre-multiplies an index to an ID. stride2: usize, } impl IndexMapper { /// Convert a state ID to a state index. fn to_index(&self, id: StateID) -> usize { id.as_usize() >> self.stride2 } /// Convert a state index to a state ID. fn to_state_id(&self, index: usize) -> StateID { // CORRECTNESS: If the given index is not valid, then it is not // required for this to panic or return a valid state ID. We'll "just" // wind up with panics or silent logic errors at some other point. StateID::new_unchecked(index << self.stride2) } } #[cfg(feature = "dfa-build")] mod dense { use crate::{dfa::dense::OwnedDFA, util::primitives::StateID}; use super::Remappable; impl Remappable for OwnedDFA { fn state_len(&self) -> usize { OwnedDFA::state_len(self) } fn stride2(&self) -> usize { OwnedDFA::stride2(self) } fn swap_states(&mut self, id1: StateID, id2: StateID) { OwnedDFA::swap_states(self, id1, id2) } fn remap(&mut self, map: impl Fn(StateID) -> StateID) { OwnedDFA::remap(self, map) } } } #[cfg(feature = "dfa-onepass")] mod onepass { use crate::{dfa::onepass::DFA, util::primitives::StateID}; use super::Remappable; impl Remappable for DFA { fn state_len(&self) -> usize { DFA::state_len(self) } fn stride2(&self) -> usize { // We don't do pre-multiplication for the one-pass DFA, so // returning 0 has the effect of making state IDs and state indices // equivalent. 
0 } fn swap_states(&mut self, id1: StateID, id2: StateID) { DFA::swap_states(self, id1, id2) } fn remap(&mut self, map: impl Fn(StateID) -> StateID) { DFA::remap(self, map) } } } <file_sep>/regex-automata/src/macros.rs // Some feature combinations result in some of these macros never being used. // Which is fine. Just squash the warnings. #![allow(unused_macros)] macro_rules! log { ($($tt:tt)*) => { #[cfg(feature = "logging")] { $($tt)* } } } macro_rules! debug { ($($tt:tt)*) => { log!(log::debug!($($tt)*)) } } macro_rules! trace { ($($tt:tt)*) => { log!(log::trace!($($tt)*)) } } <file_sep>/record/compile-test/README.md This directory contains the results of compilation tests. Specifically, the results are from testing both the from scratch compilation time and relative binary size increases of various features for both the `regex` and `regex-automata` crates. Here's an example of how to run these tests for just the `regex` crate. You'll need the `regex-cli` command installed, which can be found in the `regex-cli` directory in the root of this repository. This must be run in the root of a checkout of this repository. ``` $ mkdir /tmp/regex-compile-test $ regex-cli compile-test ./ /tmp/regex-compile-test | tee record/compile-test/2023-04-19_1.7.3.csv ``` You can then look at the results using a tool like [`xsv`][xsv]: ``` $ xsv table record/compile-test/2023-04-19_1.7.3.csv ``` Note that the relative binary size is computed by building a "baseline" hello world program, and then subtracting that from the size of a binary that uses the regex crate. [xsv]: https://github.com/BurntSushi/xsv <file_sep>/testdata/earliest.toml [[test]] name = "no-greedy-100" regex = 'a+' haystack = "aaa" matches = [[0, 1], [1, 2], [2, 3]] search-kind = "earliest" [[test]] name = "no-greedy-200" regex = 'abc+' haystack = "zzzabccc" matches = [[3, 6]] search-kind = "earliest" [[test]] name = "is-ungreedy" regex = 'a+?' haystack = "aaa" matches = [[0, 1], [1, 2], [2, 3]] search-kind = "earliest" [[test]] name = "look-start-test" regex = '^(abc|a)' haystack = "abc" matches = [ [[0, 1], [0, 1]], ] search-kind = "earliest" [[test]] name = "look-end-test" regex = '(abc|a)$' haystack = "abc" matches = [ [[0, 3], [0, 3]], ] search-kind = "earliest" [[test]] name = "no-leftmost-first-100" regex = 'abc|a' haystack = "abc" matches = [[0, 1]] search-kind = "earliest" [[test]] name = "no-leftmost-first-200" regex = 'aba|a' haystack = "aba" matches = [[0, 1], [2, 3]] search-kind = "earliest" <file_sep>/testdata/overlapping.toml # NOTE: We define a number of tests where the *match* kind is 'leftmost-first' # but the *search* kind is 'overlapping'. This is a somewhat nonsensical # combination and can produce odd results. Nevertheless, those results should # be consistent so we test them here. (At the time of writing this note, I # hadn't yet decided whether to make 'leftmost-first' with 'overlapping' result # in unspecified behavior.) # This demonstrates how a full overlapping search is obvious quadratic. This # regex reports a match for every substring in the haystack. 
[[test]] name = "ungreedy-dotstar-matches-everything-100" regex = [".*?"] haystack = "zzz" matches = [ { id = 0, span = [0, 0] }, { id = 0, span = [1, 1] }, { id = 0, span = [0, 1] }, { id = 0, span = [2, 2] }, { id = 0, span = [1, 2] }, { id = 0, span = [0, 2] }, { id = 0, span = [3, 3] }, { id = 0, span = [2, 3] }, { id = 0, span = [1, 3] }, { id = 0, span = [0, 3] }, ] match-kind = "all" search-kind = "overlapping" [[test]] name = "greedy-dotstar-matches-everything-100" regex = [".*"] haystack = "zzz" matches = [ { id = 0, span = [0, 0] }, { id = 0, span = [1, 1] }, { id = 0, span = [0, 1] }, { id = 0, span = [2, 2] }, { id = 0, span = [1, 2] }, { id = 0, span = [0, 2] }, { id = 0, span = [3, 3] }, { id = 0, span = [2, 3] }, { id = 0, span = [1, 3] }, { id = 0, span = [0, 3] }, ] match-kind = "all" search-kind = "overlapping" [[test]] name = "repetition-plus-leftmost-first-100" regex = 'a+' haystack = "aaa" matches = [[0, 1], [1, 2], [0, 2], [2, 3], [1, 3], [0, 3]] match-kind = "leftmost-first" search-kind = "overlapping" [[test]] name = "repetition-plus-leftmost-first-110" regex = '☃+' haystack = "☃☃☃" matches = [[0, 3], [3, 6], [0, 6], [6, 9], [3, 9], [0, 9]] match-kind = "leftmost-first" search-kind = "overlapping" [[test]] name = "repetition-plus-all-100" regex = 'a+' haystack = "aaa" matches = [[0, 1], [1, 2], [0, 2], [2, 3], [1, 3], [0, 3]] match-kind = "all" search-kind = "overlapping" [[test]] name = "repetition-plus-all-110" regex = '☃+' haystack = "☃☃☃" matches = [[0, 3], [3, 6], [0, 6], [6, 9], [3, 9], [0, 9]] match-kind = "all" search-kind = "overlapping" [[test]] name = "repetition-plus-leftmost-first-200" regex = '(abc)+' haystack = "zzabcabczzabc" matches = [ [[2, 5], [2, 5]], [[5, 8], [5, 8]], [[2, 8], [5, 8]], ] match-kind = "leftmost-first" search-kind = "overlapping" [[test]] name = "repetition-plus-all-200" regex = '(abc)+' haystack = "zzabcabczzabc" matches = [ [[2, 5], [2, 5]], [[5, 8], [5, 8]], [[2, 8], [5, 8]], [[10, 13], [10, 13]], ] match-kind = "all" search-kind = "overlapping" [[test]] name = "repetition-star-leftmost-first-100" regex = 'a*' haystack = "aaa" matches = [ [0, 0], [1, 1], [0, 1], [2, 2], [1, 2], [0, 2], [3, 3], [2, 3], [1, 3], [0, 3], ] match-kind = "leftmost-first" search-kind = "overlapping" [[test]] name = "repetition-star-all-100" regex = 'a*' haystack = "aaa" matches = [ [0, 0], [1, 1], [0, 1], [2, 2], [1, 2], [0, 2], [3, 3], [2, 3], [1, 3], [0, 3], ] match-kind = "all" search-kind = "overlapping" [[test]] name = "repetition-star-leftmost-first-200" regex = '(abc)*' haystack = "zzabcabczzabc" matches = [ [[0, 0], []], ] match-kind = "leftmost-first" search-kind = "overlapping" [[test]] name = "repetition-star-all-200" regex = '(abc)*' haystack = "zzabcabczzabc" matches = [ [[0, 0], []], [[1, 1], []], [[2, 2], []], [[3, 3], []], [[4, 4], []], [[5, 5], []], [[2, 5], [2, 5]], [[6, 6], []], [[7, 7], []], [[8, 8], []], [[5, 8], [5, 8]], [[2, 8], [5, 8]], [[9, 9], []], [[10, 10], []], [[11, 11], []], [[12, 12], []], [[13, 13], []], [[10, 13], [10, 13]], ] match-kind = "all" search-kind = "overlapping" [[test]] name = "start-end-rep-leftmost-first" regex = '(^$)*' haystack = "abc" matches = [ [[0, 0], []], ] match-kind = "leftmost-first" search-kind = "overlapping" [[test]] name = "start-end-rep-all" regex = '(^$)*' haystack = "abc" matches = [ [[0, 0], []], [[1, 1], []], [[2, 2], []], [[3, 3], []], ] match-kind = "all" search-kind = "overlapping" [[test]] name = "alt-leftmost-first-100" regex = 'abc|a' haystack = "zzabcazzaabc" matches = [[2, 3], 
[2, 5]]
match-kind = "leftmost-first"
search-kind = "overlapping"

[[test]]
name = "alt-all-100"
regex = 'abc|a'
haystack = "zzabcazzaabc"
matches = [[2, 3], [2, 5], [5, 6], [8, 9], [9, 10], [9, 12]]
match-kind = "all"
search-kind = "overlapping"

[[test]]
name = "empty-000"
regex = ""
haystack = "abc"
matches = [[0, 0], [1, 1], [2, 2], [3, 3]]
match-kind = "all"
search-kind = "overlapping"

[[test]]
name = "empty-alt-000"
regex = "|b"
haystack = "abc"
matches = [[0, 0], [1, 1], [2, 2], [1, 2], [3, 3]]
match-kind = "all"
search-kind = "overlapping"

[[test]]
name = "empty-alt-010"
regex = "b|"
haystack = "abc"
matches = [[0, 0], [1, 1], [2, 2], [1, 2], [3, 3]]
match-kind = "all"
search-kind = "overlapping"

[[test]]
# See: https://github.com/rust-lang/regex/issues/484
name = "iter1-bytes"
regex = ''
haystack = "☃"
matches = [[0, 0], [1, 1], [2, 2], [3, 3]]
utf8 = false
match-kind = "all"
search-kind = "overlapping"

[[test]]
# See: https://github.com/rust-lang/regex/issues/484
name = "iter1-utf8"
regex = ''
haystack = "☃"
matches = [[0, 0], [3, 3]]
match-kind = "all"
search-kind = "overlapping"

[[test]]
name = "iter1-incomplete-utf8"
regex = ''
haystack = '\xE2\x98'  # incomplete snowman
matches = [[0, 0], [1, 1], [2, 2]]
match-kind = "all"
search-kind = "overlapping"
unescape = true
utf8 = false

[[test]]
name = "scratch"
regex = ['sam', 'samwise']
haystack = "samwise"
matches = [
    { id = 0, span = [0, 3] },
]
match-kind = "leftmost-first"
search-kind = "overlapping"
<file_sep>/regex-cli/args/api.rs
use {
    lexopt::{Arg, Parser},
    regex::bytes::{Regex, RegexSet},
    regex_automata::util::syntax,
};

use crate::args::{self, Configurable, Usage};

/// Exposes the configuration for the top-level `Regex` API.
#[derive(Debug, Default)]
pub struct Config {
    size_limit: Option<usize>,
    dfa_size_limit: Option<usize>,
}

impl Config {
    /// Builds a `Regex` from the given syntax configuration and sequence of
    /// patterns. This returns an error if `patterns.len() != 1`.
    ///
    /// Note that this also returns an error if UTF-8 mode is enabled in
    /// the given syntax configuration. This is mostly because we stick to
    /// returning a `regex::bytes::Regex`, which hard-codes disabling UTF-8
    /// mode. We could add another constructor for `regex::Regex` which
    /// requires that UTF-8 mode is enabled if it's needed, but I don't think
    /// it is.
    pub fn from_patterns(
        &self,
        syntax: &syntax::Config,
        patterns: &[String],
    ) -> anyhow::Result<Regex> {
        anyhow::ensure!(
            !syntax.get_utf8(),
            "API-level regex requires that UTF-8 syntax mode be disabled",
        );
        anyhow::ensure!(
            patterns.len() == 1,
            "API-level regex requires exactly one pattern, \
             but {} were given",
            patterns.len(),
        );
        let mut b = regex::bytes::RegexBuilder::new(&patterns[0]);
        b.case_insensitive(syntax.get_case_insensitive());
        b.multi_line(syntax.get_multi_line());
        b.dot_matches_new_line(syntax.get_dot_matches_new_line());
        b.swap_greed(syntax.get_swap_greed());
        b.ignore_whitespace(syntax.get_ignore_whitespace());
        b.unicode(syntax.get_unicode());
        b.octal(syntax.get_octal());
        b.nest_limit(syntax.get_nest_limit());
        b.size_limit(self.size_limit.unwrap_or(usize::MAX));
        if let Some(limit) = self.dfa_size_limit {
            b.dfa_size_limit(limit);
        }
        b.build().map_err(anyhow::Error::from)
    }

    /// Builds a `RegexSet` from the given syntax configuration and sequence
    /// of patterns.
    ///
    /// Note that this returns an error if UTF-8 mode is enabled in the given
    /// syntax configuration. This is mostly because we stick to returning a
    /// `regex::bytes::RegexSet`, which hard-codes disabling UTF-8 mode. We
    /// could add another constructor for `regex::RegexSet` which requires
    /// that UTF-8 mode is enabled if it's needed, but I don't think it is.
    pub fn from_patterns_set(
        &self,
        syntax: &syntax::Config,
        patterns: &[String],
    ) -> anyhow::Result<RegexSet> {
        anyhow::ensure!(
            !syntax.get_utf8(),
            "API-level regex requires that UTF-8 syntax mode be disabled",
        );
        let mut b = regex::bytes::RegexSetBuilder::new(patterns);
        b.case_insensitive(syntax.get_case_insensitive());
        b.multi_line(syntax.get_multi_line());
        b.dot_matches_new_line(syntax.get_dot_matches_new_line());
        b.swap_greed(syntax.get_swap_greed());
        b.ignore_whitespace(syntax.get_ignore_whitespace());
        b.unicode(syntax.get_unicode());
        b.octal(syntax.get_octal());
        b.nest_limit(syntax.get_nest_limit());
        b.size_limit(self.size_limit.unwrap_or(usize::MAX));
        if let Some(limit) = self.dfa_size_limit {
            b.dfa_size_limit(limit);
        }
        b.build().map_err(anyhow::Error::from)
    }
}

impl Configurable for Config {
    fn configure(
        &mut self,
        p: &mut Parser,
        arg: &mut Arg,
    ) -> anyhow::Result<bool> {
        match *arg {
            Arg::Long("size-limit") => {
                self.size_limit = args::parse_maybe(p, "--size-limit")?;
            }
            Arg::Long("dfa-size-limit") => {
                self.dfa_size_limit =
                    Some(args::parse(p, "--dfa-size-limit")?);
            }
            _ => return Ok(false),
        }
        Ok(true)
    }

    fn usage(&self) -> &[Usage] {
        const USAGES: &'static [Usage] = &[
            Usage::new(
                "--size-limit",
                "Set a limit on heap used by a regex.",
                r#"
This sets a limit, in bytes, on the heap memory used by a regex. The special
value 'none' indicates that no size limit should be imposed.
"#,
            ),
            Usage::new(
                "--dfa-size-limit",
                "Set a limit on the heap used by a regex's internal lazy DFA.",
                r#"
This sets a capacity, in bytes, on the approximate maximum total heap memory
used by a regex's internal lazy DFA. This only applies if a lazy DFA is used.

Note that one cannot set this to 'none' since it represents a capacity. When
it isn't set, then some reasonable default is used.
"#,
            ),
        ];
        USAGES
    }
}
<file_sep>/regex-capi/include/rure.h
#ifndef _RURE_H
#define _RURE_H

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * rure is the type of a compiled regular expression.
 *
 * An rure can be safely used from multiple threads simultaneously.
 */
typedef struct rure rure;

/*
 * rure_set is the type of a set of compiled regular expressions.
 *
 * A rure_set can be safely used from multiple threads simultaneously.
 */
typedef struct rure_set rure_set;

/*
 * rure_options is the set of non-flag configuration options for compiling
 * a regular expression. Currently, only two options are available: setting
 * the size limit of the compiled program and setting the size limit of the
 * cache of states that the DFA uses while searching.
 *
 * For most uses, the default settings will work fine, and NULL can be passed
 * wherever a *rure_options is expected.
 */
typedef struct rure_options rure_options;

/*
 * The flags listed below can be used in rure_compile to set the default
 * flags. All flags can otherwise be toggled in the expression itself using
 * standard syntax, e.g., `(?i)` turns case insensitive matching on and `(?-i)`
 * disables it.
 */
/* The case insensitive (i) flag. */
#define RURE_FLAG_CASEI (1 << 0)
/* The multi-line matching (m) flag. (^ and $ match new line boundaries.) */
#define RURE_FLAG_MULTI (1 << 1)
/* The any character (s) flag. (. matches new line.)
*/ #define RURE_FLAG_DOTNL (1 << 2) /* The greedy swap (U) flag. (e.g., + is ungreedy and +? is greedy.) */ #define RURE_FLAG_SWAP_GREED (1 << 3) /* The ignore whitespace (x) flag. */ #define RURE_FLAG_SPACE (1 << 4) /* The Unicode (u) flag. */ #define RURE_FLAG_UNICODE (1 << 5) /* The default set of flags enabled when no flags are set. */ #define RURE_DEFAULT_FLAGS RURE_FLAG_UNICODE /* * rure_match corresponds to the location of a single match in a haystack. */ typedef struct rure_match { /* The start position. */ size_t start; /* The end position. */ size_t end; } rure_match; /* * rure_captures represents storage for sub-capture locations of a match. * * Computing the capture groups of a match can carry a significant performance * penalty, so their use in the API is optional. * * An rure_captures value can be reused in multiple calls to rure_find_captures, * so long as it is used with the compiled regular expression that created * it. * * An rure_captures value may outlive its corresponding rure and can be freed * independently. * * It is not safe to use from multiple threads simultaneously. */ typedef struct rure_captures rure_captures; /* * rure_iter is an iterator over successive non-overlapping matches in a * particular haystack. * * An rure_iter value may not outlive its corresponding rure and should be freed * before its corresponding rure is freed. * * It is not safe to use from multiple threads simultaneously. */ typedef struct rure_iter rure_iter; /* * rure_iter_capture_names is an iterator over the list of capture group names * in this particular rure. * * An rure_iter_capture_names value may not outlive its corresponding rure, * and should be freed before its corresponding rure is freed. * * It is not safe to use from multiple threads simultaneously. */ typedef struct rure_iter_capture_names rure_iter_capture_names; /* * rure_error is an error that caused compilation to fail. * * Most errors are syntax errors but an error can be returned if the compiled * regular expression would be too big. * * Whenever a function accepts an *rure_error, it is safe to pass NULL. (But * you will not get access to the error if one occurred.) * * It is not safe to use from multiple threads simultaneously. */ typedef struct rure_error rure_error; /* * rure_compile_must compiles the given pattern into a regular expression. If * compilation fails for any reason, an error message is printed to stderr and * the process is aborted. * * The pattern given should be in UTF-8. For convenience, this accepts a C * string, which means the pattern cannot usefully contain NUL. If your pattern * may contain NUL, consider using a regular expression escape sequence, or * just use rure_compile. * * This uses RURE_DEFAULT_FLAGS. * * The compiled expression returned may be used from multiple threads * simultaneously. */ rure *rure_compile_must(const char *pattern); /* * rure_compile compiles the given pattern into a regular expression. The * pattern must be valid UTF-8 and the length corresponds to the number of * bytes in the pattern. * * flags is a bitfield. Valid values are constants declared with prefix * RURE_FLAG_. * * options contains non-flag configuration settings. If it's NULL, default * settings are used. options may be freed immediately after a call to * rure_compile. * * error is set if there was a problem compiling the pattern (including if the * pattern is not valid UTF-8). If error is NULL, then no error information * is returned. In all cases, if an error occurs, NULL is returned. 
* * The compiled expression returned may be used from multiple threads * simultaneously. */ rure *rure_compile(const uint8_t *pattern, size_t length, uint32_t flags, rure_options *options, rure_error *error); /* * rure_free frees the given compiled regular expression. * * This must be called at most once for any rure. */ void rure_free(rure *re); /* * rure_is_match returns true if and only if re matches anywhere in haystack. * * haystack may contain arbitrary bytes, but ASCII compatible text is more * useful. UTF-8 is even more useful. Other text encodings aren't supported. * length should be the number of bytes in haystack. * * start is the position at which to start searching. Note that setting the * start position is distinct from incrementing the pointer, since the regex * engine may look at bytes before the start position to determine match * information. For example, if the start position is greater than 0, then the * \A ("begin text") anchor can never match. * * rure_is_match should be preferred to rure_find since it may be faster. * * N.B. The performance of this search is not impacted by the presence of * capturing groups in your regular expression. */ bool rure_is_match(rure *re, const uint8_t *haystack, size_t length, size_t start); /* * rure_find returns true if and only if re matches anywhere in haystack. * If a match is found, then its start and end offsets (in bytes) are set * on the match pointer given. * * haystack may contain arbitrary bytes, but ASCII compatible text is more * useful. UTF-8 is even more useful. Other text encodings aren't supported. * length should be the number of bytes in haystack. * * start is the position at which to start searching. Note that setting the * start position is distinct from incrementing the pointer, since the regex * engine may look at bytes before the start position to determine match * information. For example, if the start position is greater than 0, then the * \A ("begin text") anchor can never match. * * rure_find should be preferred to rure_find_captures since it may be faster. * * N.B. The performance of this search is not impacted by the presence of * capturing groups in your regular expression. */ bool rure_find(rure *re, const uint8_t *haystack, size_t length, size_t start, rure_match *match); /* * rure_find_captures returns true if and only if re matches anywhere in * haystack. If a match is found, then all of its capture locations are stored * in the captures pointer given. * * haystack may contain arbitrary bytes, but ASCII compatible text is more * useful. UTF-8 is even more useful. Other text encodings aren't supported. * length should be the number of bytes in haystack. * * start is the position at which to start searching. Note that setting the * start position is distinct from incrementing the pointer, since the regex * engine may look at bytes before the start position to determine match * information. For example, if the start position is greater than 0, then the * \A ("begin text") anchor can never match. * * Only use this function if you specifically need access to capture locations. * It is not necessary to use this function just because your regular * expression contains capturing groups. * * Capture locations can be accessed using the rure_captures_* functions. * * N.B. The performance of this search can be impacted by the number of * capturing groups. If you're using this function, it may be beneficial to * use non-capturing groups (e.g., `(?:re)`) where possible. 
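 *
 * As a rough usage sketch (using only functions declared in this header;
 * error handling and includes are omitted, and the pattern and haystack are
 * made up for illustration), finding the first capturing group of a match
 * might look like this:
 *
 *     rure *re = rure_compile_must("(\\w+):(\\w+)");
 *     rure_captures *caps = rure_captures_new(re);
 *     const char *haystack = "key:value";
 *     if (rure_find_captures(re, (const uint8_t *)haystack,
 *                            strlen(haystack), 0, caps)) {
 *         rure_match group1 = {0};
 *         rure_captures_at(caps, 1, &group1);
 *         // group1.start..group1.end now spans "key"
 *     }
 *     rure_captures_free(caps);
 *     rure_free(re);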
*/ bool rure_find_captures(rure *re, const uint8_t *haystack, size_t length, size_t start, rure_captures *captures); /* * rure_shortest_match returns true if and only if re matches anywhere in * haystack. If a match is found, then its end location is stored in the * pointer given. The end location is the place at which the regex engine * determined that a match exists, but may occur before the end of the proper * leftmost-first match. * * haystack may contain arbitrary bytes, but ASCII compatible text is more * useful. UTF-8 is even more useful. Other text encodings aren't supported. * length should be the number of bytes in haystack. * * start is the position at which to start searching. Note that setting the * start position is distinct from incrementing the pointer, since the regex * engine may look at bytes before the start position to determine match * information. For example, if the start position is greater than 0, then the * \A ("begin text") anchor can never match. * * rure_shortest_match should be preferred to rure_find since it may be faster. * * N.B. The performance of this search is not impacted by the presence of * capturing groups in your regular expression. */ bool rure_shortest_match(rure *re, const uint8_t *haystack, size_t length, size_t start, size_t *end); /* * rure_capture_name_index returns the capture index for the name given. If * no such named capturing group exists in re, then -1 is returned. * * The capture index may be used with rure_captures_at. * * This function never returns 0 since the first capture group always * corresponds to the entire match and is always unnamed. */ int32_t rure_capture_name_index(rure *re, const char *name); /* * rure_iter_capture_names_new creates a new capture_names iterator. * * An iterator will report all successive capture group names of re. */ rure_iter_capture_names *rure_iter_capture_names_new(rure *re); /* * rure_iter_capture_names_free frees the iterator given. * * It must be called at most once. */ void rure_iter_capture_names_free(rure_iter_capture_names *it); /* * rure_iter_capture_names_next advances the iterator and returns true * if and only if another capture group name exists. * * The value of the capture group name is written to the provided pointer. */ bool rure_iter_capture_names_next(rure_iter_capture_names *it, char **name); /* * rure_iter_new creates a new iterator. * * An iterator will report all successive non-overlapping matches of re. * When calling iterator functions, the same haystack and length must be * supplied to all invocations. (Strict pointer equality is, however, not * required.) */ rure_iter *rure_iter_new(rure *re); /* * rure_iter_free frees the iterator given. * * It must be called at most once. */ void rure_iter_free(rure_iter *it); /* * rure_iter_next advances the iterator and returns true if and only if a * match was found. If a match is found, then the match pointer is set with the * start and end location of the match, in bytes. * * If no match is found, then subsequent calls will return false indefinitely. * * haystack may contain arbitrary bytes, but ASCII compatible text is more * useful. UTF-8 is even more useful. Other text encodings aren't supported. * length should be the number of bytes in haystack. The given haystack must * be logically equivalent to all other haystacks given to this iterator. * * rure_iter_next should be preferred to rure_iter_next_captures since it may * be faster. * * N.B. 
The performance of this search is not impacted by the presence of * capturing groups in your regular expression. */ bool rure_iter_next(rure_iter *it, const uint8_t *haystack, size_t length, rure_match *match); /* * rure_iter_next_captures advances the iterator and returns true if and only if a * match was found. If a match is found, then all of its capture locations are * stored in the captures pointer given. * * If no match is found, then subsequent calls will return false indefinitely. * * haystack may contain arbitrary bytes, but ASCII compatible text is more * useful. UTF-8 is even more useful. Other text encodings aren't supported. * length should be the number of bytes in haystack. The given haystack must * be logically equivalent to all other haystacks given to this iterator. * * Only use this function if you specifically need access to capture locations. * It is not necessary to use this function just because your regular * expression contains capturing groups. * * Capture locations can be accessed using the rure_captures_* functions. * * N.B. The performance of this search can be impacted by the number of * capturing groups. If you're using this function, it may be beneficial to * use non-capturing groups (e.g., `(?:re)`) where possible. */ bool rure_iter_next_captures(rure_iter *it, const uint8_t *haystack, size_t length, rure_captures *captures); /* * rure_captures_new allocates storage for all capturing groups in re. * * An rure_captures value may be reused on subsequent calls to * rure_find_captures or rure_iter_next_captures. * * An rure_captures value may be freed independently of re, although any * particular rure_captures should be used only with the re given here. * * It is not safe to use an rure_captures value from multiple threads * simultaneously. */ rure_captures *rure_captures_new(rure *re); /* * rure_captures_free frees the given captures. * * This must be called at most once. */ void rure_captures_free(rure_captures *captures); /* * rure_captures_at returns true if and only if the capturing group at the * index given was part of a match. If so, the given match pointer is populated * with the start and end location (in bytes) of the capturing group. * * If no capture group with the index i exists, then false is * returned. (A capturing group exists if and only if i is less than * rure_captures_len(captures).) * * Note that index 0 corresponds to the full match. */ bool rure_captures_at(rure_captures *captures, size_t i, rure_match *match); /* * rure_captures_len returns the number of capturing groups in the given * captures. */ size_t rure_captures_len(rure_captures *captures); /* * rure_options_new allocates space for options. * * Options may be freed immediately after a call to rure_compile, but otherwise * may be freely used in multiple calls to rure_compile. * * It is not safe to set options from multiple threads simultaneously. It is * safe to call rure_compile from multiple threads simultaneously using the * same options pointer. */ rure_options *rure_options_new(void); /* * rure_options_free frees the given options. * * This must be called at most once. */ void rure_options_free(rure_options *options); /* * rure_options_size_limit sets the approximate size limit of the compiled * regular expression. * * This size limit roughly corresponds to the number of bytes occupied by a * single compiled program. If the program would exceed this number, then a * compilation error will be returned from rure_compile. 
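 *
 * A hedged usage sketch (the limit values, pattern and its length are
 * arbitrary placeholders):
 *
 *     rure_options *opts = rure_options_new();
 *     rure_options_size_limit(opts, 1 << 20);
 *     rure_options_dfa_size_limit(opts, 2 << 20);
 *     rure *re = rure_compile(pat, pat_len, RURE_DEFAULT_FLAGS, opts, NULL);
 *     rure_options_free(opts);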
 */
void rure_options_size_limit(rure_options *options, size_t limit);

/*
 * rure_options_dfa_size_limit sets the approximate size of the cache used by
 * the DFA during search.
 *
 * This roughly corresponds to the number of bytes that the DFA will use while
 * searching.
 *
 * Note that this is a *per thread* limit. There is no way to set a global
 * limit. In particular, if a regular expression is used from multiple threads
 * simultaneously, then each thread may use up to the number of bytes
 * specified here.
 */
void rure_options_dfa_size_limit(rure_options *options, size_t limit);

/*
 * rure_compile_set compiles the given list of patterns into a single regular
 * expression which can be matched in a linear-scan. Each pattern in patterns
 * must be valid UTF-8 and the length of each pattern in patterns corresponds
 * to a byte length in patterns_lengths.
 *
 * The number of patterns to compile is specified by patterns_count. patterns
 * must contain at least this many entries.
 *
 * flags is a bitfield. Valid values are constants declared with prefix
 * RURE_FLAG_.
 *
 * options contains non-flag configuration settings. If it's NULL, default
 * settings are used. options may be freed immediately after a call to
 * rure_compile.
 *
 * error is set if there was a problem compiling the pattern.
 *
 * The compiled expression set returned may be used from multiple threads.
 */
rure_set *rure_compile_set(const uint8_t **patterns,
                           const size_t *patterns_lengths,
                           size_t patterns_count,
                           uint32_t flags,
                           rure_options *options,
                           rure_error *error);

/*
 * rure_set_free frees the given compiled regular expression set.
 *
 * This must be called at most once for any rure_set.
 */
void rure_set_free(rure_set *re);

/*
 * rure_set_is_match returns true if and only if any regexes within the set
 * match anywhere in the haystack. Once a match has been located, the
 * matching engine will quit immediately.
 *
 * haystack may contain arbitrary bytes, but ASCII compatible text is more
 * useful. UTF-8 is even more useful. Other text encodings aren't supported.
 * length should be the number of bytes in haystack.
 *
 * start is the position at which to start searching. Note that setting the
 * start position is distinct from incrementing the pointer, since the regex
 * engine may look at bytes before the start position to determine match
 * information. For example, if the start position is greater than 0, then the
 * \A ("begin text") anchor can never match.
 */
bool rure_set_is_match(rure_set *re, const uint8_t *haystack, size_t length,
                       size_t start);

/*
 * rure_set_matches compares each regex in the set against the haystack and
 * modifies matches with the match result of each pattern. Match results are
 * ordered in the same way as the rure_set was compiled. For example,
 * index 0 of matches corresponds to the first pattern passed to
 * `rure_compile_set`.
 *
 * haystack may contain arbitrary bytes, but ASCII compatible text is more
 * useful. UTF-8 is even more useful. Other text encodings aren't supported.
 * length should be the number of bytes in haystack.
 *
 * start is the position at which to start searching. Note that setting the
 * start position is distinct from incrementing the pointer, since the regex
 * engine may look at bytes before the start position to determine match
 * information. For example, if the start position is greater than 0, then the
 * \A ("begin text") anchor can never match.
 *
 * matches must be greater than or equal to the number of patterns the
 * rure_set was compiled with.
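 *
 * A hedged sketch (this assumes a set compiled from exactly three patterns;
 * set, haystack and len are placeholders):
 *
 *     bool which[3] = {false, false, false};
 *     if (rure_set_matches(set, haystack, len, 0, which)) {
 *         // which[i] is true when pattern i matched somewhere in haystack
 *     }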
 *
 * Only use this function if you specifically need to know which regexes
 * matched within the set. To determine if any of the regexes matched without
 * caring which, use rure_set_is_match.
 */
bool rure_set_matches(rure_set *re, const uint8_t *haystack, size_t length,
                      size_t start, bool *matches);

/*
 * rure_set_len returns the number of patterns rure_set was compiled with.
 */
size_t rure_set_len(rure_set *re);

/*
 * rure_error_new allocates space for an error.
 *
 * If error information is desired, then rure_error_new should be called
 * to create an rure_error pointer, and that pointer can be passed to
 * rure_compile. If an error occurred, then rure_compile will return NULL and
 * the error pointer will be set. A message can then be extracted.
 *
 * It is not safe to use errors from multiple threads simultaneously. An error
 * value may be reused on subsequent calls to rure_compile.
 */
rure_error *rure_error_new(void);

/*
 * rure_error_free frees the error given.
 *
 * This must be called at most once.
 */
void rure_error_free(rure_error *err);

/*
 * rure_error_message returns a NUL terminated string that describes the error
 * message.
 *
 * The pointer returned must not be freed. Instead, it will be freed when
 * rure_error_free is called. If err is used in subsequent calls to
 * rure_compile, then this pointer may change or become invalid.
 */
const char *rure_error_message(rure_error *err);

/*
 * rure_escape_must returns a NUL terminated string where all meta characters
 * have been escaped. If escaping fails for any reason, an error message is
 * printed to stderr and the process is aborted.
 *
 * The pattern given should be in UTF-8. For convenience, this accepts a C
 * string, which means the pattern cannot contain a NUL byte. These correspond
 * to the only two failure conditions of this function. That is, if the caller
 * guarantees that the given pattern is valid UTF-8 and does not contain a
 * NUL byte, then this is guaranteed to succeed (modulo out-of-memory errors).
 *
 * The pointer returned must not be freed directly. Instead, it should be freed
 * by calling rure_cstring_free.
 */
const char *rure_escape_must(const char *pattern);

/*
 * rure_cstring_free frees the string given.
 *
 * This must be called at most once per string.
 */
void rure_cstring_free(char *s);

#ifdef __cplusplus
}
#endif

#endif
<file_sep>/regex-automata/src/nfa/mod.rs
/*!
Provides non-deterministic finite automata (NFA) and regex engines that use
them.

While NFAs and DFAs (deterministic finite automata) have equivalent
*theoretical* power, their usage in practice tends to result in different
engineering trade offs. While this isn't meant to be a comprehensive treatment
of the topic, here are a few key trade offs that are, at minimum, true for
this crate:

* NFAs tend to be represented sparsely where as DFAs are represented densely.
Sparse representations use less memory, but are slower to traverse. Conversely,
dense representations use more memory, but are faster to traverse. (Sometimes
these lines are blurred. For example, an `NFA` might choose to represent a
particular state in a dense fashion, and a DFA can be built using a sparse
representation via [`sparse::DFA`](crate::dfa::sparse::DFA).)
* NFAs have epsilon transitions and DFAs don't. In practice, this means that
handling a single byte in a haystack with an NFA at search time may require
visiting multiple NFA states. In a DFA, each byte only requires visiting a
single state.
Stated differently, NFAs require a variable number of CPU instructions to
process one byte in a haystack where as a DFA uses a constant number of CPU
instructions to process one byte.
* NFAs are generally easier to amend with secondary storage. For example, the
[`thompson::pikevm::PikeVM`] uses an NFA to match, but also uses additional
memory beyond the model of a finite state machine to track offsets for
matching capturing groups. Conversely, the most a DFA can do is report the
offset (and pattern ID) at which a match occurred. This is generally why we
also compile DFAs in reverse, so that we can run them after finding the end
of a match to also find the start of a match.
* NFAs take worst case linear time to build, but DFAs take worst case
exponential time to build. The [hybrid NFA/DFA](crate::hybrid) mitigates this
challenge for DFAs in many practical cases.

There are likely other differences, but the bottom line is that NFAs tend to
be more memory efficient and give easier opportunities for increasing
expressive power, where as DFAs are faster to search with.

# Why only a Thompson NFA?

Currently, the only kind of NFA we support in this crate is a [Thompson
NFA](https://en.wikipedia.org/wiki/Thompson%27s_construction). This refers to
a specific construction algorithm that takes the syntax of a regex pattern and
converts it to an NFA. Specifically, it makes gratuitous use of epsilon
transitions in order to keep its structure simple. In exchange, its
construction time is linear in the size of the regex. A Thompson NFA also
makes the guarantee that given any state and a character in a haystack, there
is at most one transition defined for it. (Although there may be many epsilon
transitions.)

It is possible that other types of NFAs will be added in the future, such as a
[Glushkov NFA](https://en.wikipedia.org/wiki/Glushkov%27s_construction_algorithm).
But currently, this crate only provides a Thompson NFA.
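
# Example: searching with the Pike VM

The following is an illustrative sketch only. It assumes the `nfa-pikevm`
feature is enabled; the pattern and haystack are arbitrary:

```
use regex_automata::nfa::thompson::pikevm::PikeVM;

let re = PikeVM::new(r"[0-9]+").unwrap();
let mut cache = re.create_cache();
let mut caps = re.create_captures();
re.captures(&mut cache, "abc 123", &mut caps);
assert!(caps.is_match());
```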
*/ #[cfg(feature = "nfa-thompson")] pub mod thompson; <file_sep>/regex-automata/tests/hybrid/mod.rs mod api; #[cfg(not(miri))] mod suite; <file_sep>/regex-automata/src/util/prefilter/memchr.rs use crate::util::{ prefilter::PrefilterI, search::{MatchKind, Span}, }; #[derive(Clone, Debug)] pub(crate) struct Memchr(u8); impl Memchr { pub(crate) fn new<B: AsRef<[u8]>>( _kind: MatchKind, needles: &[B], ) -> Option<Memchr> { #[cfg(not(feature = "perf-literal-substring"))] { None } #[cfg(feature = "perf-literal-substring")] { if needles.len() != 1 { return None; } if needles[0].as_ref().len() != 1 { return None; } Some(Memchr(needles[0].as_ref()[0])) } } } impl PrefilterI for Memchr { fn find(&self, haystack: &[u8], span: Span) -> Option<Span> { #[cfg(not(feature = "perf-literal-substring"))] { unreachable!() } #[cfg(feature = "perf-literal-substring")] { memchr::memchr(self.0, &haystack[span]).map(|i| { let start = span.start + i; let end = start + 1; Span { start, end } }) } } fn prefix(&self, haystack: &[u8], span: Span) -> Option<Span> { let b = *haystack.get(span.start)?; if self.0 == b { Some(Span { start: span.start, end: span.start + 1 }) } else { None } } fn memory_usage(&self) -> usize { 0 } fn is_fast(&self) -> bool { true } } #[derive(Clone, Debug)] pub(crate) struct Memchr2(u8, u8); impl Memchr2 { pub(crate) fn new<B: AsRef<[u8]>>( _kind: MatchKind, needles: &[B], ) -> Option<Memchr2> { #[cfg(not(feature = "perf-literal-substring"))] { None } #[cfg(feature = "perf-literal-substring")] { if needles.len() != 2 { return None; } if !needles.iter().all(|n| n.as_ref().len() == 1) { return None; } let b1 = needles[0].as_ref()[0]; let b2 = needles[1].as_ref()[0]; Some(Memchr2(b1, b2)) } } } impl PrefilterI for Memchr2 { fn find(&self, haystack: &[u8], span: Span) -> Option<Span> { #[cfg(not(feature = "perf-literal-substring"))] { unreachable!() } #[cfg(feature = "perf-literal-substring")] { memchr::memchr2(self.0, self.1, &haystack[span]).map(|i| { let start = span.start + i; let end = start + 1; Span { start, end } }) } } fn prefix(&self, haystack: &[u8], span: Span) -> Option<Span> { let b = *haystack.get(span.start)?; if self.0 == b || self.1 == b { Some(Span { start: span.start, end: span.start + 1 }) } else { None } } fn memory_usage(&self) -> usize { 0 } fn is_fast(&self) -> bool { true } } #[derive(Clone, Debug)] pub(crate) struct Memchr3(u8, u8, u8); impl Memchr3 { pub(crate) fn new<B: AsRef<[u8]>>( _kind: MatchKind, needles: &[B], ) -> Option<Memchr3> { #[cfg(not(feature = "perf-literal-substring"))] { None } #[cfg(feature = "perf-literal-substring")] { if needles.len() != 3 { return None; } if !needles.iter().all(|n| n.as_ref().len() == 1) { return None; } let b1 = needles[0].as_ref()[0]; let b2 = needles[1].as_ref()[0]; let b3 = needles[2].as_ref()[0]; Some(Memchr3(b1, b2, b3)) } } } impl PrefilterI for Memchr3 { fn find(&self, haystack: &[u8], span: Span) -> Option<Span> { #[cfg(not(feature = "perf-literal-substring"))] { unreachable!() } #[cfg(feature = "perf-literal-substring")] { memchr::memchr3(self.0, self.1, self.2, &haystack[span]).map(|i| { let start = span.start + i; let end = start + 1; Span { start, end } }) } } fn prefix(&self, haystack: &[u8], span: Span) -> Option<Span> { let b = *haystack.get(span.start)?; if self.0 == b || self.1 == b || self.2 == b { Some(Span { start: span.start, end: span.start + 1 }) } else { None } } fn memory_usage(&self) -> usize { 0 } fn is_fast(&self) -> bool { true } } <file_sep>/regex-lite/src/pikevm.rs use alloc::{vec, vec::Vec}; use 
crate::{ int::{NonMaxUsize, U32}, nfa::{State, StateID, NFA}, pool::CachePoolGuard, utf8, }; /// A PikeVM searcher. /// /// A PikeVM uses the standard Thompson NFA linear time search algorithm, but /// augmented to support tracking the offsets of matching capture groups. #[derive(Clone, Debug)] pub(crate) struct PikeVM { nfa: NFA, } impl PikeVM { /// Create a new PikeVM searcher that uses the given NFA. pub(crate) fn new(nfa: NFA) -> PikeVM { PikeVM { nfa } } /// Return the underlying NFA used by this PikeVM. pub(crate) fn nfa(&self) -> &NFA { &self.nfa } /// Returns an iterator of non-overlapping matches in the given haystack. pub(crate) fn find_iter<'r, 'h>( &'r self, cache: CachePoolGuard<'r>, haystack: &'h [u8], ) -> FindMatches<'r, 'h> { FindMatches { pikevm: self, cache, haystack, at: 0, slots: vec![None, None], last_match_end: None, } } /// Returns an iterator of non-overlapping capture matches in the given /// haystack. pub(crate) fn captures_iter<'r, 'h>( &'r self, cache: CachePoolGuard<'r>, haystack: &'h [u8], ) -> CapturesMatches<'r, 'h> { // OK because the NFA wouldn't have compiled if this could overflow. let len = self.nfa().group_len().checked_mul(2).unwrap(); CapturesMatches { it: FindMatches { pikevm: self, cache, haystack, at: 0, slots: vec![None; len], last_match_end: None, }, } } /// The implementation of standard leftmost search. /// /// Capturing group spans are written to `slots`, but only if requested. /// `slots` can be any length. Any slot in the NFA that is activated but /// which is out of bounds for the given `slots` is ignored. pub(crate) fn search( &self, cache: &mut Cache, haystack: &[u8], start: usize, end: usize, earliest: bool, slots: &mut [Option<NonMaxUsize>], ) -> bool { cache.setup_search(slots.len()); if start > end { return false; } // Why do we even care about this? Well, in our `slots` representation, // we use usize::MAX as a sentinel to indicate "no match." This isn't // problematic so long as our haystack doesn't have a maximal length. // Byte slices are guaranteed by Rust to have a length that fits into // isize, and so this assert should always pass. But we put it here to // make our assumption explicit. assert!( haystack.len() < core::usize::MAX, "byte slice lengths must be less than usize MAX", ); let Cache { ref mut stack, ref mut curr, ref mut next } = cache; let start_id = self.nfa().start(); let anchored = self.nfa().is_start_anchored(); let mut matched = false; // Yes, our search doesn't end at `end`, but includes it. This is // necessary because matches are delayed by one byte. The delay is used // to handle look-behind assertions. In the case of the PikeVM, the // delay is implemented by not considering a match to exist until it // is visited in `nexts`. Technically, we know a match exists in the // previous iteration via `epsilon_closure`. let mut at = start; while at <= end { // If we have no states left to visit, then there are some cases // where we know we can quit early or even skip ahead. if curr.set.is_empty() { // We have a match so we can quit. if matched { break; } // If we're running an anchored search and we've advanced // beyond the start position with no other states to try, then // we will never observe a match and thus can stop. if anchored && at > start { break; } } // Instead of using a hypothetical unanchored start state in the // NFA (which doesn't exist, but we could add it), we actually // always use its anchored starting state. 
As a result, when doing // an unanchored search, we need to simulate our own '(?s:.)*?' // prefix, to permit a match to appear anywhere. // // Now, we don't *have* to do things this way. We could create // a proper unanchored start state in the NFA and do one // `epsilon_closure` call from that starting state before the main // loop here. And that is just as correct. However, it turns out to // be slower than our approach here because it slightly increases // the cost of processing each byte by requiring us to visit // more NFA states to deal with the additional NFA states in the // unanchored prefix. By simulating it explicitly here, we lower // those costs substantially. The cost is itself small, but it adds // up for large haystacks. // // In order to simulate the '(?s:.)*?' prefix---which is not // greedy---we are careful not to perform an epsilon closure on // the start state if we already have a match. Namely, if we // did otherwise, we would never reach a terminating condition // because there would always be additional states to process. if !matched { // Since we are adding to the 'curr' active states and since // this is for the start ID, we use a slots slice that is // guaranteed to have the right length but where every element // is absent. This is exactly what we want, because this // epsilon closure is responsible for simulating an unanchored // '(?s:.)*?' prefix. It is specifically outside of any // capturing groups, and thus, using slots that are always // absent is correct. // // Note though that we can't just use `&mut []` here, since // this epsilon closure may traverse through `Capture` states // transitions, and thus must be able to write offsets to the // slots given which are later copied to slot values in `curr`. let slots = next.slot_table.all_absent(); self.epsilon_closure( stack, slots, curr, haystack, at, start_id, ); } let (ch, len) = utf8::decode_lossy(&haystack[at..]); if self.nexts(stack, curr, next, haystack, at, ch, len, slots) { matched = true; } // Unless the caller asked us to return early, we need to mush // on to see if we can extend our match. (But note that 'nexts' // will quit right after seeing a match, as is consistent with // leftmost-first match priority.) if (earliest && matched) || len == 0 { break; } core::mem::swap(curr, next); next.set.clear(); at += len; } matched } /// Process the active states in 'curr' to find the states (written to /// 'next') we should process for the next byte in the haystack. /// /// 'stack' is used to perform a depth first traversal of the NFA when /// computing an epsilon closure. /// /// When a match is found, the slots for that match state (in 'curr') are /// copied to 'caps'. Moreover, once a match is seen, processing for 'curr' /// stops (unless the PikeVM was configured with MatchKind::All semantics). /// /// `at_ch` is the Unicode scalar value whose UTF-8 encoding begins at `at` /// in `haystack`. /// /// `at_len` is the number of bytes consumed by `at_ch`. This is usually /// equal to `at_ch.len_utf8()`, but not always. For example, in the case /// where `at_ch` is the replacement codepoint that results from decoding /// invalid UTF-8. In that case, `at_len` can be 1, 2 or 3. 
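    ///
    /// Returns true if and only if a match state was reached, in which case
    /// the slots for that matching state are copied into `slots` before
    /// returning.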
fn nexts( &self, stack: &mut Vec<FollowEpsilon>, curr: &mut ActiveStates, next: &mut ActiveStates, haystack: &[u8], at: usize, at_ch: char, at_len: usize, slots: &mut [Option<NonMaxUsize>], ) -> bool { let ActiveStates { ref set, ref mut slot_table } = *curr; for sid in set.iter() { if self.next( stack, slot_table, next, haystack, at, at_ch, at_len, sid, ) { slots.copy_from_slice(slot_table.for_state(sid)); return true; } } false } /// Starting from `sid`, if the position `at` in the `haystack` has a /// transition defined out of `sid`, then add the state transitioned to and /// its epsilon closure to the `next` set of states to explore. /// /// `stack` is used by the epsilon closure computation to perform a depth /// first traversal of the NFA. /// /// `curr_slot_table` should be the table of slots for the current set of /// states being explored. If there is a transition out of `sid`, then /// sid's row in the slot table is used to perform the epsilon closure. /// /// `at_ch` is the Unicode scalar value whose UTF-8 encoding begins at `at` /// in `haystack`. The caller provides it so that this routine doesn't /// need to re-decode it. (Since it's expected that this routine is called /// multiple times for each position.) /// /// `at_len` is the number of bytes consumed by `at_ch`. This is usually /// equal to `at_ch.len_utf8()`, but not always. For example, in the case /// where `at_ch` is the replacement codepoint that results from decoding /// invalid UTF-8. In that case, `at_len` can be 1, 2 or 3. fn next( &self, stack: &mut Vec<FollowEpsilon>, curr_slot_table: &mut SlotTable, next: &mut ActiveStates, haystack: &[u8], at: usize, at_ch: char, at_len: usize, sid: StateID, ) -> bool { match *self.nfa.state(sid) { State::Fail | State::Goto { .. } | State::Splits { .. } | State::Capture { .. } => false, State::Char { target, ch } => { if at_ch == ch && at_len > 0 { let slots = curr_slot_table.for_state(sid); // OK because `at_len` is always derived from the number // of bytes read from `at` that make up `at_ch`. So this // will never wrap. let at = at.wrapping_add(at_len); self.epsilon_closure( stack, slots, next, haystack, at, target, ); } false } State::Ranges { target, ref ranges } => { for (start, end) in ranges.iter().copied() { if start > at_ch { break; } else if start <= at_ch && at_ch <= end { if at_len == 0 { return false; } let slots = curr_slot_table.for_state(sid); // OK because `at_len` is always derived from the // number of bytes read from `at` that make up `at_ch`. // So this will never wrap. let at = at.wrapping_add(at_len); self.epsilon_closure( stack, slots, next, haystack, at, target, ); } } false } State::Match => true, } } /// Compute the epsilon closure of `sid`, writing the closure into `next` /// while copying slot values from `curr_slots` into corresponding states /// in `next`. `curr_slots` should be the slot values corresponding to /// `sid`. /// /// The given `stack` is used to perform a depth first traversal of the /// NFA by recursively following all epsilon transitions out of `sid`. /// Conditional epsilon transitions are followed if and only if they are /// satisfied for the position `at` in the `input` haystack. /// /// While this routine may write to `curr_slots`, once it returns, any /// writes are undone and the original values (even if absent) are /// restored. 
fn epsilon_closure( &self, stack: &mut Vec<FollowEpsilon>, curr_slots: &mut [Option<NonMaxUsize>], next: &mut ActiveStates, haystack: &[u8], at: usize, sid: StateID, ) { stack.push(FollowEpsilon::Explore(sid)); while let Some(frame) = stack.pop() { match frame { FollowEpsilon::RestoreCapture { slot, offset } => { curr_slots[slot.as_usize()] = offset; } FollowEpsilon::Explore(sid) => { self.epsilon_closure_explore( stack, curr_slots, next, haystack, at, sid, ); } } } } /// Explore all of the epsilon transitions out of `sid`. This is mostly /// split out from `epsilon_closure` in order to clearly delineate /// the actual work of computing an epsilon closure from the stack /// book-keeping. /// /// This will push any additional explorations needed on to `stack`. /// /// `curr_slots` should refer to the slots for the currently active NFA /// state. That is, the current state we are stepping through. These /// slots are mutated in place as new `Captures` states are traversed /// during epsilon closure, but the slots are restored to their original /// values once the full epsilon closure is completed. The ultimate use of /// `curr_slots` is to copy them to the corresponding `next_slots`, so that /// the capturing group spans are forwarded from the currently active state /// to the next. /// /// `next` refers to the next set of active states. Computing an epsilon /// closure may increase the next set of active states. /// /// `haystack` refers to the what we're searching and `at` refers to the /// current position in the haystack. These are used to check whether /// conditional epsilon transitions (like look-around) are satisfied at /// the current position. If they aren't, then the epsilon closure won't /// include them. fn epsilon_closure_explore( &self, stack: &mut Vec<FollowEpsilon>, curr_slots: &mut [Option<NonMaxUsize>], next: &mut ActiveStates, haystack: &[u8], at: usize, mut sid: StateID, ) { // We can avoid pushing some state IDs on to our stack in precisely // the cases where a 'push(x)' would be immediately followed by a 'x // = pop()'. This is achieved by this outer-loop. We simply set 'sid' // to be the next state ID we want to explore once we're done with // our initial exploration. In practice, this avoids a lot of stack // thrashing. loop { // Record this state as part of our next set of active states. If // we've already explored it, then no need to do it again. if !next.set.insert(sid) { return; } match *self.nfa.state(sid) { State::Fail | State::Match { .. } | State::Char { .. } | State::Ranges { .. } => { next.slot_table.for_state(sid).copy_from_slice(curr_slots); return; } State::Goto { target, look: None } => { sid = target; } State::Goto { target, look: Some(look) } => { if !look.is_match(haystack, at) { return; } sid = target; } State::Splits { ref targets, reverse: false } => { sid = match targets.get(0) { None => return, Some(&sid) => sid, }; stack.extend( targets[1..] .iter() .copied() .rev() .map(FollowEpsilon::Explore), ); } State::Splits { ref targets, reverse: true } => { sid = match targets.last() { None => return, Some(&sid) => sid, }; stack.extend( targets[..targets.len() - 1] .iter() .copied() .map(FollowEpsilon::Explore), ); } State::Capture { target, slot } => { // There's no need to do anything with slots that // ultimately won't be copied into the caller-provided // 'Captures' value. So we just skip dealing with them at // all. 
if slot.as_usize() < curr_slots.len() { stack.push(FollowEpsilon::RestoreCapture { slot, offset: curr_slots[slot.as_usize()], }); // OK because length of a slice must fit into an isize. curr_slots[slot.as_usize()] = Some(NonMaxUsize::new(at).unwrap()); } sid = target; } } } } } /// An iterator over all successive non-overlapping matches in a particular /// haystack. `'r` represents the lifetime of the regex, `'c` is the lifetime /// of the cache and `'h` represents the lifetime of the haystack. #[derive(Debug)] pub(crate) struct FindMatches<'r, 'h> { pikevm: &'r PikeVM, cache: CachePoolGuard<'r>, haystack: &'h [u8], at: usize, slots: Vec<Option<NonMaxUsize>>, last_match_end: Option<usize>, } impl<'r, 'h> Iterator for FindMatches<'r, 'h> { type Item = (usize, usize); fn next(&mut self) -> Option<(usize, usize)> { if !self.pikevm.search( &mut self.cache, self.haystack, self.at, self.haystack.len(), false, &mut self.slots, ) { return None; } let mut m = (self.slots[0].unwrap().get(), self.slots[1].unwrap().get()); if m.0 >= m.1 { m = self.handle_overlapping_empty_match(m)?; } self.at = m.1; self.last_match_end = Some(m.1); Some(m) } } impl<'r, 'h> FindMatches<'r, 'h> { /// Handles the special case of an empty match by ensuring that 1) the /// iterator always advances and 2) empty matches never overlap with other /// matches. /// /// Note that we mark this cold and forcefully prevent inlining because /// handling empty matches like this is extremely rare and does require a /// bit of code, comparatively. Keeping this code out of the main iterator /// function keeps it smaller and more amenable to inlining itself. #[cold] #[inline(never)] fn handle_overlapping_empty_match( &mut self, mut m: (usize, usize), ) -> Option<(usize, usize)> { assert!(m.0 >= m.1); if Some(m.1) == self.last_match_end { let len = core::cmp::max(1, utf8::decode(&self.haystack[self.at..]).1); self.at = self.at.checked_add(len).unwrap(); if !self.pikevm.search( &mut self.cache, self.haystack, self.at, self.haystack.len(), false, &mut self.slots, ) { return None; } m = (self.slots[0].unwrap().get(), self.slots[1].unwrap().get()); } Some(m) } } /// An iterator over all successive non-overlapping capture matches in a particular /// haystack. `'r` represents the lifetime of the regex, `'c` is the lifetime /// of the cache and `'h` represents the lifetime of the haystack. #[derive(Debug)] pub(crate) struct CapturesMatches<'r, 'h> { it: FindMatches<'r, 'h>, } impl<'r, 'h> Iterator for CapturesMatches<'r, 'h> { type Item = Vec<Option<NonMaxUsize>>; fn next(&mut self) -> Option<Vec<Option<NonMaxUsize>>> { self.it.next()?; Some(self.it.slots.clone()) } } /// A cache represents mutable state that a `PikeVM` requires during a search. /// /// For a given `PikeVM`, its corresponding cache may be created either via /// `PikeVM::create_cache`, or via `Cache::new`. They are equivalent in every /// way, except the former does not require explicitly importing `Cache`. /// /// A particular `Cache` is coupled with the `PikeVM` from which it was /// created. It may only be used with that `PikeVM`. A cache and its /// allocations may be re-purposed via `Cache::reset`, in which case, it can /// only be used with the new `PikeVM` (and not the old one). #[derive(Clone, Debug)] pub(crate) struct Cache { /// Stack used while computing epsilon closure. This effectively lets us /// move what is more naturally expressed through recursion to a stack /// on the heap. 
stack: Vec<FollowEpsilon>, /// The current active states being explored for the current byte in the /// haystack. curr: ActiveStates, /// The next set of states we're building that will be explored for the /// next byte in the haystack. next: ActiveStates, } impl Cache { /// Create a new `PikeVM` cache. /// /// A potentially more convenient routine to create a cache is /// `PikeVM::create_cache`, as it does not require also importing the /// `Cache` type. /// /// If you want to reuse the returned `Cache` with some other `PikeVM`, /// then you must call `Cache::reset` with the desired `PikeVM`. pub(crate) fn new(re: &PikeVM) -> Cache { Cache { stack: vec![], curr: ActiveStates::new(re), next: ActiveStates::new(re), } } /// Clears this cache. This should be called at the start of every search /// to ensure we start with a clean slate. /// /// This also sets the length of the capturing groups used in the current /// search. This permits an optimization where by 'SlotTable::for_state' /// only returns the number of slots equivalent to the number of slots /// given in the 'Captures' value. This may be less than the total number /// of possible slots, e.g., when one only wants to track overall match /// offsets. This in turn permits less copying of capturing group spans /// in the PikeVM. fn setup_search(&mut self, captures_slot_len: usize) { self.stack.clear(); self.curr.setup_search(captures_slot_len); self.next.setup_search(captures_slot_len); } } /// A set of active states used to "simulate" the execution of an NFA via the /// PikeVM. /// /// There are two sets of these used during NFA simulation. One set corresponds /// to the "current" set of states being traversed for the current position /// in a haystack. The other set corresponds to the "next" set of states being /// built, which will become the new "current" set for the next position in the /// haystack. These two sets correspond to CLIST and NLIST in Thompson's /// original paper regexes: https://dl.acm.org/doi/pdf/10.1145/363347.363387 /// /// In addition to representing a set of NFA states, this also maintains slot /// values for each state. These slot values are what turn the NFA simulation /// into the "Pike VM." Namely, they track capturing group values for each /// state. During the computation of epsilon closure, we copy slot values from /// states in the "current" set to the "next" set. Eventually, once a match /// is found, the slot values for that match state are what we write to the /// caller provided slots. #[derive(Clone, Debug)] struct ActiveStates { /// The set of active NFA states. This set preserves insertion order, which /// is critical for simulating the match semantics of backtracking regex /// engines. set: SparseSet, /// The slots for every NFA state, where each slot stores a (possibly /// absent) offset. Every capturing group has two slots. One for a start /// offset and one for an end offset. slot_table: SlotTable, } impl ActiveStates { /// Create a new set of active states for the given PikeVM. The active /// states returned may only be used with the given PikeVM. (Use 'reset' /// to re-purpose the allocation for a different PikeVM.) fn new(re: &PikeVM) -> ActiveStates { let mut active = ActiveStates { set: SparseSet::new(0), slot_table: SlotTable::new(), }; active.reset(re); active } /// Reset this set of active states such that it can be used with the given /// PikeVM (and only that PikeVM). 
fn reset(&mut self, re: &PikeVM) { self.set.resize(re.nfa().len()); self.slot_table.reset(re); } /// Setup this set of active states for a new search. The given slot /// length should be the number of slots in a caller provided 'Captures' /// (and may be zero). fn setup_search(&mut self, captures_slot_len: usize) { self.set.clear(); self.slot_table.setup_search(captures_slot_len); } } /// A table of slots, where each row represent a state in an NFA. Thus, the /// table has room for storing slots for every single state in an NFA. /// /// This table is represented with a single contiguous allocation. In general, /// the notion of "capturing group" doesn't really exist at this level of /// abstraction, hence the name "slot" instead. (Indeed, every capturing group /// maps to a pair of slots, one for the start offset and one for the end /// offset.) Slots are indexed by the `Captures` NFA state. #[derive(Clone, Debug)] struct SlotTable { /// The actual table of offsets. table: Vec<Option<NonMaxUsize>>, /// The number of slots per state, i.e., the table's stride or the length /// of each row. slots_per_state: usize, /// The number of slots in the caller-provided `Captures` value for the /// current search. Setting this to `slots_per_state` is always correct, /// but may be wasteful. slots_for_captures: usize, } impl SlotTable { /// Create a new slot table. /// /// One should call 'reset' with the corresponding PikeVM before use. fn new() -> SlotTable { SlotTable { table: vec![], slots_for_captures: 0, slots_per_state: 0 } } /// Reset this slot table such that it can be used with the given PikeVM /// (and only that PikeVM). fn reset(&mut self, re: &PikeVM) { let nfa = re.nfa(); // OK because NFA construction would have failed if this overflowed. self.slots_per_state = nfa.group_len().checked_mul(2).unwrap(); // This is always correct, but may be reduced for a particular search // if fewer slots were given by the caller, e.g., none at all or only // slots for tracking the overall match instead of all slots for every // group. self.slots_for_captures = self.slots_per_state; let len = nfa .len() // We add 1 so that our last row is always empty. We use it as // "scratch" space for computing the epsilon closure off of the // starting state. .checked_add(1) .and_then(|x| x.checked_mul(self.slots_per_state)) // It seems like this could actually panic on legitimate inputs // on 32-bit targets. Should we somehow convert this to an error? // What about something similar for the lazy DFA cache? If you're // tripping this assert, please file a bug. .expect("slot table length doesn't overflow"); self.table.resize(len, None); } /// Perform any per-search setup for this slot table. /// /// In particular, this sets the length of the number of slots used in the /// slots given by the caller (if any at all). This number may be smaller /// than the total number of slots available, e.g., when the caller is only /// interested in tracking the overall match and not the spans of every /// matching capturing group. Only tracking the overall match can save a /// substantial amount of time copying capturing spans during a search. fn setup_search(&mut self, captures_slot_len: usize) { self.slots_for_captures = captures_slot_len; } /// Return a mutable slice of the slots for the given state. /// /// Note that the length of the slice returned may be less than the total /// number of slots available for this state. In particular, the length /// always matches the number of slots indicated via `setup_search`. 
    fn for_state(&mut self, sid: StateID) -> &mut [Option<NonMaxUsize>] {
        let i = sid.as_usize() * self.slots_per_state;
        &mut self.table[i..i + self.slots_for_captures]
    }

    /// Return a slice of slots of appropriate length where every slot offset
    /// is guaranteed to be absent. This is useful in cases where you need to
    /// compute an epsilon closure outside of the user supplied regex, and
    /// thus never want it to have any capturing slots set.
    fn all_absent(&mut self) -> &mut [Option<NonMaxUsize>] {
        let i = self.table.len() - self.slots_per_state;
        &mut self.table[i..i + self.slots_for_captures]
    }
}

/// Represents a stack frame for use while computing an epsilon closure.
///
/// (An "epsilon closure" refers to the set of reachable NFA states from a
/// single state without consuming any input. That is, the set of all epsilon
/// transitions not only from that single state, but from every other state
/// reachable by an epsilon transition as well. This is why it's called a
/// "closure.")
///
/// Computing the epsilon closure in a Thompson NFA proceeds via a depth
/// first traversal over all epsilon transitions from a particular state.
/// (A depth first traversal is important because it emulates the same
/// priority of matches that is typically found in backtracking regex
/// engines.) This depth first traversal is naturally expressed using
/// recursion, but to avoid a call stack size proportional to the size of a
/// regex, we put our stack on the heap instead.
///
/// This stack thus consists of call frames. The typical call frame is
/// `Explore`, which instructs epsilon closure to explore the epsilon
/// transitions from that state. (Subsequent epsilon transitions are then
/// pushed on to the stack as more `Explore` frames.) If the state ID being
/// explored has no epsilon transitions, then the capturing group slots are
/// copied from the original state that sparked the epsilon closure (from the
/// 'nexts' routine) to the state ID being explored. This way, capturing group
/// slots are forwarded from the previous state to the next.
///
/// The other stack frame, `RestoreCapture`, instructs the epsilon closure to
/// set the position for a particular slot back to some particular offset.
/// This frame is pushed when `Explore` sees a `Capture` transition. `Explore`
/// will set the offset of the slot indicated in `Capture` to the current
/// offset, and then push the old offset on to the stack as a `RestoreCapture`
/// frame. Thus, the new offset is only used until the epsilon closure reverts
/// back to the `RestoreCapture` frame. In effect, this gives the `Capture`
/// epsilon transition its "scope" to only states that come "after" it during
/// depth first traversal.
#[derive(Clone, Debug)]
enum FollowEpsilon {
    /// Explore the epsilon transitions from a state ID.
    Explore(StateID),
    /// Reset the given `slot` to the given `offset` (which might be `None`).
    RestoreCapture { slot: u32, offset: Option<NonMaxUsize> },
}

/// A sparse set used for representing ordered NFA states.
///
/// This supports constant time addition and membership testing. Clearing an
/// entire set can also be done in constant time. Iteration yields elements
/// in the order in which they were inserted.
///
/// The data structure is based on: https://research.swtch.com/sparse
/// Note though that we don't actually use uninitialized memory. We generally
/// reuse sparse sets, so the initial allocation cost is bearable. However,
/// its other properties listed above are extremely useful.
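///
/// A hedged sketch of the intended usage (the capacity and IDs below are
/// arbitrary):
///
///     let mut set = SparseSet::new(8);
///     assert!(set.insert(3));   // newly inserted
///     assert!(!set.insert(3));  // already present, no-op
///     assert!(set.contains(3));
///     set.clear();
///     assert!(set.is_empty());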
#[derive(Clone)]
struct SparseSet {
    /// The number of elements currently in this set.
    len: usize,
    /// Dense contains the ids in the order in which they were inserted.
    dense: Vec<StateID>,
    /// Sparse maps ids to their location in dense.
    ///
    /// A state ID is in the set if and only if
    /// sparse[id] < len && id == dense[sparse[id]].
    ///
    /// Note that these are indices into 'dense'. It's a little weird to use
    /// StateID here, but we know our length can never exceed the bounds of
    /// StateID (enforced by 'resize') and StateID will be at most 4 bytes
    /// where as a usize is likely double that in most cases.
    sparse: Vec<StateID>,
}

impl SparseSet {
    /// Create a new sparse set with the given capacity.
    ///
    /// Sparse sets have a fixed size and they cannot grow. Attempting to
    /// insert more distinct elements than the total capacity of the set will
    /// result in a panic.
    ///
    /// This panics if the capacity given is bigger than `StateID::LIMIT`.
    fn new(capacity: usize) -> SparseSet {
        let mut set = SparseSet { len: 0, dense: vec![], sparse: vec![] };
        set.resize(capacity);
        set
    }

    /// Resizes this sparse set to have the new capacity given.
    ///
    /// This set is automatically cleared.
    ///
    /// This panics if the capacity given is bigger than `StateID::LIMIT`.
    fn resize(&mut self, new_capacity: usize) {
        assert!(
            new_capacity <= u32::MAX.as_usize(),
            "sparse set capacity cannot exceed {:?}",
            u32::MAX,
        );
        self.clear();
        self.dense.resize(new_capacity, 0);
        self.sparse.resize(new_capacity, 0);
    }

    /// Returns the capacity of this set.
    ///
    /// The capacity represents a fixed limit on the number of distinct
    /// elements that are allowed in this set. The capacity cannot be changed.
    fn capacity(&self) -> usize {
        self.dense.len()
    }

    /// Returns the number of elements in this set.
    fn len(&self) -> usize {
        self.len
    }

    /// Returns true if and only if this set is empty.
    fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Insert the state ID value into this set and return true if the given
    /// state ID was not previously in this set.
    ///
    /// This operation is idempotent. If the given value is already in this
    /// set, then this is a no-op.
    ///
    /// If more than `capacity` ids are inserted, then this panics.
    fn insert(&mut self, id: StateID) -> bool {
        if self.contains(id) {
            return false;
        }
        let index = self.len();
        assert!(
            index < self.capacity(),
            "{:?} exceeds capacity of {:?} when inserting {:?}",
            index,
            self.capacity(),
            id,
        );
        self.dense[index] = id;
        // OK because we don't permit the capacity to be set higher than
        // u32::MAX.
        self.sparse[id.as_usize()] = u32::try_from(index).unwrap();
        self.len += 1;
        true
    }

    /// Returns true if and only if this set contains the given value.
    fn contains(&self, id: StateID) -> bool {
        let index = self.sparse[id.as_usize()];
        index.as_usize() < self.len() && self.dense[index.as_usize()] == id
    }

    /// Clear this set such that it has no members.
    fn clear(&mut self) {
        self.len = 0;
    }

    /// Returns an iterator over all the state IDs in this set in the order in
    /// which they were inserted.
    fn iter(&self) -> SparseSetIter<'_> {
        SparseSetIter(self.dense[..self.len()].iter())
    }
}

impl core::fmt::Debug for SparseSet {
    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
        let elements: Vec<StateID> = self.iter().collect();
        f.debug_tuple("SparseSet").field(&elements).finish()
    }
}

/// An iterator over all elements in a sparse set.
///
/// The lifetime `'a` refers to the lifetime of the set being iterated over.
#[derive(Debug)] struct SparseSetIter<'a>(core::slice::Iter<'a, StateID>); impl<'a> Iterator for SparseSetIter<'a> { type Item = StateID; fn next(&mut self) -> Option<StateID> { self.0.next().map(|&id| id) } } <file_sep>/regex-cli/Cargo.toml [package] name = "regex-cli" version = "0.1.0" #:version authors = ["The Rust Project Developers", "<NAME> <<EMAIL>>"] description = """ A command line tool for debugging, ad hoc benchmarking and generating regular expressions. """ repository = "https://github.com/rust-lang/regex/tree/master/regex-cli" keywords = ["regex", "cli", "debug", "nfa", "dfa"] license = "MIT OR Apache-2.0" categories = ["text-processing"] autotests = false edition = "2021" [[bin]] name = "regex-cli" path = "main.rs" [features] default = [] instrument = ["regex-automata/internal-instrument"] [dependencies] anyhow = "1.0.28" bstr = { version = "1.4.0", default-features = false, features = ["std"] } lexopt = "0.3.0" log = { version = "0.4.17", features = ["std"] } memmap2 = "0.5.10" regex = { version = "1.9.0", path = ".." } regex-automata = { version = "0.3.0", path = "../regex-automata", features = ["logging"] } regex-lite = { version = "0.1.0", path = "../regex-lite" } regex-syntax = { version = "0.7.3", path = "../regex-syntax" } tabwriter = { version = "1.2.1", features = ["ansi_formatting"] } textwrap = { version = "0.16.0", default-features = false } <file_sep>/Cargo.toml [package] name = "regex" version = "1.9.5" #:version authors = ["The Rust Project Developers", "<NAME> <<EMAIL>>"] license = "MIT OR Apache-2.0" readme = "README.md" repository = "https://github.com/rust-lang/regex" documentation = "https://docs.rs/regex" homepage = "https://github.com/rust-lang/regex" description = """ An implementation of regular expressions for Rust. This implementation uses finite automata and guarantees linear time matching on all inputs. """ categories = ["text-processing"] autotests = false exclude = ["/scripts/*", "/.github/*"] edition = "2021" rust-version = "1.60.0" [workspace] members = [ "regex-automata", "regex-capi", "regex-cli", "regex-lite", "regex-syntax", "regex-test", ] # Features are documented in the "Crate features" section of the crate docs: # https://docs.rs/regex/*/#crate-features [features] default = ["std", "perf", "unicode", "regex-syntax/default"] # ECOSYSTEM FEATURES # The 'std' feature permits the regex crate to use the standard library. This # is intended to support future use cases where the regex crate may be able # to compile without std, and instead just rely on 'core' and 'alloc' (for # example). Currently, this isn't supported, and removing the 'std' feature # will prevent regex from compiling. std = [ "aho-corasick?/std", "memchr?/std", "regex-automata/std", "regex-syntax/std", ] # This feature enables the 'log' crate to emit messages. This is usually # only useful for folks working on the regex crate itself, but can be useful # if you're trying hard to do some performance hacking on regex patterns # themselves. Note that you'll need to pair this with a crate like 'env_logger' # to actually emit the log messages somewhere. logging = [ "aho-corasick?/logging", "memchr?/logging", "regex-automata/logging", ] # The 'use_std' feature is DEPRECATED. It will be removed in regex 2. Until # then, it is an alias for the 'std' feature. use_std = ["std"] # PERFORMANCE FEATURES # Enables all default performance features. 
Note that this specifically does # not include perf-dfa-full, because it leads to higher compile times and # bigger binaries, and the runtime performance improvement is not obviously # worth it. perf = [ "perf-cache", "perf-dfa", "perf-onepass", "perf-backtrack", "perf-inline", "perf-literal", ] # Enables use of a lazy DFA when possible. perf-dfa = ["regex-automata/hybrid"] # Enables use of a fully compiled DFA when possible. perf-dfa-full = ["regex-automata/dfa-build", "regex-automata/dfa-search"] # Enables use of the one-pass regex matcher, which speeds up capture searches # even beyond the backtracker. perf-onepass = ["regex-automata/dfa-onepass"] # Enables use of a bounded backtracker, which speeds up capture searches. perf-backtrack = ["regex-automata/nfa-backtrack"] # Enables aggressive use of inlining. perf-inline = ["regex-automata/perf-inline"] # Enables literal optimizations. perf-literal = [ "dep:aho-corasick", "dep:memchr", "regex-automata/perf-literal", ] # Enables fast caching. (If disabled, caching is still used, but is slower.) # Currently, this feature has no effect. It used to remove the thread_local # dependency and use a slower internal cache, but now the default cache has # been improved and thread_local is no longer a dependency at all. perf-cache = [] # UNICODE DATA FEATURES # Enables all Unicode features. This expands if new Unicode features are added. unicode = [ "unicode-age", "unicode-bool", "unicode-case", "unicode-gencat", "unicode-perl", "unicode-script", "unicode-segment", "regex-automata/unicode", "regex-syntax/unicode", ] # Enables use of the `Age` property, e.g., `\p{Age:3.0}`. unicode-age = [ "regex-automata/unicode-age", "regex-syntax/unicode-age", ] # Enables use of a smattering of boolean properties, e.g., `\p{Emoji}`. unicode-bool = [ "regex-automata/unicode-bool", "regex-syntax/unicode-bool", ] # Enables Unicode-aware case insensitive matching, e.g., `(?i)β`. unicode-case = [ "regex-automata/unicode-case", "regex-syntax/unicode-case", ] # Enables Unicode general categories, e.g., `\p{Letter}` or `\pL`. unicode-gencat = [ "regex-automata/unicode-gencat", "regex-syntax/unicode-gencat", ] # Enables Unicode-aware Perl classes corresponding to `\w`, `\s` and `\d`. unicode-perl = [ "regex-automata/unicode-perl", "regex-automata/unicode-word-boundary", "regex-syntax/unicode-perl", ] # Enables Unicode scripts and script extensions, e.g., `\p{Greek}`. unicode-script = [ "regex-automata/unicode-script", "regex-syntax/unicode-script", ] # Enables Unicode segmentation properties, e.g., `\p{gcb=Extend}`. unicode-segment = [ "regex-automata/unicode-segment", "regex-syntax/unicode-segment", ] # UNSTABLE FEATURES (requires Rust nightly) # A blanket feature that governs whether unstable features are enabled or not. # Unstable features are disabled by default, and typically rely on unstable # features in rustc itself. unstable = ["pattern"] # Enable to use the unstable pattern traits defined in std. This is enabled # by default if the unstable feature is enabled. pattern = [] # For very fast multi-prefix literal matching. [dependencies.aho-corasick] version = "1.0.0" optional = true # For skipping along search text quickly when a leading byte is known. [dependencies.memchr] version = "2.6.0" optional = true # For the actual regex engines. [dependencies.regex-automata] path = "regex-automata" version = "0.3.8" default-features = false features = ["alloc", "syntax", "meta", "nfa-pikevm"] # For parsing regular expressions. 
[dependencies.regex-syntax] path = "regex-syntax" version = "0.7.5" default-features = false [dev-dependencies] # For examples. once_cell = "1.17.1" # For property based tests. quickcheck = { version = "1.0.3", default-features = false } # To check README's example doc-comment = "0.3" # For easy error handling in integration tests. anyhow = "1.0.69" # A library for testing regex engines. regex-test = { path = "regex-test", version = "0.1.0" } [dev-dependencies.env_logger] # Note that this is currently using an older version because of the dependency # tree explosion that happened in 0.10. version = "0.9.3" default-features = false features = ["atty", "humantime", "termcolor"] # This test suite reads a whole boatload of tests from the top-level testdata # directory, and then runs them against the regex crate API. # # regex-automata has its own version of them, and runs them against each # internal regex engine individually. # # This means that if you're seeing a failure in this test suite, you should # try running regex-automata's tests: # # cargo test --manifest-path regex-automata/Cargo.toml --test integration # # That *might* give you a more targeted test failure. i.e., "only the # PikeVM fails this test." Which gives you a narrower place to search. If # regex-automata's test suite passes, then the bug might be in the integration # of the regex crate and regex-automata. But generally speaking, a failure # in this test suite *should* mean there is a corresponding failure in # regex-automata's test suite. [[test]] path = "tests/lib.rs" name = "integration" [package.metadata.docs.rs] # We want to document all features. all-features = true # Since this crate's feature setup is pretty complicated, it is worth opting # into a nightly unstable option to show the features that need to be enabled # for public API items. To do that, we set 'docsrs', and when that's enabled, # we enable the 'doc_auto_cfg' feature. # # To test this locally, run: # # RUSTDOCFLAGS="--cfg docsrs" cargo +nightly doc --all-features rustdoc-args = ["--cfg", "docsrs"] [profile.release] debug = true [profile.bench] debug = true [profile.dev] # Running tests takes too long in debug mode, so we forcefully always build # with optimizations. Unfortunate, but, ¯\_(ツ)_/¯. # # It's counter-intuitive that this needs to be set on dev *and* test, but # it's because the tests that take a long time to run are run as integration # tests in a separate crate. The test.opt-level setting won't apply there, so # we need to set the opt-level across the entire build. opt-level = 3 debug = true [profile.test] opt-level = 3 debug = true <file_sep>/regex-automata/src/dfa/accel.rs // This module defines some core types for dealing with accelerated DFA states. // Briefly, a DFA state can be "accelerated" if all of its transitions except // for a few loop back to itself. This directly implies that the only way out // of such a state is if a byte corresponding to one of those non-loopback // transitions is found. Such states are often found in simple repetitions in // non-Unicode regexes. For example, consider '(?-u)[^a]+a'. We can look at its // DFA with regex-cli: // // $ regex-cli debug dfa dense '(?-u)[^a]+a' -BbC // dense::DFA( // D 000000: // Q 000001: // *000002: // A 000003: \x00-` => 3, a => 5, b-\xFF => 3 // >000004: \x00-` => 3, a => 4, b-\xFF => 3 // 000005: \x00-\xFF => 2, EOI => 2 // ) // // In particular, state 3 is accelerated (shown via the 'A' indicator) since // the only way to leave that state once entered is to see an 'a' byte. 
If // there is a long run of non-'a' bytes, then using something like 'memchr' // to find the next 'a' byte can be significantly faster than just using the // standard byte-at-a-time state machine. // // Unfortunately, this optimization rarely applies when Unicode is enabled. // For example, patterns like '[^a]' don't actually match any byte that isn't // 'a', but rather, any UTF-8 encoding of a Unicode scalar value that isn't // 'a'. This makes the state machine much more complex---far beyond a single // state---and removes the ability to easily accelerate it. (Because if the // machine sees a non-UTF-8 sequence, then the machine won't match through it.) // // In practice, we only consider accelerating states that have 3 or fewer // non-loop transitions. At a certain point, you get diminishing returns, but // also because that's what the memchr crate supports. The structures below // hard-code this assumption and provide (de)serialization APIs for use inside // a DFA. // // And finally, note that there is some trickery involved in making it very // fast to not only check whether a state is accelerated at search time, but // also to access the bytes to search for to implement the acceleration itself. // dfa/special.rs provides more detail, but the short story is that all // accelerated states appear contiguously in a DFA. This means we can represent // the ID space of all accelerated DFA states with a single range. So given // a state ID, we can determine whether it's accelerated via // // min_accel_id <= id <= max_accel_id // // And find its corresponding accelerator with: // // accels.get((id - min_accel_id) / dfa_stride) #[cfg(feature = "dfa-build")] use alloc::{vec, vec::Vec}; use crate::util::{ int::Pointer, memchr, wire::{self, DeserializeError, Endian, SerializeError}, }; /// The base type used to represent a collection of accelerators. /// /// While an `Accel` is represented as a fixed size array of bytes, a /// *collection* of `Accel`s (called `Accels`) is represented internally as a /// slice of u32. While it's a bit unnatural to do this and costs us a bit of /// fairly low-risk not-safe code, it lets us remove the need for a second type /// parameter in the definition of dense::DFA. (Which really wants everything /// to be a slice of u32.) type AccelTy = u32; /// The size of the unit of representation for accelerators. /// /// ACCEL_CAP *must* be a multiple of this size. const ACCEL_TY_SIZE: usize = core::mem::size_of::<AccelTy>(); /// The maximum length in bytes that a single Accel can be. This is distinct /// from the capacity of an accelerator in that the length represents only the /// bytes that should be read. const ACCEL_LEN: usize = 4; /// The capacity of each accelerator, in bytes. We set this to 8 since it's a /// multiple of 4 (our ID size) and because it gives us a little wiggle room /// if we want to support more accel bytes in the future without a breaking /// change. /// /// This MUST be a multiple of ACCEL_TY_SIZE. const ACCEL_CAP: usize = 8; /// Search for between 1 and 3 needle bytes in the given haystack, starting the /// search at the given position. If `needles` has a length other than 1-3, /// then this panics. 
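// Illustrative usage (a sketch, not taken from the crate's test suite): with
// hypothetical needles `&[b'a', b'b']` and haystack `b"xxxayy"`, calling
// `find_fwd(&[b'a', b'b'], b"xxxayy", 0)` would return `Some(3)`, i.e. the
// position of the first needle byte at or after `at`. In an actual search,
// the needle slice would come from `Accels::needles` below rather than being
// hard-coded like this.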
#[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn find_fwd( needles: &[u8], haystack: &[u8], at: usize, ) -> Option<usize> { let bs = needles; let i = match needles.len() { 1 => memchr::memchr(bs[0], &haystack[at..])?, 2 => memchr::memchr2(bs[0], bs[1], &haystack[at..])?, 3 => memchr::memchr3(bs[0], bs[1], bs[2], &haystack[at..])?, 0 => panic!("cannot find with empty needles"), n => panic!("invalid needles length: {}", n), }; Some(at + i) } /// Search for between 1 and 3 needle bytes in the given haystack in reverse, /// starting the search at the given position. If `needles` has a length other /// than 1-3, then this panics. #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn find_rev( needles: &[u8], haystack: &[u8], at: usize, ) -> Option<usize> { let bs = needles; match needles.len() { 1 => memchr::memrchr(bs[0], &haystack[..at]), 2 => memchr::memrchr2(bs[0], bs[1], &haystack[..at]), 3 => memchr::memrchr3(bs[0], bs[1], bs[2], &haystack[..at]), 0 => panic!("cannot find with empty needles"), n => panic!("invalid needles length: {}", n), } } /// Represents the accelerators for all accelerated states in a dense DFA. /// /// The `A` type parameter represents the type of the underlying bytes. /// Generally, this is either `&[AccelTy]` or `Vec<AccelTy>`. #[derive(Clone)] pub(crate) struct Accels<A> { /// A length prefixed slice of contiguous accelerators. See the top comment /// in this module for more details on how we can jump from a DFA's state /// ID to an accelerator in this list. /// /// The first 4 bytes always correspond to the number of accelerators /// that follow. accels: A, } #[cfg(feature = "dfa-build")] impl Accels<Vec<AccelTy>> { /// Create an empty sequence of accelerators for a DFA. pub fn empty() -> Accels<Vec<AccelTy>> { Accels { accels: vec![0] } } /// Add an accelerator to this sequence. /// /// This adds to the accelerator to the end of the sequence and therefore /// should be done in correspondence with its state in the DFA. /// /// This panics if this results in more accelerators than AccelTy::MAX. pub fn add(&mut self, accel: Accel) { self.accels.extend_from_slice(&accel.as_accel_tys()); let len = self.len(); self.set_len(len + 1); } /// Set the number of accelerators in this sequence, which is encoded in /// the first 4 bytes of the underlying bytes. fn set_len(&mut self, new_len: usize) { // The only way an accelerator gets added is if a state exists for // it, and if a state exists, then its index is guaranteed to be // representable by a AccelTy by virtue of the guarantees provided by // StateID. let new_len = AccelTy::try_from(new_len).unwrap(); self.accels[0] = new_len; } } impl<'a> Accels<&'a [AccelTy]> { /// Deserialize a sequence of accelerators from the given bytes. If there /// was a problem deserializing, then an error is returned. /// /// This is guaranteed to run in constant time. This does not guarantee /// that every accelerator in the returned collection is valid. Thus, /// accessing one may panic, or not-safe code that relies on accelerators /// being correct my result in UB. /// /// Callers may check the validity of every accelerator with the `validate` /// method. pub fn from_bytes_unchecked( mut slice: &'a [u8], ) -> Result<(Accels<&'a [AccelTy]>, usize), DeserializeError> { let slice_start = slice.as_ptr().as_usize(); let (accel_len, _) = wire::try_read_u32_as_usize(slice, "accelerators length")?; // The accelerator length is part of the accel_tys slice that // we deserialize. This is perhaps a bit idiosyncratic. 
It would // probably be better to split out the length into a real field. let accel_tys_len = wire::add( wire::mul(accel_len, 2, "total number of accelerator accel_tys")?, 1, "total number of accel_tys", )?; let accel_tys_bytes_len = wire::mul( ACCEL_TY_SIZE, accel_tys_len, "total number of bytes in accelerators", )?; wire::check_slice_len(slice, accel_tys_bytes_len, "accelerators")?; wire::check_alignment::<AccelTy>(slice)?; let accel_tys = &slice[..accel_tys_bytes_len]; slice = &slice[accel_tys_bytes_len..]; // SAFETY: We've checked the length and alignment above, and since // slice is just bytes and AccelTy is just a u32, we can safely cast to // a slice of &[AccelTy]. let accels = unsafe { core::slice::from_raw_parts( accel_tys.as_ptr().cast::<AccelTy>(), accel_tys_len, ) }; Ok((Accels { accels }, slice.as_ptr().as_usize() - slice_start)) } } impl<A: AsRef<[AccelTy]>> Accels<A> { /// Return an owned version of the accelerators. #[cfg(feature = "alloc")] pub fn to_owned(&self) -> Accels<alloc::vec::Vec<AccelTy>> { Accels { accels: self.accels.as_ref().to_vec() } } /// Return a borrowed version of the accelerators. pub fn as_ref(&self) -> Accels<&[AccelTy]> { Accels { accels: self.accels.as_ref() } } /// Return the bytes representing the serialization of the accelerators. pub fn as_bytes(&self) -> &[u8] { let accels = self.accels.as_ref(); // SAFETY: This is safe because accels is a just a slice of AccelTy, // and u8 always has a smaller alignment. unsafe { core::slice::from_raw_parts( accels.as_ptr().cast::<u8>(), accels.len() * ACCEL_TY_SIZE, ) } } /// Returns the memory usage, in bytes, of these accelerators. /// /// The memory usage is computed based on the number of bytes used to /// represent all of the accelerators. /// /// This does **not** include the stack size used by this value. pub fn memory_usage(&self) -> usize { self.as_bytes().len() } /// Return the bytes to search for corresponding to the accelerator in this /// sequence at index `i`. If no such accelerator exists, then this panics. /// /// The significance of the index is that it should be in correspondence /// with the index of the corresponding DFA. That is, accelerated DFA /// states are stored contiguously in the DFA and have an ordering implied /// by their respective state IDs. The state's index in that sequence /// corresponds to the index of its corresponding accelerator. #[cfg_attr(feature = "perf-inline", inline(always))] pub fn needles(&self, i: usize) -> &[u8] { if i >= self.len() { panic!("invalid accelerator index {}", i); } let bytes = self.as_bytes(); let offset = ACCEL_TY_SIZE + i * ACCEL_CAP; let len = usize::from(bytes[offset]); &bytes[offset + 1..offset + 1 + len] } /// Return the total number of accelerators in this sequence. pub fn len(&self) -> usize { // This should never panic since deserialization checks that the // length can fit into a usize. usize::try_from(self.accels.as_ref()[0]).unwrap() } /// Return the accelerator in this sequence at index `i`. If no such /// accelerator exists, then this returns None. /// /// See the docs for `needles` on the significance of the index. fn get(&self, i: usize) -> Option<Accel> { if i >= self.len() { return None; } let offset = ACCEL_TY_SIZE + i * ACCEL_CAP; let accel = Accel::from_slice(&self.as_bytes()[offset..]) .expect("Accels must contain valid accelerators"); Some(accel) } /// Returns an iterator of accelerators in this sequence. 
fn iter(&self) -> IterAccels<'_, A> { IterAccels { accels: self, i: 0 } } /// Writes these accelerators to the given byte buffer using the indicated /// endianness. If the given buffer is too small, then an error is /// returned. Upon success, the total number of bytes written is returned. /// The number of bytes written is guaranteed to be a multiple of 8. pub fn write_to<E: Endian>( &self, dst: &mut [u8], ) -> Result<usize, SerializeError> { let nwrite = self.write_to_len(); assert_eq!( nwrite % ACCEL_TY_SIZE, 0, "expected accelerator bytes written to be a multiple of {}", ACCEL_TY_SIZE, ); if dst.len() < nwrite { return Err(SerializeError::buffer_too_small("accelerators")); } // The number of accelerators can never exceed AccelTy::MAX. E::write_u32(AccelTy::try_from(self.len()).unwrap(), dst); // The actual accelerators are just raw bytes and thus their endianness // is irrelevant. So we can copy them as bytes. dst[ACCEL_TY_SIZE..nwrite] .copy_from_slice(&self.as_bytes()[ACCEL_TY_SIZE..nwrite]); Ok(nwrite) } /// Validates that every accelerator in this collection can be successfully /// deserialized as a valid accelerator. pub fn validate(&self) -> Result<(), DeserializeError> { for chunk in self.as_bytes()[ACCEL_TY_SIZE..].chunks(ACCEL_CAP) { let _ = Accel::from_slice(chunk)?; } Ok(()) } /// Returns the total number of bytes written by `write_to`. pub fn write_to_len(&self) -> usize { self.as_bytes().len() } } impl<A: AsRef<[AccelTy]>> core::fmt::Debug for Accels<A> { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { write!(f, "Accels(")?; let mut list = f.debug_list(); for a in self.iter() { list.entry(&a); } list.finish()?; write!(f, ")") } } #[derive(Debug)] struct IterAccels<'a, A: AsRef<[AccelTy]>> { accels: &'a Accels<A>, i: usize, } impl<'a, A: AsRef<[AccelTy]>> Iterator for IterAccels<'a, A> { type Item = Accel; fn next(&mut self) -> Option<Accel> { let accel = self.accels.get(self.i)?; self.i += 1; Some(accel) } } /// Accel represents a structure for determining how to "accelerate" a DFA /// state. /// /// Namely, it contains zero or more bytes that must be seen in order for the /// DFA to leave the state it is associated with. In practice, the actual range /// is 1 to 3 bytes. /// /// The purpose of acceleration is to identify states whose vast majority /// of transitions are just loops back to the same state. For example, /// in the regex `(?-u)^[^a]+b`, the corresponding DFA will have a state /// (corresponding to `[^a]+`) where all transitions *except* for `a` and /// `b` loop back to itself. Thus, this state can be "accelerated" by simply /// looking for the next occurrence of either `a` or `b` instead of explicitly /// following transitions. (In this case, `b` transitions to the next state /// where as `a` would transition to the dead state.) #[derive(Clone)] pub(crate) struct Accel { /// The first byte is the length. Subsequent bytes are the accelerated /// bytes. /// /// Note that we make every accelerator 8 bytes as a slightly wasteful /// way of making sure alignment is always correct for state ID sizes of /// 1, 2, 4 and 8. This should be okay since accelerated states aren't /// particularly common, especially when Unicode is enabled. bytes: [u8; ACCEL_CAP], } impl Accel { /// Returns an empty accel, where no bytes are accelerated. #[cfg(feature = "dfa-build")] pub fn new() -> Accel { Accel { bytes: [0; ACCEL_CAP] } } /// Returns a verified accelerator derived from the beginning of the given /// slice. 
/// /// If the slice is not long enough or contains invalid bytes for an /// accelerator, then this returns an error. pub fn from_slice(mut slice: &[u8]) -> Result<Accel, DeserializeError> { slice = &slice[..core::cmp::min(ACCEL_LEN, slice.len())]; let bytes = slice .try_into() .map_err(|_| DeserializeError::buffer_too_small("accelerator"))?; Accel::from_bytes(bytes) } /// Returns a verified accelerator derived from raw bytes. /// /// If the given bytes are invalid, then this returns an error. fn from_bytes(bytes: [u8; 4]) -> Result<Accel, DeserializeError> { if usize::from(bytes[0]) >= ACCEL_LEN { return Err(DeserializeError::generic( "accelerator bytes cannot have length more than 3", )); } Ok(Accel::from_bytes_unchecked(bytes)) } /// Returns an accelerator derived from raw bytes. /// /// This does not check whether the given bytes are valid. Invalid bytes /// cannot sacrifice memory safety, but may result in panics or silent /// logic bugs. fn from_bytes_unchecked(bytes: [u8; 4]) -> Accel { Accel { bytes: [bytes[0], bytes[1], bytes[2], bytes[3], 0, 0, 0, 0] } } /// Attempts to add the given byte to this accelerator. If the accelerator /// is already full or thinks the byte is a poor accelerator, then this /// returns false. Otherwise, returns true. /// /// If the given byte is already in this accelerator, then it panics. #[cfg(feature = "dfa-build")] pub fn add(&mut self, byte: u8) -> bool { if self.len() >= 3 { return false; } // As a special case, we totally reject trying to accelerate a state // with an ASCII space. In most cases, it occurs very frequently, and // tends to result in worse overall performance. if byte == b' ' { return false; } assert!( !self.contains(byte), "accelerator already contains {:?}", crate::util::escape::DebugByte(byte) ); self.bytes[self.len() + 1] = byte; self.bytes[0] += 1; true } /// Return the number of bytes in this accelerator. pub fn len(&self) -> usize { usize::from(self.bytes[0]) } /// Returns true if and only if there are no bytes in this accelerator. #[cfg(feature = "dfa-build")] pub fn is_empty(&self) -> bool { self.len() == 0 } /// Returns the slice of bytes to accelerate. /// /// If this accelerator is empty, then this returns an empty slice. fn needles(&self) -> &[u8] { &self.bytes[1..1 + self.len()] } /// Returns true if and only if this accelerator will accelerate the given /// byte. #[cfg(feature = "dfa-build")] fn contains(&self, byte: u8) -> bool { self.needles().iter().position(|&b| b == byte).is_some() } /// Returns the accelerator bytes as an array of AccelTys. #[cfg(feature = "dfa-build")] fn as_accel_tys(&self) -> [AccelTy; 2] { assert_eq!(ACCEL_CAP, 8); // These unwraps are OK since ACCEL_CAP is set to 8. let first = AccelTy::from_ne_bytes(self.bytes[0..4].try_into().unwrap()); let second = AccelTy::from_ne_bytes(self.bytes[4..8].try_into().unwrap()); [first, second] } } impl core::fmt::Debug for Accel { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { write!(f, "Accel(")?; let mut set = f.debug_set(); for &b in self.needles() { set.entry(&crate::util::escape::DebugByte(b)); } set.finish()?; write!(f, ")") } } <file_sep>/regex-automata/src/util/mod.rs /*! A collection of modules that provide APIs that are useful across many regex engines. While one should explore the sub-modules directly to get a sense of what's there, here are some highlights that tie the sub-modules to higher level use cases: * `alphabet` contains APIs that are useful if you're doing low level things with the DFAs in this crate. 
For example, implementing determinization or walking its state graph directly. * `captures` contains APIs for dealing with capture group matches and their mapping to "slots" used inside an NFA graph. This is also where you can find iterators over capture group names. * `escape` contains types for pretty-printing raw byte slices as strings. * `iter` contains API helpers for writing regex iterators. * `lazy` contains a no-std and no-alloc variant of `lazy_static!` and `once_cell`. * `look` contains APIs for matching and configuring look-around assertions. * `pool` provides a way to reuse mutable memory allocated in a thread safe manner. * `prefilter` provides APIs for building prefilters and using them in searches. * `primitives` are what you might use if you're doing lower level work on automata, such as walking an NFA state graph. * `syntax` provides some higher level convenience functions for interacting with the `regex-syntax` crate. * `wire` is useful if you're working with DFA serialization. */ pub mod alphabet; #[cfg(feature = "alloc")] pub mod captures; pub mod escape; #[cfg(feature = "alloc")] pub mod interpolate; pub mod iter; pub mod lazy; pub mod look; #[cfg(feature = "alloc")] pub mod pool; pub mod prefilter; pub mod primitives; #[cfg(feature = "syntax")] pub mod syntax; pub mod wire; #[cfg(any(feature = "dfa-build", feature = "hybrid"))] pub(crate) mod determinize; pub(crate) mod empty; pub(crate) mod int; pub(crate) mod memchr; pub(crate) mod search; #[cfg(feature = "alloc")] pub(crate) mod sparse_set; pub(crate) mod start; pub(crate) mod unicode_data; pub(crate) mod utf8; <file_sep>/regex-automata/src/util/int.rs /*! This module provides several integer oriented traits for converting between both fixed size integers and integers whose size varies based on the target (like `usize`). The driving design principle of this module is to attempt to centralize as many `as` casts as possible here. And in particular, we separate casts into two buckets: * Casts that we use for their truncating behavior. In this case, we use more descriptive names, like `low_u32` and `high_u32`. * Casts that we use for converting back-and-forth between `usize`. These conversions are generally necessary because we often store indices in different formats to save on memory, which requires converting to and from `usize`. In this case, we very specifically do not want to overflow, and so the methods defined here will panic if the `as` cast would be lossy in debug mode. (A normal `as` cast will never panic!) For `as` casts between raw pointers, we use `cast`, so `as` isn't needed there. For regex engines, floating point is just never used, so we don't have to worry about `as` casts for those. Otherwise, this module pretty much covers all of our `as` needs except for one thing: const contexts. There are a select few places in this crate where we still need to use `as` because const functions on traits aren't stable yet. If we wind up significantly expanding our const footprint in this crate, it might be worth defining free functions to handle those cases. But at the time of writing, that just seemed like too much ceremony. Instead, I comment each such use of `as` in a const context with a "fixme" notice. NOTE: for simplicity, we don't take target pointer width into account here for `usize` conversions. Since we currently only panic in debug mode, skipping the check when it can be proven it isn't needed at compile time doesn't really matter. 
Now, if we wind up wanting to do as many checks as possible in release mode, then we would want to skip those when we know the conversions are always non-lossy. NOTE: this module isn't an exhaustive API. For example, we still use things like `u64::from` where possible, or even `usize::try_from()` for when we do explicitly want to panic or when we want to return an error for overflow. */ pub(crate) trait U8 { fn as_usize(self) -> usize; } impl U8 for u8 { fn as_usize(self) -> usize { usize::from(self) } } pub(crate) trait U16 { fn as_usize(self) -> usize; fn low_u8(self) -> u8; fn high_u8(self) -> u8; } impl U16 for u16 { fn as_usize(self) -> usize { usize::from(self) } fn low_u8(self) -> u8 { self as u8 } fn high_u8(self) -> u8 { (self >> 8) as u8 } } pub(crate) trait U32 { fn as_usize(self) -> usize; fn low_u8(self) -> u8; fn low_u16(self) -> u16; fn high_u16(self) -> u16; } impl U32 for u32 { fn as_usize(self) -> usize { #[cfg(debug_assertions)] { usize::try_from(self).expect("u32 overflowed usize") } #[cfg(not(debug_assertions))] { self as usize } } fn low_u8(self) -> u8 { self as u8 } fn low_u16(self) -> u16 { self as u16 } fn high_u16(self) -> u16 { (self >> 16) as u16 } } pub(crate) trait U64 { fn as_usize(self) -> usize; fn low_u8(self) -> u8; fn low_u16(self) -> u16; fn low_u32(self) -> u32; fn high_u32(self) -> u32; } impl U64 for u64 { fn as_usize(self) -> usize { #[cfg(debug_assertions)] { usize::try_from(self).expect("u64 overflowed usize") } #[cfg(not(debug_assertions))] { self as usize } } fn low_u8(self) -> u8 { self as u8 } fn low_u16(self) -> u16 { self as u16 } fn low_u32(self) -> u32 { self as u32 } fn high_u32(self) -> u32 { (self >> 32) as u32 } } pub(crate) trait I32 { fn as_usize(self) -> usize; fn to_bits(self) -> u32; fn from_bits(n: u32) -> i32; } impl I32 for i32 { fn as_usize(self) -> usize { #[cfg(debug_assertions)] { usize::try_from(self).expect("i32 overflowed usize") } #[cfg(not(debug_assertions))] { self as usize } } fn to_bits(self) -> u32 { self as u32 } fn from_bits(n: u32) -> i32 { n as i32 } } pub(crate) trait Usize { fn as_u8(self) -> u8; fn as_u16(self) -> u16; fn as_u32(self) -> u32; fn as_u64(self) -> u64; } impl Usize for usize { fn as_u8(self) -> u8 { #[cfg(debug_assertions)] { u8::try_from(self).expect("usize overflowed u8") } #[cfg(not(debug_assertions))] { self as u8 } } fn as_u16(self) -> u16 { #[cfg(debug_assertions)] { u16::try_from(self).expect("usize overflowed u16") } #[cfg(not(debug_assertions))] { self as u16 } } fn as_u32(self) -> u32 { #[cfg(debug_assertions)] { u32::try_from(self).expect("usize overflowed u32") } #[cfg(not(debug_assertions))] { self as u32 } } fn as_u64(self) -> u64 { #[cfg(debug_assertions)] { u64::try_from(self).expect("usize overflowed u64") } #[cfg(not(debug_assertions))] { self as u64 } } } // Pointers aren't integers, but we convert pointers to integers to perform // offset arithmetic in some places. (And no, we don't convert the integers // back to pointers.) So add 'as_usize' conversions here too for completeness. // // These 'as' casts are actually okay because they're always non-lossy. But the // idea here is to just try and remove as much 'as' as possible, particularly // in this crate where we are being really paranoid about offsets and making // sure we don't panic on inputs that might be untrusted. This way, the 'as' // casts become easier to audit if they're all in one place, even when some of // them are actually okay 100% of the time. 
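// As a concrete illustration (a sketch mirroring how
// `Accels::from_bytes_unchecked` in dfa/accel.rs uses this trait): the number
// of bytes consumed from a slice is computed by subtracting addresses, without
// ever turning the integers back into pointers:
//
//     let start = slice.as_ptr().as_usize();
//     // ... shrink `slice` as data is read ...
//     let nread = slice.as_ptr().as_usize() - start;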
pub(crate) trait Pointer { fn as_usize(self) -> usize; } impl<T> Pointer for *const T { fn as_usize(self) -> usize { self as usize } } pub(crate) trait PointerMut { fn as_usize(self) -> usize; } impl<T> PointerMut for *mut T { fn as_usize(self) -> usize { self as usize } } <file_sep>/test #!/bin/bash set -e # cd to the directory containing this crate's Cargo.toml so that we don't need # to pass --manifest-path to every `cargo` command. cd "$(dirname "$0")" # This is a convenience script for running a broad swath of tests across # features. We don't test the complete space, since the complete space is quite # large. Hopefully once we migrate the test suite to better infrastructure # (like regex-automata), we'll be able to test more of the space. echo "===== DEFAULT FEATURES =====" cargo test # no-std mode is annoyingly difficult to test. Currently, the integration tests # don't run. So for now, we just test that library tests run. (There aren't # many because `regex` is just a wrapper crate.) cargo test --no-default-features --lib echo "===== DOC TESTS =====" cargo test --doc features=( "std" "std unicode" "std unicode-perl" "std perf" "std perf-cache" "std perf-dfa" "std perf-inline" "std perf-literal" "std perf-dfa-full" "std perf-onepass" "std perf-backtrack" ) for f in "${features[@]}"; do echo "===== FEATURE: $f =====" cargo test --test integration --no-default-features --features "$f" done # And test the probably-forever-nightly-only 'pattern' feature... if rustc --version | grep -q nightly; then echo "===== FEATURE: std,pattern,unicode-perl =====" cargo test --test integration --no-default-features --features std,pattern,unicode-perl fi <file_sep>/tests/suite_bytes.rs use { anyhow::Result, regex::bytes::{Regex, RegexBuilder}, regex_test::{ CompiledRegex, Match, RegexTest, Span, TestResult, TestRunner, }, }; /// Tests the default configuration of the hybrid NFA/DFA. #[test] fn default() -> Result<()> { let mut runner = TestRunner::new()?; runner .expand(&["is_match", "find", "captures"], |test| test.compiles()) .blacklist_iter(super::BLACKLIST) .test_iter(crate::suite()?.iter(), compiler) .assert(); Ok(()) } fn run_test(re: &Regex, test: &RegexTest) -> TestResult { match test.additional_name() { "is_match" => TestResult::matched(re.is_match(test.haystack())), "find" => TestResult::matches( re.find_iter(test.haystack()) .take(test.match_limit().unwrap_or(std::usize::MAX)) .map(|m| Match { id: 0, span: Span { start: m.start(), end: m.end() }, }), ), "captures" => { let it = re .captures_iter(test.haystack()) .take(test.match_limit().unwrap_or(std::usize::MAX)) .map(|caps| testify_captures(&caps)); TestResult::captures(it) } name => TestResult::fail(&format!("unrecognized test name: {}", name)), } } /// Converts the given regex test to a closure that searches with a /// `bytes::Regex`. If the test configuration is unsupported, then a /// `CompiledRegex` that skips the test is returned. fn compiler( test: &RegexTest, _patterns: &[String], ) -> anyhow::Result<CompiledRegex> { let skip = Ok(CompiledRegex::skip()); // We're only testing bytes::Regex here, which supports one pattern only. let pattern = match test.regexes().len() { 1 => &test.regexes()[0], _ => return skip, }; // We only test is_match, find_iter and captures_iter. All of those are // leftmost searches. if !matches!(test.search_kind(), regex_test::SearchKind::Leftmost) { return skip; } // The top-level single-pattern regex API always uses leftmost-first. 
if !matches!(test.match_kind(), regex_test::MatchKind::LeftmostFirst) { return skip; } // The top-level regex API always runs unanchored searches. ... But we can // handle tests that are anchored but have only one match. if test.anchored() && test.match_limit() != Some(1) { return skip; } // We don't support tests with explicit search bounds. We could probably // support this by using the 'find_at' (and such) APIs. let bounds = test.bounds(); if !(bounds.start == 0 && bounds.end == test.haystack().len()) { return skip; } // The bytes::Regex API specifically does not support enabling UTF-8 mode. // It could I suppose, but currently it does not. That is, it permits // matches to have offsets that split codepoints. if test.utf8() { return skip; } // If the test requires Unicode but the Unicode feature isn't enabled, // skip it. This is a little aggressive, but the test suite doesn't // have any easy way of communicating which Unicode features are needed. if test.unicode() && !cfg!(feature = "unicode") { return skip; } let re = RegexBuilder::new(pattern) .case_insensitive(test.case_insensitive()) .unicode(test.unicode()) .line_terminator(test.line_terminator()) .build()?; Ok(CompiledRegex::compiled(move |test| run_test(&re, test))) } /// Convert `Captures` into the test suite's capture values. fn testify_captures( caps: &regex::bytes::Captures<'_>, ) -> regex_test::Captures { let spans = caps.iter().map(|group| { group.map(|m| regex_test::Span { start: m.start(), end: m.end() }) }); // This unwrap is OK because we assume our 'caps' represents a match, and // a match always gives a non-zero number of groups with the first group // being non-None. regex_test::Captures::new(0, spans).unwrap() } <file_sep>/regex-cli/args/mod.rs use std::{ fmt::{Debug, Display, Write}, str::FromStr, }; use { anyhow::Context, lexopt::{Arg, Parser, ValueExt}, }; pub mod api; pub mod backtrack; pub mod common; pub mod dfa; pub mod flags; pub mod haystack; pub mod hybrid; pub mod input; pub mod lite; pub mod meta; pub mod onepass; pub mod overlapping; pub mod patterns; pub mod pikevm; pub mod syntax; pub mod thompson; pub trait Configurable: Debug { fn configure( &mut self, p: &mut Parser, arg: &mut Arg, ) -> anyhow::Result<bool>; fn usage(&self) -> &[Usage]; } pub fn configure( p: &mut Parser, usage: &str, targets: &mut [&mut dyn Configurable], ) -> anyhow::Result<()> { while let Some(arg) = p.next()? { match arg { Arg::Short('h') | Arg::Long("help") => { let mut usages = vec![]; for t in targets.iter() { usages.extend_from_slice(t.usage()); } usages.sort_by_key(|u| { u.format .split_once(", ") .map(|(_, long)| long) .unwrap_or(u.format) }); let options = if arg == Arg::Short('h') { Usage::short(&usages) } else { Usage::long(&usages) }; let usage = usage.replace("%options%", &options); anyhow::bail!("{}", usage.trim()); } _ => {} } // We do this little dance to disentangle the lifetime of 'p' from the // lifetime on 'arg'. The cost is that we have to clone all long flag // names to give it a place to live that isn't tied to 'p'. Annoying, // but not the end of the world. let long_flag: Option<String> = match arg { Arg::Long(name) => Some(name.to_string()), _ => None, }; let mut arg = match long_flag { Some(ref flag) => Arg::Long(flag), None => match arg { Arg::Short(c) => Arg::Short(c), Arg::Long(_) => unreachable!(), Arg::Value(value) => Arg::Value(value), }, }; // OK, now ask all of our targets whether they want this argument. let mut recognized = false; for t in targets.iter_mut() { if t.configure(p, &mut arg)? 
{ recognized = true; break; } } if !recognized { return Err(arg.unexpected().into()); } } Ok(()) } /* pub struct AdHoc<'a> { usage: Usage, configure: Box<dyn FnMut(&mut Parser, &mut Arg) -> anyhow::Result<bool> + 'a>, } impl<'a> AdHoc<'a> { pub fn new( usage: Usage, configure: impl FnMut(&mut Parser, &mut Arg) -> anyhow::Result<bool> + 'a, ) -> AdHoc<'a> { AdHoc { usage, configure: Box::new(configure) } } } impl<'a> Configurable for AdHoc<'a> { fn configure( &mut self, p: &mut Parser, arg: &mut Arg, ) -> anyhow::Result<bool> { (self.configure)(p, arg) } fn usage(&self) -> &[Usage] { std::slice::from_ref(&self.usage) } } impl<'a> Debug for AdHoc<'a> { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { f.debug_struct("AdHoc") .field("usage", &self.usage) .field("configure", &"FnMut(..)") .finish() } } */ /// Parses the argument from the given parser as a command name, and returns /// it. If the next arg isn't a simple value then this returns an error. /// /// This also handles the case where -h/--help is given, in which case, the /// given usage information is converted into an error and printed. pub fn next_as_command(usage: &str, p: &mut Parser) -> anyhow::Result<String> { let usage = usage.trim(); let arg = match p.next()? { Some(arg) => arg, None => anyhow::bail!("{}", usage), }; let cmd = match arg { Arg::Value(cmd) => cmd.string()?, Arg::Short('h') | Arg::Long("help") => anyhow::bail!("{}", usage), arg => return Err(arg.unexpected().into()), }; Ok(cmd) } /// Parses the next 'p.value()' into 'T'. Any error messages will include the /// given flag name in them. pub fn parse<T>(p: &mut Parser, flag_name: &'static str) -> anyhow::Result<T> where T: FromStr, <T as FromStr>::Err: Display + Debug + Send + Sync + 'static, { // This is written somewhat awkwardly and the type signature is also pretty // funky primarily because of the following two things: 1) the 'FromStr' // impls in this crate just use 'anyhow::Error' for their error type and 2) // 'anyhow::Error' does not impl 'std::error::Error'. let osv = p.value().context(flag_name)?; let strv = match osv.to_str() { Some(strv) => strv, None => { let err = lexopt::Error::NonUnicodeValue(osv.into()); return Err(anyhow::Error::from(err).context(flag_name)); } }; let parsed = match strv.parse() { Err(err) => return Err(anyhow::Error::msg(err).context(flag_name)), Ok(parsed) => parsed, }; Ok(parsed) } /// Like `parse`, but permits the string value "none" to indicate absent. This /// is useful for parsing things like limits, where "no limit" is a legal /// value. But it can be used for anything. pub fn parse_maybe<T>( p: &mut Parser, flag_name: &'static str, ) -> anyhow::Result<Option<T>> where T: FromStr, <T as FromStr>::Err: Display + Debug + Send + Sync + 'static, { // This is written somewhat awkwardly and the type signature is also pretty // funky primarily because of the following two things: 1) the 'FromStr' // impls in this crate just use 'anyhow::Error' for their error type and 2) // 'anyhow::Error' does not impl 'std::error::Error'. let osv = p.value().context(flag_name)?; let strv = match osv.to_str() { Some(strv) => strv, None => { let err = lexopt::Error::NonUnicodeValue(osv.into()); return Err(anyhow::Error::from(err).context(flag_name)); } }; if strv == "none" { return Ok(None); } let parsed = match strv.parse() { Err(err) => return Err(anyhow::Error::msg(err).context(flag_name)), Ok(parsed) => parsed, }; Ok(Some(parsed)) } /// A type for expressing the documentation of a flag. 
/// /// The `Usage::short` and `Usage::long` functions take a slice of usages and /// format them into a human readable display. It does simple word wrapping and /// column alignment for you. #[derive(Clone, Copy, Debug)] pub struct Usage { /// The format of the flag, for example, `-k, --match-kind <kind>`. pub format: &'static str, /// A very short description of the flag. Should fit on one line along with /// the format. pub short: &'static str, /// A longer form description of the flag. May be multiple paragraphs long /// (but doesn't have to be). pub long: &'static str, } impl Usage { /// Create a new usage from the given components. pub const fn new( format: &'static str, short: &'static str, long: &'static str, ) -> Usage { Usage { format, short, long } } /// Format a two column table from the given usages, where the first /// column is the format and the second column is the short description. pub fn short(usages: &[Usage]) -> String { const MIN_SPACE: usize = 2; let mut result = String::new(); let max_len = match usages.iter().map(|u| u.format.len()).max() { None => return result, Some(len) => len, }; for usage in usages.iter() { let padlen = MIN_SPACE + (max_len - usage.format.len()); let padding = " ".repeat(padlen); writeln!(result, " {}{}{}", usage.format, padding, usage.short) .unwrap(); } result } /// Print the format of each usage and its long description below the /// format. This also does appropriate indentation with the assumption that /// it is in an OPTIONS section of a bigger usage message. pub fn long(usages: &[Usage]) -> String { let wrap_opts = textwrap::Options::new(79) .initial_indent(" ") .subsequent_indent(" "); let mut result = String::new(); for (i, usage) in usages.iter().enumerate() { if i > 0 { writeln!(result, "").unwrap(); } writeln!(result, " {}", usage.format).unwrap(); for (i, paragraph) in usage.long.trim().split("\n\n").enumerate() { if i > 0 { result.push('\n'); } let flattened = paragraph.replace("\n", " "); for line in textwrap::wrap(&flattened, &wrap_opts) { result.push_str(&line); result.push('\n'); } } } result } } <file_sep>/regex-automata/src/nfa/thompson/compiler.rs use core::{borrow::Borrow, cell::RefCell}; use alloc::{sync::Arc, vec, vec::Vec}; use regex_syntax::{ hir::{self, Hir}, utf8::{Utf8Range, Utf8Sequences}, ParserBuilder, }; use crate::{ nfa::thompson::{ builder::Builder, error::BuildError, literal_trie::LiteralTrie, map::{Utf8BoundedMap, Utf8SuffixKey, Utf8SuffixMap}, nfa::{Transition, NFA}, range_trie::RangeTrie, }, util::{ look::{Look, LookMatcher}, primitives::{PatternID, StateID}, }, }; /// The configuration used for a Thompson NFA compiler. #[derive(Clone, Debug, Default)] pub struct Config { utf8: Option<bool>, reverse: Option<bool>, nfa_size_limit: Option<Option<usize>>, shrink: Option<bool>, which_captures: Option<WhichCaptures>, look_matcher: Option<LookMatcher>, #[cfg(test)] unanchored_prefix: Option<bool>, } impl Config { /// Return a new default Thompson NFA compiler configuration. pub fn new() -> Config { Config::default() } /// Whether to enable UTF-8 mode during search or not. /// /// A regex engine is said to be in UTF-8 mode when it guarantees that /// all matches returned by it have spans consisting of only valid UTF-8. /// That is, it is impossible for a match span to be returned that /// contains any invalid UTF-8. /// /// UTF-8 mode generally consists of two things: /// /// 1. 
Whether the NFA's states are constructed such that all paths to a /// match state that consume at least one byte always correspond to valid /// UTF-8. /// 2. Whether all paths to a match state that do _not_ consume any bytes /// should always correspond to valid UTF-8 boundaries. /// /// (1) is a guarantee made by whoever constructs the NFA. /// If you're parsing a regex from its concrete syntax, then /// [`syntax::Config::utf8`](crate::util::syntax::Config::utf8) can make /// this guarantee for you. It does it by returning an error if the regex /// pattern could every report a non-empty match span that contains invalid /// UTF-8. So long as `syntax::Config::utf8` mode is enabled and your regex /// successfully parses, then you're guaranteed that the corresponding NFA /// will only ever report non-empty match spans containing valid UTF-8. /// /// (2) is a trickier guarantee because it cannot be enforced by the NFA /// state graph itself. Consider, for example, the regex `a*`. It matches /// the empty strings in `☃` at positions `0`, `1`, `2` and `3`, where /// positions `1` and `2` occur within the UTF-8 encoding of a codepoint, /// and thus correspond to invalid UTF-8 boundaries. Therefore, this /// guarantee must be made at a higher level than the NFA state graph /// itself. This crate deals with this case in each regex engine. Namely, /// when a zero-width match that splits a codepoint is found and UTF-8 /// mode enabled, then it is ignored and the engine moves on looking for /// the next match. /// /// Thus, UTF-8 mode is both a promise that the NFA built only reports /// non-empty matches that are valid UTF-8, and an *instruction* to regex /// engines that empty matches that split codepoints should be banned. /// /// Because UTF-8 mode is fundamentally about avoiding invalid UTF-8 spans, /// it only makes sense to enable this option when you *know* your haystack /// is valid UTF-8. (For example, a `&str`.) Enabling UTF-8 mode and /// searching a haystack that contains invalid UTF-8 leads to **unspecified /// behavior**. /// /// Therefore, it may make sense to enable `syntax::Config::utf8` while /// simultaneously *disabling* this option. That would ensure all non-empty /// match spans are valid UTF-8, but that empty match spans may still split /// a codepoint or match at other places that aren't valid UTF-8. /// /// In general, this mode is only relevant if your regex can match the /// empty string. Most regexes don't. /// /// This is enabled by default. /// /// # Example /// /// This example shows how UTF-8 mode can impact the match spans that may /// be reported in certain cases. /// /// ``` /// use regex_automata::{ /// nfa::thompson::{self, pikevm::PikeVM}, /// Match, Input, /// }; /// /// let re = PikeVM::new("")?; /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); /// /// // UTF-8 mode is enabled by default. /// let mut input = Input::new("☃"); /// re.search(&mut cache, &input, &mut caps); /// assert_eq!(Some(Match::must(0, 0..0)), caps.get_match()); /// /// // Even though an empty regex matches at 1..1, our next match is /// // 3..3 because 1..1 and 2..2 split the snowman codepoint (which is /// // three bytes long). 
/// input.set_start(1); /// re.search(&mut cache, &input, &mut caps); /// assert_eq!(Some(Match::must(0, 3..3)), caps.get_match()); /// /// // But if we disable UTF-8, then we'll get matches at 1..1 and 2..2: /// let re = PikeVM::builder() /// .thompson(thompson::Config::new().utf8(false)) /// .build("")?; /// re.search(&mut cache, &input, &mut caps); /// assert_eq!(Some(Match::must(0, 1..1)), caps.get_match()); /// /// input.set_start(2); /// re.search(&mut cache, &input, &mut caps); /// assert_eq!(Some(Match::must(0, 2..2)), caps.get_match()); /// /// input.set_start(3); /// re.search(&mut cache, &input, &mut caps); /// assert_eq!(Some(Match::must(0, 3..3)), caps.get_match()); /// /// input.set_start(4); /// re.search(&mut cache, &input, &mut caps); /// assert_eq!(None, caps.get_match()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn utf8(mut self, yes: bool) -> Config { self.utf8 = Some(yes); self } /// Reverse the NFA. /// /// A NFA reversal is performed by reversing all of the concatenated /// sub-expressions in the original pattern, recursively. (Look around /// operators are also inverted.) The resulting NFA can be used to match /// the pattern starting from the end of a string instead of the beginning /// of a string. /// /// Reversing the NFA is useful for building a reverse DFA, which is most /// useful for finding the start of a match after its ending position has /// been found. NFA execution engines typically do not work on reverse /// NFAs. For example, currently, the Pike VM reports the starting location /// of matches without a reverse NFA. /// /// Currently, enabling this setting requires disabling the /// [`captures`](Config::captures) setting. If both are enabled, then the /// compiler will return an error. It is expected that this limitation will /// be lifted in the future. /// /// This is disabled by default. /// /// # Example /// /// This example shows how to build a DFA from a reverse NFA, and then use /// the DFA to search backwards. /// /// ``` /// use regex_automata::{ /// dfa::{self, Automaton}, /// nfa::thompson::{NFA, WhichCaptures}, /// HalfMatch, Input, /// }; /// /// let dfa = dfa::dense::Builder::new() /// .thompson(NFA::config() /// .which_captures(WhichCaptures::None) /// .reverse(true) /// ) /// .build("baz[0-9]+")?; /// let expected = Some(HalfMatch::must(0, 3)); /// assert_eq!( /// expected, /// dfa.try_search_rev(&Input::new("foobaz12345bar"))?, /// ); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn reverse(mut self, yes: bool) -> Config { self.reverse = Some(yes); self } /// Sets an approximate size limit on the total heap used by the NFA being /// compiled. /// /// This permits imposing constraints on the size of a compiled NFA. This /// may be useful in contexts where the regex pattern is untrusted and one /// wants to avoid using too much memory. /// /// This size limit does not apply to auxiliary heap used during /// compilation that is not part of the built NFA. /// /// Note that this size limit is applied during compilation in order for /// the limit to prevent too much heap from being used. However, the /// implementation may use an intermediate NFA representation that is /// otherwise slightly bigger than the final public form. Since the size /// limit may be applied to an intermediate representation, there is not /// necessarily a precise correspondence between the configured size limit /// and the heap usage of the final NFA. /// /// There is no size limit by default. 
/// /// # Example /// /// This example demonstrates how Unicode mode can greatly increase the /// size of the NFA. /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::nfa::thompson::NFA; /// /// // 300KB isn't enough! /// NFA::compiler() /// .configure(NFA::config().nfa_size_limit(Some(300_000))) /// .build(r"\w{20}") /// .unwrap_err(); /// /// // ... but 400KB probably is. /// let nfa = NFA::compiler() /// .configure(NFA::config().nfa_size_limit(Some(400_000))) /// .build(r"\w{20}")?; /// /// assert_eq!(nfa.pattern_len(), 1); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn nfa_size_limit(mut self, bytes: Option<usize>) -> Config { self.nfa_size_limit = Some(bytes); self } /// Apply best effort heuristics to shrink the NFA at the expense of more /// time/memory. /// /// Generally speaking, if one is using an NFA to compile a DFA, then the /// extra time used to shrink the NFA will be more than made up for during /// DFA construction (potentially by a lot). In other words, enabling this /// can substantially decrease the overall amount of time it takes to build /// a DFA. /// /// A reason to keep this disabled is if you want to compile an NFA and /// start using it as quickly as possible without needing to build a DFA, /// and you don't mind using a bit of extra memory for the NFA. e.g., for /// an NFA simulation or for a lazy DFA. /// /// NFA shrinking is currently most useful when compiling a reverse /// NFA with large Unicode character classes. In particular, it trades /// additional CPU time during NFA compilation in favor of generating fewer /// NFA states. /// /// This is disabled by default because it can increase compile times /// quite a bit if you aren't building a full DFA. /// /// # Example /// /// This example shows that NFA shrinking can lead to substantial space /// savings in some cases. Notice that, as noted above, we build a reverse /// DFA and use a pattern with a large Unicode character class. /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::nfa::thompson::{NFA, WhichCaptures}; /// /// // Currently we have to disable captures when enabling reverse NFA. /// let config = NFA::config() /// .which_captures(WhichCaptures::None) /// .reverse(true); /// let not_shrunk = NFA::compiler() /// .configure(config.clone().shrink(false)) /// .build(r"\w")?; /// let shrunk = NFA::compiler() /// .configure(config.clone().shrink(true)) /// .build(r"\w")?; /// /// // While a specific shrink factor is not guaranteed, the savings can be /// // considerable in some cases. /// assert!(shrunk.states().len() * 2 < not_shrunk.states().len()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn shrink(mut self, yes: bool) -> Config { self.shrink = Some(yes); self } /// Whether to include 'Capture' states in the NFA. /// /// Currently, enabling this setting requires disabling the /// [`reverse`](Config::reverse) setting. If both are enabled, then the /// compiler will return an error. It is expected that this limitation will /// be lifted in the future. /// /// This is enabled by default. /// /// # Example /// /// This example demonstrates that some regex engines, like the Pike VM, /// require capturing states to be present in the NFA to report match /// offsets. /// /// (Note that since this method is deprecated, the example below uses /// [`Config::which_captures`] to disable capture states.) 
/// /// ``` /// use regex_automata::nfa::thompson::{ /// pikevm::PikeVM, /// NFA, /// WhichCaptures, /// }; /// /// let re = PikeVM::builder() /// .thompson(NFA::config().which_captures(WhichCaptures::None)) /// .build(r"[a-z]+")?; /// let mut cache = re.create_cache(); /// /// assert!(re.is_match(&mut cache, "abc")); /// assert_eq!(None, re.find(&mut cache, "abc")); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[deprecated(since = "0.3.5", note = "use which_captures instead")] pub fn captures(self, yes: bool) -> Config { self.which_captures(if yes { WhichCaptures::All } else { WhichCaptures::None }) } /// Configures what kinds of capture groups are compiled into /// [`State::Capture`](crate::nfa::thompson::State::Capture) states in a /// Thompson NFA. /// /// Currently, using any option except for [`WhichCaptures::None`] requires /// disabling the [`reverse`](Config::reverse) setting. If both are /// enabled, then the compiler will return an error. It is expected that /// this limitation will be lifted in the future. /// /// This is set to [`WhichCaptures::All`] by default. Callers may wish to /// use [`WhichCaptures::Implicit`] in cases where one wants avoid the /// overhead of capture states for explicit groups. Usually this occurs /// when one wants to use the `PikeVM` only for determining the overall /// match. Otherwise, the `PikeVM` could use much more memory than is /// necessary. /// /// # Example /// /// This example demonstrates that some regex engines, like the Pike VM, /// require capturing states to be present in the NFA to report match /// offsets. /// /// ``` /// use regex_automata::nfa::thompson::{ /// pikevm::PikeVM, /// NFA, /// WhichCaptures, /// }; /// /// let re = PikeVM::builder() /// .thompson(NFA::config().which_captures(WhichCaptures::None)) /// .build(r"[a-z]+")?; /// let mut cache = re.create_cache(); /// /// assert!(re.is_match(&mut cache, "abc")); /// assert_eq!(None, re.find(&mut cache, "abc")); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// The same applies to the bounded backtracker: /// /// ``` /// use regex_automata::nfa::thompson::{ /// backtrack::BoundedBacktracker, /// NFA, /// WhichCaptures, /// }; /// /// let re = BoundedBacktracker::builder() /// .thompson(NFA::config().which_captures(WhichCaptures::None)) /// .build(r"[a-z]+")?; /// let mut cache = re.create_cache(); /// /// assert!(re.try_is_match(&mut cache, "abc")?); /// assert_eq!(None, re.try_find(&mut cache, "abc")?); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn which_captures(mut self, which_captures: WhichCaptures) -> Config { self.which_captures = Some(which_captures); self } /// Sets the look-around matcher that should be used with this NFA. /// /// A look-around matcher determines how to match look-around assertions. /// In particular, some assertions are configurable. For example, the /// `(?m:^)` and `(?m:$)` assertions can have their line terminator changed /// from the default of `\n` to any other byte. /// /// # Example /// /// This shows how to change the line terminator for multi-line assertions. 
/// /// ``` /// use regex_automata::{ /// nfa::thompson::{self, pikevm::PikeVM}, /// util::look::LookMatcher, /// Match, Input, /// }; /// /// let mut lookm = LookMatcher::new(); /// lookm.set_line_terminator(b'\x00'); /// /// let re = PikeVM::builder() /// .thompson(thompson::Config::new().look_matcher(lookm)) /// .build(r"(?m)^[a-z]+$")?; /// let mut cache = re.create_cache(); /// /// // Multi-line assertions now use NUL as a terminator. /// assert_eq!( /// Some(Match::must(0, 1..4)), /// re.find(&mut cache, b"\x00abc\x00"), /// ); /// // ... and \n is no longer recognized as a terminator. /// assert_eq!( /// None, /// re.find(&mut cache, b"\nabc\n"), /// ); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn look_matcher(mut self, m: LookMatcher) -> Config { self.look_matcher = Some(m); self } /// Whether to compile an unanchored prefix into this NFA. /// /// This is enabled by default. It is made available for tests only to make /// it easier to unit test the output of the compiler. #[cfg(test)] fn unanchored_prefix(mut self, yes: bool) -> Config { self.unanchored_prefix = Some(yes); self } /// Returns whether this configuration has enabled UTF-8 mode. pub fn get_utf8(&self) -> bool { self.utf8.unwrap_or(true) } /// Returns whether this configuration has enabled reverse NFA compilation. pub fn get_reverse(&self) -> bool { self.reverse.unwrap_or(false) } /// Return the configured NFA size limit, if it exists, in the number of /// bytes of heap used. pub fn get_nfa_size_limit(&self) -> Option<usize> { self.nfa_size_limit.unwrap_or(None) } /// Return whether NFA shrinking is enabled. pub fn get_shrink(&self) -> bool { self.shrink.unwrap_or(false) } /// Return whether NFA compilation is configured to produce capture states. #[deprecated(since = "0.3.5", note = "use get_which_captures instead")] pub fn get_captures(&self) -> bool { self.get_which_captures().is_any() } /// Return what kinds of capture states will be compiled into an NFA. pub fn get_which_captures(&self) -> WhichCaptures { self.which_captures.unwrap_or(WhichCaptures::All) } /// Return the look-around matcher for this NFA. pub fn get_look_matcher(&self) -> LookMatcher { self.look_matcher.clone().unwrap_or(LookMatcher::default()) } /// Return whether NFA compilation is configured to include an unanchored /// prefix. /// /// This is always false when not in test mode. fn get_unanchored_prefix(&self) -> bool { #[cfg(test)] { self.unanchored_prefix.unwrap_or(true) } #[cfg(not(test))] { true } } /// Overwrite the default configuration such that the options in `o` are /// always used. If an option in `o` is not set, then the corresponding /// option in `self` is used. If it's not set in `self` either, then it /// remains not set. pub(crate) fn overwrite(&self, o: Config) -> Config { Config { utf8: o.utf8.or(self.utf8), reverse: o.reverse.or(self.reverse), nfa_size_limit: o.nfa_size_limit.or(self.nfa_size_limit), shrink: o.shrink.or(self.shrink), which_captures: o.which_captures.or(self.which_captures), look_matcher: o.look_matcher.or_else(|| self.look_matcher.clone()), #[cfg(test)] unanchored_prefix: o.unanchored_prefix.or(self.unanchored_prefix), } } } /// A configuration indicating which kinds of /// [`State::Capture`](crate::nfa::thompson::State::Capture) states to include. /// /// This configuration can be used with [`Config::which_captures`] to control /// which capture states are compiled into a Thompson NFA. /// /// The default configuration is [`WhichCaptures::All`]. 
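// An illustrative configuration (a sketch patterned on the
// `WhichCaptures::None` examples in the documentation above; not an
// additional doc test): when only overall match spans are needed,
// `WhichCaptures::Implicit` keeps the implicit group 0 while dropping the
// capture states for explicit groups:
//
//     let re = PikeVM::builder()
//         .thompson(NFA::config().which_captures(WhichCaptures::Implicit))
//         .build(r"([a-z])([0-9])")?;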
#[derive(Clone, Copy, Debug)] pub enum WhichCaptures { /// All capture states, including those corresponding to both implicit and /// explicit capture groups, are included in the Thompson NFA. All, /// Only capture states corresponding to implicit capture groups are /// included. Implicit capture groups appear in every pattern implicitly /// and correspond to the overall match of a pattern. /// /// This is useful when one only cares about the overall match of a /// pattern. By excluding capture states from explicit capture groups, /// one might be able to reduce the memory usage of a multi-pattern regex /// substantially if it was otherwise written to have many explicit capture /// groups. Implicit, /// No capture states are compiled into the Thompson NFA. /// /// This is useful when capture states are either not needed (for example, /// if one is only trying to build a DFA) or if they aren't supported (for /// example, a reverse NFA). None, } impl Default for WhichCaptures { fn default() -> WhichCaptures { WhichCaptures::All } } impl WhichCaptures { /// Returns true if this configuration indicates that no capture states /// should be produced in an NFA. pub fn is_none(&self) -> bool { matches!(*self, WhichCaptures::None) } /// Returns true if this configuration indicates that some capture states /// should be added to an NFA. Note that this might only include capture /// states for implicit capture groups. pub fn is_any(&self) -> bool { !self.is_none() } } /* This compiler below uses Thompson's construction algorithm. The compiler takes a regex-syntax::Hir as input and emits an NFA graph as output. The NFA graph is structured in a way that permits it to be executed by a virtual machine and also used to efficiently build a DFA. The compiler deals with a slightly expanded set of NFA states than what is in a final NFA (as exhibited by builder::State and nfa::State). Notably a compiler state includes an empty node that has exactly one unconditional epsilon transition to the next state. In other words, it's a "goto" instruction if one views Thompson's NFA as a set of bytecode instructions. These goto instructions are removed in a subsequent phase before returning the NFA to the caller. The purpose of these empty nodes is that they make the construction algorithm substantially simpler to implement. We remove them before returning to the caller because they can represent substantial overhead when traversing the NFA graph (either while searching using the NFA directly or while building a DFA). In the future, it would be nice to provide a Glushkov compiler as well, as it would work well as a bit-parallel NFA for smaller regexes. But the Thompson construction is one I'm more familiar with and seems more straight-forward to deal with when it comes to large Unicode character classes. Internally, the compiler uses interior mutability to improve composition in the face of the borrow checker. In particular, we'd really like to be able to write things like this: self.c_concat(exprs.iter().map(|e| self.c(e))) Which elegantly uses iterators to build up a sequence of compiled regex sub-expressions and then hands it off to the concatenating compiler routine. Without interior mutability, the borrow checker won't let us borrow `self` mutably both inside and outside the closure at the same time. */ /// A builder for compiling an NFA from a regex's high-level intermediate /// representation (HIR). /// /// This compiler provides a way to translate a parsed regex pattern into an /// NFA state graph. 
The NFA state graph can either be used directly to execute /// a search (e.g., with a Pike VM), or it can be further used to build a DFA. /// /// This compiler provides APIs both for compiling regex patterns directly from /// their concrete syntax, or via a [`regex_syntax::hir::Hir`]. /// /// This compiler has various options that may be configured via /// [`thompson::Config`](Config). /// /// Note that a compiler is not the same as a [`thompson::Builder`](Builder). /// A `Builder` provides a lower level API that is uncoupled from a regex /// pattern's concrete syntax or even its HIR. Instead, it permits stitching /// together an NFA by hand. See its docs for examples. /// /// # Example: compilation from concrete syntax /// /// This shows how to compile an NFA from a pattern string while setting a size /// limit on how big the NFA is allowed to be (in terms of bytes of heap used). /// /// ``` /// use regex_automata::{ /// nfa::thompson::{NFA, pikevm::PikeVM}, /// Match, /// }; /// /// let config = NFA::config().nfa_size_limit(Some(1_000)); /// let nfa = NFA::compiler().configure(config).build(r"(?-u)\w")?; /// /// let re = PikeVM::new_from_nfa(nfa)?; /// let mut cache = re.create_cache(); /// let mut caps = re.create_captures(); /// let expected = Some(Match::must(0, 3..4)); /// re.captures(&mut cache, "!@#A#@!", &mut caps); /// assert_eq!(expected, caps.get_match()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// # Example: compilation from HIR /// /// This shows how to hand assemble a regular expression via its HIR, and then /// compile an NFA directly from it. /// /// ``` /// use regex_automata::{nfa::thompson::{NFA, pikevm::PikeVM}, Match}; /// use regex_syntax::hir::{Hir, Class, ClassBytes, ClassBytesRange}; /// /// let hir = Hir::class(Class::Bytes(ClassBytes::new(vec![ /// ClassBytesRange::new(b'0', b'9'), /// ClassBytesRange::new(b'A', b'Z'), /// ClassBytesRange::new(b'_', b'_'), /// ClassBytesRange::new(b'a', b'z'), /// ]))); /// /// let config = NFA::config().nfa_size_limit(Some(1_000)); /// let nfa = NFA::compiler().configure(config).build_from_hir(&hir)?; /// /// let re = PikeVM::new_from_nfa(nfa)?; /// let mut cache = re.create_cache(); /// let mut caps = re.create_captures(); /// let expected = Some(Match::must(0, 3..4)); /// re.captures(&mut cache, "!@#A#@!", &mut caps); /// assert_eq!(expected, caps.get_match()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[derive(Clone, Debug)] pub struct Compiler { /// A regex parser, used when compiling an NFA directly from a pattern /// string. parser: ParserBuilder, /// The compiler configuration. config: Config, /// The builder for actually constructing an NFA. This provides a /// convenient abstraction for writing a compiler. builder: RefCell<Builder>, /// State used for compiling character classes to UTF-8 byte automata. /// State is not retained between character class compilations. This just /// serves to amortize allocation to the extent possible. utf8_state: RefCell<Utf8State>, /// State used for arranging character classes in reverse into a trie. trie_state: RefCell<RangeTrie>, /// State used for caching common suffixes when compiling reverse UTF-8 /// automata (for Unicode character classes). utf8_suffix: RefCell<Utf8SuffixMap>, } impl Compiler { /// Create a new NFA builder with its default configuration. 
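    ///
    /// # Example
    ///
    /// A minimal sketch of building an NFA with the default configuration.
    /// (The import path for `Compiler` is assumed here from this module's own
    /// doc links; the other examples in this module reach the same compiler
    /// via `NFA::compiler()`.)
    ///
    /// ```
    /// use regex_automata::nfa::thompson::Compiler;
    ///
    /// let nfa = Compiler::new().build(r"[0-9]{4}")?;
    /// assert_eq!(nfa.pattern_len(), 1);
    ///
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```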
pub fn new() -> Compiler { Compiler { parser: ParserBuilder::new(), config: Config::default(), builder: RefCell::new(Builder::new()), utf8_state: RefCell::new(Utf8State::new()), trie_state: RefCell::new(RangeTrie::new()), utf8_suffix: RefCell::new(Utf8SuffixMap::new(1000)), } } /// Compile the given regular expression pattern into an NFA. /// /// If there was a problem parsing the regex, then that error is returned. /// /// Otherwise, if there was a problem building the NFA, then an error is /// returned. The only error that can occur is if the compiled regex would /// exceed the size limits configured on this builder, or if any part of /// the NFA would exceed the integer representations used. (For example, /// too many states might plausibly occur on a 16-bit target.) /// /// # Example /// /// ``` /// use regex_automata::{nfa::thompson::{NFA, pikevm::PikeVM}, Match}; /// /// let config = NFA::config().nfa_size_limit(Some(1_000)); /// let nfa = NFA::compiler().configure(config).build(r"(?-u)\w")?; /// /// let re = PikeVM::new_from_nfa(nfa)?; /// let mut cache = re.create_cache(); /// let mut caps = re.create_captures(); /// let expected = Some(Match::must(0, 3..4)); /// re.captures(&mut cache, "!@#A#@!", &mut caps); /// assert_eq!(expected, caps.get_match()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn build(&self, pattern: &str) -> Result<NFA, BuildError> { self.build_many(&[pattern]) } /// Compile the given regular expression patterns into a single NFA. /// /// When matches are returned, the pattern ID corresponds to the index of /// the pattern in the slice given. /// /// # Example /// /// ``` /// use regex_automata::{nfa::thompson::{NFA, pikevm::PikeVM}, Match}; /// /// let config = NFA::config().nfa_size_limit(Some(1_000)); /// let nfa = NFA::compiler().configure(config).build_many(&[ /// r"(?-u)\s", /// r"(?-u)\w", /// ])?; /// /// let re = PikeVM::new_from_nfa(nfa)?; /// let mut cache = re.create_cache(); /// let mut caps = re.create_captures(); /// let expected = Some(Match::must(1, 1..2)); /// re.captures(&mut cache, "!A! !A!", &mut caps); /// assert_eq!(expected, caps.get_match()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn build_many<P: AsRef<str>>( &self, patterns: &[P], ) -> Result<NFA, BuildError> { let mut hirs = vec![]; for p in patterns { hirs.push( self.parser .build() .parse(p.as_ref()) .map_err(BuildError::syntax)?, ); debug!("parsed: {:?}", p.as_ref()); } self.build_many_from_hir(&hirs) } /// Compile the given high level intermediate representation of a regular /// expression into an NFA. /// /// If there was a problem building the NFA, then an error is returned. The /// only error that can occur is if the compiled regex would exceed the /// size limits configured on this builder, or if any part of the NFA would /// exceed the integer representations used. (For example, too many states /// might plausibly occur on a 16-bit target.) 
/// /// # Example /// /// ``` /// use regex_automata::{nfa::thompson::{NFA, pikevm::PikeVM}, Match}; /// use regex_syntax::hir::{Hir, Class, ClassBytes, ClassBytesRange}; /// /// let hir = Hir::class(Class::Bytes(ClassBytes::new(vec![ /// ClassBytesRange::new(b'0', b'9'), /// ClassBytesRange::new(b'A', b'Z'), /// ClassBytesRange::new(b'_', b'_'), /// ClassBytesRange::new(b'a', b'z'), /// ]))); /// /// let config = NFA::config().nfa_size_limit(Some(1_000)); /// let nfa = NFA::compiler().configure(config).build_from_hir(&hir)?; /// /// let re = PikeVM::new_from_nfa(nfa)?; /// let mut cache = re.create_cache(); /// let mut caps = re.create_captures(); /// let expected = Some(Match::must(0, 3..4)); /// re.captures(&mut cache, "!@#A#@!", &mut caps); /// assert_eq!(expected, caps.get_match()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn build_from_hir(&self, expr: &Hir) -> Result<NFA, BuildError> { self.build_many_from_hir(&[expr]) } /// Compile the given high level intermediate representations of regular /// expressions into a single NFA. /// /// When matches are returned, the pattern ID corresponds to the index of /// the pattern in the slice given. /// /// # Example /// /// ``` /// use regex_automata::{nfa::thompson::{NFA, pikevm::PikeVM}, Match}; /// use regex_syntax::hir::{Hir, Class, ClassBytes, ClassBytesRange}; /// /// let hirs = &[ /// Hir::class(Class::Bytes(ClassBytes::new(vec![ /// ClassBytesRange::new(b'\t', b'\r'), /// ClassBytesRange::new(b' ', b' '), /// ]))), /// Hir::class(Class::Bytes(ClassBytes::new(vec![ /// ClassBytesRange::new(b'0', b'9'), /// ClassBytesRange::new(b'A', b'Z'), /// ClassBytesRange::new(b'_', b'_'), /// ClassBytesRange::new(b'a', b'z'), /// ]))), /// ]; /// /// let config = NFA::config().nfa_size_limit(Some(1_000)); /// let nfa = NFA::compiler().configure(config).build_many_from_hir(hirs)?; /// /// let re = PikeVM::new_from_nfa(nfa)?; /// let mut cache = re.create_cache(); /// let mut caps = re.create_captures(); /// let expected = Some(Match::must(1, 1..2)); /// re.captures(&mut cache, "!A! !A!", &mut caps); /// assert_eq!(expected, caps.get_match()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn build_many_from_hir<H: Borrow<Hir>>( &self, exprs: &[H], ) -> Result<NFA, BuildError> { self.compile(exprs) } /// Apply the given NFA configuration options to this builder. /// /// # Example /// /// ``` /// use regex_automata::nfa::thompson::NFA; /// /// let config = NFA::config().nfa_size_limit(Some(1_000)); /// let nfa = NFA::compiler().configure(config).build(r"(?-u)\w")?; /// assert_eq!(nfa.pattern_len(), 1); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn configure(&mut self, config: Config) -> &mut Compiler { self.config = self.config.overwrite(config); self } /// Set the syntax configuration for this builder using /// [`syntax::Config`](crate::util::syntax::Config). /// /// This permits setting things like case insensitivity, Unicode and multi /// line mode. /// /// This syntax configuration only applies when an NFA is built directly /// from a pattern string. If an NFA is built from an HIR, then all syntax /// settings are ignored. /// /// # Example /// /// ``` /// use regex_automata::{nfa::thompson::NFA, util::syntax}; /// /// let syntax_config = syntax::Config::new().unicode(false); /// let nfa = NFA::compiler().syntax(syntax_config).build(r"\w")?; /// // If Unicode were enabled, the number of states would be much bigger. 
/// assert!(nfa.states().len() < 15); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn syntax( &mut self, config: crate::util::syntax::Config, ) -> &mut Compiler { config.apply(&mut self.parser); self } } impl Compiler { /// Compile the sequence of HIR expressions given. Pattern IDs are /// allocated starting from 0, in correspondence with the slice given. /// /// It is legal to provide an empty slice. In that case, the NFA returned /// has no patterns and will never match anything. fn compile<H: Borrow<Hir>>(&self, exprs: &[H]) -> Result<NFA, BuildError> { if exprs.len() > PatternID::LIMIT { return Err(BuildError::too_many_patterns(exprs.len())); } if self.config.get_reverse() && self.config.get_which_captures().is_any() { return Err(BuildError::unsupported_captures()); } self.builder.borrow_mut().clear(); self.builder.borrow_mut().set_utf8(self.config.get_utf8()); self.builder.borrow_mut().set_reverse(self.config.get_reverse()); self.builder .borrow_mut() .set_look_matcher(self.config.get_look_matcher()); self.builder .borrow_mut() .set_size_limit(self.config.get_nfa_size_limit())?; // We always add an unanchored prefix unless we were specifically told // not to (for tests only), or if we know that the regex is anchored // for all matches. When an unanchored prefix is not added, then the // NFA's anchored and unanchored start states are equivalent. let all_anchored = exprs.iter().all(|e| { e.borrow() .properties() .look_set_prefix() .contains(hir::Look::Start) }); let anchored = !self.config.get_unanchored_prefix() || all_anchored; let unanchored_prefix = if anchored { self.c_empty()? } else { self.c_at_least(&Hir::dot(hir::Dot::AnyByte), false, 0)? }; let compiled = self.c_alt_iter(exprs.iter().map(|e| { let _ = self.start_pattern()?; let one = self.c_cap(0, None, e.borrow())?; let match_state_id = self.add_match()?; self.patch(one.end, match_state_id)?; let _ = self.finish_pattern(one.start)?; Ok(ThompsonRef { start: one.start, end: match_state_id }) }))?; self.patch(unanchored_prefix.end, compiled.start)?; let nfa = self .builder .borrow_mut() .build(compiled.start, unanchored_prefix.start)?; debug!("HIR-to-NFA compilation complete, config: {:?}", self.config); Ok(nfa) } /// Compile an arbitrary HIR expression. fn c(&self, expr: &Hir) -> Result<ThompsonRef, BuildError> { use regex_syntax::hir::{Class, HirKind::*}; match *expr.kind() { Empty => self.c_empty(), Literal(hir::Literal(ref bytes)) => self.c_literal(bytes), Class(Class::Bytes(ref c)) => self.c_byte_class(c), Class(Class::Unicode(ref c)) => self.c_unicode_class(c), Look(ref look) => self.c_look(look), Repetition(ref rep) => self.c_repetition(rep), Capture(ref c) => self.c_cap(c.index, c.name.as_deref(), &c.sub), Concat(ref es) => self.c_concat(es.iter().map(|e| self.c(e))), Alternation(ref es) => self.c_alt_slice(es), } } /// Compile a concatenation of the sub-expressions yielded by the given /// iterator. If the iterator yields no elements, then this compiles down /// to an "empty" state that always matches. /// /// If the compiler is in reverse mode, then the expressions given are /// automatically compiled in reverse. 
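    ///
    /// For intuition (mirroring the `compile_literal` test at the bottom of
    /// this module), concatenating the two byte states for `a` and `b` simply
    /// patches the end of the first into the start of the second:
    ///
    /// ```text
    /// >000000: 61 => 01
    ///  000001: 62 => 02
    ///  000002: MATCH
    /// ```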
fn c_concat<I>(&self, mut it: I) -> Result<ThompsonRef, BuildError> where I: DoubleEndedIterator<Item = Result<ThompsonRef, BuildError>>, { let first = if self.is_reverse() { it.next_back() } else { it.next() }; let ThompsonRef { start, mut end } = match first { Some(result) => result?, None => return self.c_empty(), }; loop { let next = if self.is_reverse() { it.next_back() } else { it.next() }; let compiled = match next { Some(result) => result?, None => break, }; self.patch(end, compiled.start)?; end = compiled.end; } Ok(ThompsonRef { start, end }) } /// Compile an alternation of the given HIR values. /// /// This is like 'c_alt_iter', but it accepts a slice of HIR values instead /// of an iterator of compiled NFA subgraphs. The point of accepting a /// slice here is that it opens up some optimization opportunities. For /// example, if all of the HIR values are literals, then this routine might /// re-shuffle them to make NFA epsilon closures substantially faster. fn c_alt_slice(&self, exprs: &[Hir]) -> Result<ThompsonRef, BuildError> { // self.c_alt_iter(exprs.iter().map(|e| self.c(e))) let literal_count = exprs .iter() .filter(|e| { matches!(*e.kind(), hir::HirKind::Literal(hir::Literal(_))) }) .count(); if literal_count <= 1 || literal_count < exprs.len() { return self.c_alt_iter(exprs.iter().map(|e| self.c(e))); } let mut trie = if self.is_reverse() { LiteralTrie::reverse() } else { LiteralTrie::forward() }; for expr in exprs.iter() { let literal = match *expr.kind() { hir::HirKind::Literal(hir::Literal(ref bytes)) => bytes, _ => unreachable!(), }; trie.add(literal)?; } trie.compile(&mut self.builder.borrow_mut()) } /// Compile an alternation, where each element yielded by the given /// iterator represents an item in the alternation. If the iterator yields /// no elements, then this compiles down to a "fail" state. /// /// In an alternation, expressions appearing earlier are "preferred" at /// match time over expressions appearing later. At least, this is true /// when using "leftmost first" match semantics. (If "leftmost longest" are /// ever added in the future, then this preference order of priority would /// not apply in that mode.) fn c_alt_iter<I>(&self, mut it: I) -> Result<ThompsonRef, BuildError> where I: Iterator<Item = Result<ThompsonRef, BuildError>>, { let first = match it.next() { None => return self.c_fail(), Some(result) => result?, }; let second = match it.next() { None => return Ok(first), Some(result) => result?, }; let union = self.add_union()?; let end = self.add_empty()?; self.patch(union, first.start)?; self.patch(first.end, end)?; self.patch(union, second.start)?; self.patch(second.end, end)?; for result in it { let compiled = result?; self.patch(union, compiled.start)?; self.patch(compiled.end, end)?; } Ok(ThompsonRef { start: union, end }) } /// Compile the given capture sub-expression. `expr` should be the /// sub-expression contained inside the capture. If "capture" states are /// enabled, then they are added as appropriate. /// /// This accepts the pieces of a capture instead of a `hir::Capture` so /// that it's easy to manufacture a "fake" group when necessary, e.g., for /// adding the entire pattern as if it were a group in order to create /// appropriate "capture" states in the NFA. fn c_cap( &self, index: u32, name: Option<&str>, expr: &Hir, ) -> Result<ThompsonRef, BuildError> { match self.config.get_which_captures() { // No capture states means we always skip them. 
WhichCaptures::None => return self.c(expr), // Implicit captures states means we only add when index==0 since // index==0 implies the group is implicit. WhichCaptures::Implicit if index > 0 => return self.c(expr), _ => {} } let start = self.add_capture_start(index, name)?; let inner = self.c(expr)?; let end = self.add_capture_end(index)?; self.patch(start, inner.start)?; self.patch(inner.end, end)?; Ok(ThompsonRef { start, end }) } /// Compile the given repetition expression. This handles all types of /// repetitions and greediness. fn c_repetition( &self, rep: &hir::Repetition, ) -> Result<ThompsonRef, BuildError> { match (rep.min, rep.max) { (0, Some(1)) => self.c_zero_or_one(&rep.sub, rep.greedy), (min, None) => self.c_at_least(&rep.sub, rep.greedy, min), (min, Some(max)) if min == max => self.c_exactly(&rep.sub, min), (min, Some(max)) => self.c_bounded(&rep.sub, rep.greedy, min, max), } } /// Compile the given expression such that it matches at least `min` times, /// but no more than `max` times. /// /// When `greedy` is true, then the preference is for the expression to /// match as much as possible. Otherwise, it will match as little as /// possible. fn c_bounded( &self, expr: &Hir, greedy: bool, min: u32, max: u32, ) -> Result<ThompsonRef, BuildError> { let prefix = self.c_exactly(expr, min)?; if min == max { return Ok(prefix); } // It is tempting here to compile the rest here as a concatenation // of zero-or-one matches. i.e., for `a{2,5}`, compile it as if it // were `aaa?a?a?`. The problem here is that it leads to this program: // // >000000: 61 => 01 // 000001: 61 => 02 // 000002: union(03, 04) // 000003: 61 => 04 // 000004: union(05, 06) // 000005: 61 => 06 // 000006: union(07, 08) // 000007: 61 => 08 // 000008: MATCH // // And effectively, once you hit state 2, the epsilon closure will // include states 3, 5, 6, 7 and 8, which is quite a bit. It is better // to instead compile it like so: // // >000000: 61 => 01 // 000001: 61 => 02 // 000002: union(03, 08) // 000003: 61 => 04 // 000004: union(05, 08) // 000005: 61 => 06 // 000006: union(07, 08) // 000007: 61 => 08 // 000008: MATCH // // So that the epsilon closure of state 2 is now just 3 and 8. let empty = self.add_empty()?; let mut prev_end = prefix.end; for _ in min..max { let union = if greedy { self.add_union() } else { self.add_union_reverse() }?; let compiled = self.c(expr)?; self.patch(prev_end, union)?; self.patch(union, compiled.start)?; self.patch(union, empty)?; prev_end = compiled.end; } self.patch(prev_end, empty)?; Ok(ThompsonRef { start: prefix.start, end: empty }) } /// Compile the given expression such that it may be matched `n` or more /// times, where `n` can be any integer. (Although a particularly large /// integer is likely to run afoul of any configured size limits.) /// /// When `greedy` is true, then the preference is for the expression to /// match as much as possible. Otherwise, it will match as little as /// possible. fn c_at_least( &self, expr: &Hir, greedy: bool, n: u32, ) -> Result<ThompsonRef, BuildError> { if n == 0 { // When the expression cannot match the empty string, then we // can get away with something much simpler: just one 'alt' // instruction that optionally repeats itself. But if the expr // can match the empty string... see below. 
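            //
            // (Concretely, for 'a*' the simple case below produces a single
            // union state that either enters the compiled 'a' or falls
            // through, with the end of 'a' looping back into that same
            // union.)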
if expr.properties().minimum_len().map_or(false, |len| len > 0) { let union = if greedy { self.add_union() } else { self.add_union_reverse() }?; let compiled = self.c(expr)?; self.patch(union, compiled.start)?; self.patch(compiled.end, union)?; return Ok(ThompsonRef { start: union, end: union }); } // What's going on here? Shouldn't x* be simpler than this? It // turns out that when implementing leftmost-first (Perl-like) // match semantics, x* results in an incorrect preference order // when computing the transitive closure of states if and only if // 'x' can match the empty string. So instead, we compile x* as // (x+)?, which preserves the correct preference order. // // See: https://github.com/rust-lang/regex/issues/779 let compiled = self.c(expr)?; let plus = if greedy { self.add_union() } else { self.add_union_reverse() }?; self.patch(compiled.end, plus)?; self.patch(plus, compiled.start)?; let question = if greedy { self.add_union() } else { self.add_union_reverse() }?; let empty = self.add_empty()?; self.patch(question, compiled.start)?; self.patch(question, empty)?; self.patch(plus, empty)?; Ok(ThompsonRef { start: question, end: empty }) } else if n == 1 { let compiled = self.c(expr)?; let union = if greedy { self.add_union() } else { self.add_union_reverse() }?; self.patch(compiled.end, union)?; self.patch(union, compiled.start)?; Ok(ThompsonRef { start: compiled.start, end: union }) } else { let prefix = self.c_exactly(expr, n - 1)?; let last = self.c(expr)?; let union = if greedy { self.add_union() } else { self.add_union_reverse() }?; self.patch(prefix.end, last.start)?; self.patch(last.end, union)?; self.patch(union, last.start)?; Ok(ThompsonRef { start: prefix.start, end: union }) } } /// Compile the given expression such that it may be matched zero or one /// times. /// /// When `greedy` is true, then the preference is for the expression to /// match as much as possible. Otherwise, it will match as little as /// possible. fn c_zero_or_one( &self, expr: &Hir, greedy: bool, ) -> Result<ThompsonRef, BuildError> { let union = if greedy { self.add_union() } else { self.add_union_reverse() }?; let compiled = self.c(expr)?; let empty = self.add_empty()?; self.patch(union, compiled.start)?; self.patch(union, empty)?; self.patch(compiled.end, empty)?; Ok(ThompsonRef { start: union, end: empty }) } /// Compile the given HIR expression exactly `n` times. fn c_exactly( &self, expr: &Hir, n: u32, ) -> Result<ThompsonRef, BuildError> { let it = (0..n).map(|_| self.c(expr)); self.c_concat(it) } /// Compile the given byte oriented character class. /// /// This uses "sparse" states to represent an alternation between ranges in /// this character class. We can use "sparse" states instead of stitching /// together a "union" state because all ranges in a character class have /// equal priority *and* are non-overlapping (thus, only one can match, so /// there's never a question of priority in the first place). This saves a /// fair bit of overhead when traversing an NFA. /// /// This routine compiles an empty character class into a "fail" state. fn c_byte_class( &self, cls: &hir::ClassBytes, ) -> Result<ThompsonRef, BuildError> { let end = self.add_empty()?; let mut trans = Vec::with_capacity(cls.ranges().len()); for r in cls.iter() { trans.push(Transition { start: r.start(), end: r.end(), next: end, }); } Ok(ThompsonRef { start: self.add_sparse(trans)?, end }) } /// Compile the given Unicode character class. 
    ///
    /// This routine specifically tries to use various types of compression,
    /// since UTF-8 automata of large classes can get quite large. The specific
    /// type of compression used depends on forward vs reverse compilation, and
    /// whether NFA shrinking is enabled or not.
    ///
    /// Aside from repetitions causing lots of repeated work, this is likely
    /// the single most expensive part of regex compilation. Therefore, a large
    /// part of the expense of compilation may be reduced by disabling Unicode
    /// in the pattern.
    ///
    /// This routine compiles an empty character class into a "fail" state.
    fn c_unicode_class(
        &self,
        cls: &hir::ClassUnicode,
    ) -> Result<ThompsonRef, BuildError> {
        // If all we have are ASCII ranges wrapped in a Unicode package, then
        // there is zero reason to bring out the big guns. We can fit all ASCII
        // ranges within a single sparse state.
        if cls.is_ascii() {
            let end = self.add_empty()?;
            let mut trans = Vec::with_capacity(cls.ranges().len());
            for r in cls.iter() {
                // The unwraps below are OK because we've verified that this
                // class only contains ASCII codepoints.
                trans.push(Transition {
                    // FIXME(1.59): use the 'TryFrom<char> for u8' impl.
                    start: u8::try_from(u32::from(r.start())).unwrap(),
                    end: u8::try_from(u32::from(r.end())).unwrap(),
                    next: end,
                });
            }
            Ok(ThompsonRef { start: self.add_sparse(trans)?, end })
        } else if self.is_reverse() {
            if !self.config.get_shrink() {
                // When we don't want to spend the extra time shrinking, we
                // compile the UTF-8 automaton in reverse using something like
                // the "naive" approach, but will attempt to re-use common
                // suffixes.
                self.c_unicode_class_reverse_with_suffix(cls)
            } else {
                // When we want to shrink our NFA for reverse UTF-8 automata,
                // we cannot feed UTF-8 sequences directly to the UTF-8
                // compiler, since the UTF-8 compiler requires all sequences
                // to be lexicographically sorted. Instead, we organize our
                // sequences into a range trie, which can then output our
                // sequences in the correct order. Unfortunately, building the
                // range trie is fairly expensive (but not nearly as expensive
                // as building a DFA). Hence the reason why the 'shrink' option
                // exists, so that this path can be toggled off. For example,
                // we might want to turn this off if we know we won't be
                // compiling a DFA.
                let mut trie = self.trie_state.borrow_mut();
                trie.clear();
                for rng in cls.iter() {
                    for mut seq in Utf8Sequences::new(rng.start(), rng.end()) {
                        seq.reverse();
                        trie.insert(seq.as_slice());
                    }
                }
                let mut builder = self.builder.borrow_mut();
                let mut utf8_state = self.utf8_state.borrow_mut();
                let mut utf8c =
                    Utf8Compiler::new(&mut *builder, &mut *utf8_state)?;
                trie.iter(|seq| {
                    utf8c.add(&seq)?;
                    Ok(())
                })?;
                utf8c.finish()
            }
        } else {
            // In the forward direction, we always shrink our UTF-8 automata
            // because we can stream it right into the UTF-8 compiler. There
            // is almost no downside (in either memory or time) to using this
            // approach.
            let mut builder = self.builder.borrow_mut();
            let mut utf8_state = self.utf8_state.borrow_mut();
            let mut utf8c =
                Utf8Compiler::new(&mut *builder, &mut *utf8_state)?;
            for rng in cls.iter() {
                for seq in Utf8Sequences::new(rng.start(), rng.end()) {
                    utf8c.add(seq.as_slice())?;
                }
            }
            utf8c.finish()
        }

        // For reference, the code below is the "naive" version of compiling a
        // UTF-8 automaton. It is deliciously simple (and works for both the
        // forward and reverse cases), but will unfortunately produce very
        // large NFAs. When compiling a forward automaton, the size difference
        // can sometimes be an order of magnitude.
        // For example, the '\w' regex will generate about ~3000 NFA states
        // using the naive approach below, but only 283 states when using the
        // approach above. This is because the approach above actually
        // compiles a *minimal* (or near minimal, because of the bounded
        // hashmap for reusing equivalent states) UTF-8 automaton.
        //
        // The code below is kept as a reference point in order to make it
        // easier to understand the higher level goal here. Although, it will
        // almost certainly bit-rot, so keep that in mind. Also, if you try to
        // use it, some of the tests in this module will fail because they look
        // for terser byte code produced by the more optimized handling above.
        // But the integration test suite should still pass.
        //
        // One good example of the substantial difference this can make is to
        // compare and contrast performance of the Pike VM when the code below
        // is active vs the code above. Here's an example to try:
        //
        // regex-cli find match pikevm -b -p '(?m)^\w{20}' -y '@$smallishru'
        //
        // With Unicode classes generated below, this search takes about 45s on
        // my machine. But with the compressed version above, the search takes
        // only around 1.4s. The NFA is also 20% smaller. This is in part due
        // to the compression, but also because of the utilization of 'sparse'
        // NFA states. They lead to much less state shuffling during the NFA
        // search.
        /*
        let it = cls
            .iter()
            .flat_map(|rng| Utf8Sequences::new(rng.start(), rng.end()))
            .map(|seq| {
                let it = seq
                    .as_slice()
                    .iter()
                    .map(|rng| self.c_range(rng.start, rng.end));
                self.c_concat(it)
            });
        self.c_alt_iter(it)
        */
    }

    /// Compile the given Unicode character class in reverse with suffix
    /// caching.
    ///
    /// This is a "quick" way to compile large Unicode classes into reverse
    /// UTF-8 automata while doing a small amount of compression on that
    /// automaton by reusing common suffixes.
    ///
    /// A more comprehensive compression scheme can be accomplished by using
    /// a range trie to efficiently sort a reverse sequence of UTF-8 byte
    /// ranges, and then using Daciuk's algorithm via `Utf8Compiler`.
    ///
    /// This is the technique used when "NFA shrinking" is disabled.
    ///
    /// (This also tries to use "sparse" states where possible, just like
    /// `c_byte_class` does.)
    fn c_unicode_class_reverse_with_suffix(
        &self,
        cls: &hir::ClassUnicode,
    ) -> Result<ThompsonRef, BuildError> {
        // N.B. It would likely be better to cache common *prefixes* in the
        // reverse direction, but it's not quite clear how to do that. The
        // advantage of caching suffixes is that it does give us a win, and
        // has a very small additional overhead.
        let mut cache = self.utf8_suffix.borrow_mut();
        cache.clear();

        let union = self.add_union()?;
        let alt_end = self.add_empty()?;
        for urng in cls.iter() {
            for seq in Utf8Sequences::new(urng.start(), urng.end()) {
                let mut end = alt_end;
                for brng in seq.as_slice() {
                    let key = Utf8SuffixKey {
                        from: end,
                        start: brng.start,
                        end: brng.end,
                    };
                    let hash = cache.hash(&key);
                    if let Some(id) = cache.get(&key, hash) {
                        end = id;
                        continue;
                    }

                    let compiled = self.c_range(brng.start, brng.end)?;
                    self.patch(compiled.end, end)?;
                    end = compiled.start;
                    cache.set(key, hash, end);
                }
                self.patch(union, end)?;
            }
        }
        Ok(ThompsonRef { start: union, end: alt_end })
    }

    /// Compile the given HIR look-around assertion to an NFA look-around
    /// assertion.
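    ///
    /// (When compiling in reverse mode, the assertion itself is flipped by
    /// the `add_look` helper below, e.g., `Start` becomes `End`.)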
fn c_look(&self, anchor: &hir::Look) -> Result<ThompsonRef, BuildError> { let look = match *anchor { hir::Look::Start => Look::Start, hir::Look::End => Look::End, hir::Look::StartLF => Look::StartLF, hir::Look::EndLF => Look::EndLF, hir::Look::StartCRLF => Look::StartCRLF, hir::Look::EndCRLF => Look::EndCRLF, hir::Look::WordAscii => Look::WordAscii, hir::Look::WordAsciiNegate => Look::WordAsciiNegate, hir::Look::WordUnicode => Look::WordUnicode, hir::Look::WordUnicodeNegate => Look::WordUnicodeNegate, }; let id = self.add_look(look)?; Ok(ThompsonRef { start: id, end: id }) } /// Compile the given byte string to a concatenation of bytes. fn c_literal(&self, bytes: &[u8]) -> Result<ThompsonRef, BuildError> { self.c_concat(bytes.iter().copied().map(|b| self.c_range(b, b))) } /// Compile a "range" state with one transition that may only be followed /// if the input byte is in the (inclusive) range given. /// /// Both the `start` and `end` locations point to the state created. /// Callers will likely want to keep the `start`, but patch the `end` to /// point to some other state. fn c_range(&self, start: u8, end: u8) -> Result<ThompsonRef, BuildError> { let id = self.add_range(start, end)?; Ok(ThompsonRef { start: id, end: id }) } /// Compile an "empty" state with one unconditional epsilon transition. /// /// Both the `start` and `end` locations point to the state created. /// Callers will likely want to keep the `start`, but patch the `end` to /// point to some other state. fn c_empty(&self) -> Result<ThompsonRef, BuildError> { let id = self.add_empty()?; Ok(ThompsonRef { start: id, end: id }) } /// Compile a "fail" state that can never have any outgoing transitions. fn c_fail(&self) -> Result<ThompsonRef, BuildError> { let id = self.add_fail()?; Ok(ThompsonRef { start: id, end: id }) } // The below helpers are meant to be simple wrappers around the // corresponding Builder methods. For the most part, they let us write // 'self.add_foo()' instead of 'self.builder.borrow_mut().add_foo()', where // the latter is a mouthful. Some of the methods do inject a little bit // of extra logic. e.g., Flipping look-around operators when compiling in // reverse mode. 
    fn patch(&self, from: StateID, to: StateID) -> Result<(), BuildError> {
        self.builder.borrow_mut().patch(from, to)
    }

    fn start_pattern(&self) -> Result<PatternID, BuildError> {
        self.builder.borrow_mut().start_pattern()
    }

    fn finish_pattern(
        &self,
        start_id: StateID,
    ) -> Result<PatternID, BuildError> {
        self.builder.borrow_mut().finish_pattern(start_id)
    }

    fn add_empty(&self) -> Result<StateID, BuildError> {
        self.builder.borrow_mut().add_empty()
    }

    fn add_range(&self, start: u8, end: u8) -> Result<StateID, BuildError> {
        self.builder.borrow_mut().add_range(Transition {
            start,
            end,
            next: StateID::ZERO,
        })
    }

    fn add_sparse(
        &self,
        ranges: Vec<Transition>,
    ) -> Result<StateID, BuildError> {
        self.builder.borrow_mut().add_sparse(ranges)
    }

    fn add_look(&self, mut look: Look) -> Result<StateID, BuildError> {
        if self.is_reverse() {
            look = look.reversed();
        }
        self.builder.borrow_mut().add_look(StateID::ZERO, look)
    }

    fn add_union(&self) -> Result<StateID, BuildError> {
        self.builder.borrow_mut().add_union(vec![])
    }

    fn add_union_reverse(&self) -> Result<StateID, BuildError> {
        self.builder.borrow_mut().add_union_reverse(vec![])
    }

    fn add_capture_start(
        &self,
        capture_index: u32,
        name: Option<&str>,
    ) -> Result<StateID, BuildError> {
        let name = name.map(|n| Arc::from(n));
        self.builder.borrow_mut().add_capture_start(
            StateID::ZERO,
            capture_index,
            name,
        )
    }

    fn add_capture_end(
        &self,
        capture_index: u32,
    ) -> Result<StateID, BuildError> {
        self.builder.borrow_mut().add_capture_end(StateID::ZERO, capture_index)
    }

    fn add_fail(&self) -> Result<StateID, BuildError> {
        self.builder.borrow_mut().add_fail()
    }

    fn add_match(&self) -> Result<StateID, BuildError> {
        self.builder.borrow_mut().add_match()
    }

    fn is_reverse(&self) -> bool {
        self.config.get_reverse()
    }
}

/// A value that represents the result of compiling a sub-expression of a
/// regex's HIR. Specifically, this represents a sub-graph of the NFA that
/// has an initial state at `start` and a final state at `end`.
#[derive(Clone, Copy, Debug)]
pub(crate) struct ThompsonRef {
    pub(crate) start: StateID,
    pub(crate) end: StateID,
}

/// A UTF-8 compiler based on Daciuk's algorithm for compiling minimal DFAs
/// from a lexicographically sorted sequence of strings in linear time.
///
/// The trick here is that any Unicode codepoint range can be converted to
/// a sequence of byte ranges that form a UTF-8 automaton. Connecting them
/// together via an alternation is trivial, and indeed, it works. However,
/// there is a lot of redundant structure in many UTF-8 automatons. Since our
/// UTF-8 ranges are in lexicographic order, we can use Daciuk's algorithm
/// to build nearly minimal DFAs in linear time. (They are not guaranteed to
/// be minimal because we use a bounded cache of previously built DFA states.)
///
/// The drawback is that this sadly doesn't work for reverse automata, since
/// the ranges are no longer in lexicographic order. For that, we invented the
/// range trie (which gets its own module). Once a range trie is built, we then
/// use this same Utf8Compiler to build a reverse UTF-8 automaton.
///
/// The high level idea is described here:
/// https://blog.burntsushi.net/transducers/#finite-state-machines-as-data-structures
///
/// There is also another implementation of this in the `fst` crate.
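///
/// For a rough sense of the input this compiler consumes: a small codepoint
/// range like `[\u03B1-\u03B4]` corresponds to a single sequence of UTF-8
/// byte ranges, roughly `[CE][B1-B4]`, while larger ranges expand into many
/// such sequences that share a great deal of structure. (The
/// `compile_class_unicode` test below shows the NFA states this produces.)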
#[derive(Debug)] struct Utf8Compiler<'a> { builder: &'a mut Builder, state: &'a mut Utf8State, target: StateID, } #[derive(Clone, Debug)] struct Utf8State { compiled: Utf8BoundedMap, uncompiled: Vec<Utf8Node>, } #[derive(Clone, Debug)] struct Utf8Node { trans: Vec<Transition>, last: Option<Utf8LastTransition>, } #[derive(Clone, Debug)] struct Utf8LastTransition { start: u8, end: u8, } impl Utf8State { fn new() -> Utf8State { Utf8State { compiled: Utf8BoundedMap::new(10_000), uncompiled: vec![] } } fn clear(&mut self) { self.compiled.clear(); self.uncompiled.clear(); } } impl<'a> Utf8Compiler<'a> { fn new( builder: &'a mut Builder, state: &'a mut Utf8State, ) -> Result<Utf8Compiler<'a>, BuildError> { let target = builder.add_empty()?; state.clear(); let mut utf8c = Utf8Compiler { builder, state, target }; utf8c.add_empty(); Ok(utf8c) } fn finish(&mut self) -> Result<ThompsonRef, BuildError> { self.compile_from(0)?; let node = self.pop_root(); let start = self.compile(node)?; Ok(ThompsonRef { start, end: self.target }) } fn add(&mut self, ranges: &[Utf8Range]) -> Result<(), BuildError> { let prefix_len = ranges .iter() .zip(&self.state.uncompiled) .take_while(|&(range, node)| { node.last.as_ref().map_or(false, |t| { (t.start, t.end) == (range.start, range.end) }) }) .count(); assert!(prefix_len < ranges.len()); self.compile_from(prefix_len)?; self.add_suffix(&ranges[prefix_len..]); Ok(()) } fn compile_from(&mut self, from: usize) -> Result<(), BuildError> { let mut next = self.target; while from + 1 < self.state.uncompiled.len() { let node = self.pop_freeze(next); next = self.compile(node)?; } self.top_last_freeze(next); Ok(()) } fn compile( &mut self, node: Vec<Transition>, ) -> Result<StateID, BuildError> { let hash = self.state.compiled.hash(&node); if let Some(id) = self.state.compiled.get(&node, hash) { return Ok(id); } let id = self.builder.add_sparse(node.clone())?; self.state.compiled.set(node, hash, id); Ok(id) } fn add_suffix(&mut self, ranges: &[Utf8Range]) { assert!(!ranges.is_empty()); let last = self .state .uncompiled .len() .checked_sub(1) .expect("non-empty nodes"); assert!(self.state.uncompiled[last].last.is_none()); self.state.uncompiled[last].last = Some(Utf8LastTransition { start: ranges[0].start, end: ranges[0].end, }); for r in &ranges[1..] 
{ self.state.uncompiled.push(Utf8Node { trans: vec![], last: Some(Utf8LastTransition { start: r.start, end: r.end }), }); } } fn add_empty(&mut self) { self.state.uncompiled.push(Utf8Node { trans: vec![], last: None }); } fn pop_freeze(&mut self, next: StateID) -> Vec<Transition> { let mut uncompiled = self.state.uncompiled.pop().unwrap(); uncompiled.set_last_transition(next); uncompiled.trans } fn pop_root(&mut self) -> Vec<Transition> { assert_eq!(self.state.uncompiled.len(), 1); assert!(self.state.uncompiled[0].last.is_none()); self.state.uncompiled.pop().expect("non-empty nodes").trans } fn top_last_freeze(&mut self, next: StateID) { let last = self .state .uncompiled .len() .checked_sub(1) .expect("non-empty nodes"); self.state.uncompiled[last].set_last_transition(next); } } impl Utf8Node { fn set_last_transition(&mut self, next: StateID) { if let Some(last) = self.last.take() { self.trans.push(Transition { start: last.start, end: last.end, next, }); } } } #[cfg(test)] mod tests { use alloc::{vec, vec::Vec}; use crate::{ nfa::thompson::{SparseTransitions, State, Transition, NFA}, util::primitives::{PatternID, SmallIndex, StateID}, }; use super::*; fn build(pattern: &str) -> NFA { NFA::compiler() .configure( NFA::config() .which_captures(WhichCaptures::None) .unanchored_prefix(false), ) .build(pattern) .unwrap() } fn pid(id: usize) -> PatternID { PatternID::new(id).unwrap() } fn sid(id: usize) -> StateID { StateID::new(id).unwrap() } fn s_byte(byte: u8, next: usize) -> State { let next = sid(next); let trans = Transition { start: byte, end: byte, next }; State::ByteRange { trans } } fn s_range(start: u8, end: u8, next: usize) -> State { let next = sid(next); let trans = Transition { start, end, next }; State::ByteRange { trans } } fn s_sparse(transitions: &[(u8, u8, usize)]) -> State { let transitions = transitions .iter() .map(|&(start, end, next)| Transition { start, end, next: sid(next), }) .collect(); State::Sparse(SparseTransitions { transitions }) } fn s_bin_union(alt1: usize, alt2: usize) -> State { State::BinaryUnion { alt1: sid(alt1), alt2: sid(alt2) } } fn s_union(alts: &[usize]) -> State { State::Union { alternates: alts .iter() .map(|&id| sid(id)) .collect::<Vec<StateID>>() .into_boxed_slice(), } } fn s_cap(next: usize, pattern: usize, index: usize, slot: usize) -> State { State::Capture { next: sid(next), pattern_id: pid(pattern), group_index: SmallIndex::new(index).unwrap(), slot: SmallIndex::new(slot).unwrap(), } } fn s_fail() -> State { State::Fail } fn s_match(id: usize) -> State { State::Match { pattern_id: pid(id) } } // Test that building an unanchored NFA has an appropriate `(?s:.)*?` // prefix. #[test] fn compile_unanchored_prefix() { let nfa = NFA::compiler() .configure(NFA::config().which_captures(WhichCaptures::None)) .build(r"a") .unwrap(); assert_eq!( nfa.states(), &[ s_bin_union(2, 1), s_range(0, 255, 0), s_byte(b'a', 3), s_match(0), ] ); } #[test] fn compile_empty() { assert_eq!(build("").states(), &[s_match(0),]); } #[test] fn compile_literal() { assert_eq!(build("a").states(), &[s_byte(b'a', 1), s_match(0),]); assert_eq!( build("ab").states(), &[s_byte(b'a', 1), s_byte(b'b', 2), s_match(0),] ); assert_eq!( build("☃").states(), &[s_byte(0xE2, 1), s_byte(0x98, 2), s_byte(0x83, 3), s_match(0)] ); // Check that non-UTF-8 literals work. 
let nfa = NFA::compiler() .configure( NFA::config() .which_captures(WhichCaptures::None) .unanchored_prefix(false), ) .syntax(crate::util::syntax::Config::new().utf8(false)) .build(r"(?-u)\xFF") .unwrap(); assert_eq!(nfa.states(), &[s_byte(b'\xFF', 1), s_match(0),]); } #[test] fn compile_class_ascii() { assert_eq!( build(r"[a-z]").states(), &[s_range(b'a', b'z', 1), s_match(0),] ); assert_eq!( build(r"[x-za-c]").states(), &[s_sparse(&[(b'a', b'c', 1), (b'x', b'z', 1)]), s_match(0)] ); } #[test] #[cfg(not(miri))] fn compile_class_unicode() { assert_eq!( build(r"[\u03B1-\u03B4]").states(), &[s_range(0xB1, 0xB4, 2), s_byte(0xCE, 0), s_match(0)] ); assert_eq!( build(r"[\u03B1-\u03B4\u{1F919}-\u{1F91E}]").states(), &[ s_range(0xB1, 0xB4, 5), s_range(0x99, 0x9E, 5), s_byte(0xA4, 1), s_byte(0x9F, 2), s_sparse(&[(0xCE, 0xCE, 0), (0xF0, 0xF0, 3)]), s_match(0), ] ); assert_eq!( build(r"[a-z☃]").states(), &[ s_byte(0x83, 3), s_byte(0x98, 0), s_sparse(&[(b'a', b'z', 3), (0xE2, 0xE2, 1)]), s_match(0), ] ); } #[test] fn compile_repetition() { assert_eq!( build(r"a?").states(), &[s_bin_union(1, 2), s_byte(b'a', 2), s_match(0),] ); assert_eq!( build(r"a??").states(), &[s_bin_union(2, 1), s_byte(b'a', 2), s_match(0),] ); } #[test] fn compile_group() { assert_eq!( build(r"ab+").states(), &[s_byte(b'a', 1), s_byte(b'b', 2), s_bin_union(1, 3), s_match(0)] ); assert_eq!( build(r"(ab)").states(), &[s_byte(b'a', 1), s_byte(b'b', 2), s_match(0)] ); assert_eq!( build(r"(ab)+").states(), &[s_byte(b'a', 1), s_byte(b'b', 2), s_bin_union(0, 3), s_match(0)] ); } #[test] fn compile_alternation() { assert_eq!( build(r"a|b").states(), &[s_range(b'a', b'b', 1), s_match(0)] ); assert_eq!( build(r"ab|cd").states(), &[ s_byte(b'b', 3), s_byte(b'd', 3), s_sparse(&[(b'a', b'a', 0), (b'c', b'c', 1)]), s_match(0) ], ); assert_eq!( build(r"|b").states(), &[s_byte(b'b', 2), s_bin_union(2, 0), s_match(0)] ); assert_eq!( build(r"a|").states(), &[s_byte(b'a', 2), s_bin_union(0, 2), s_match(0)] ); } // This tests the use of a non-binary union, i.e., a state with more than // 2 unconditional epsilon transitions. The only place they tend to appear // is in reverse NFAs when shrinking is disabled. Otherwise, 'binary-union' // and 'sparse' tend to cover all other cases of alternation. #[test] fn compile_non_binary_union() { let nfa = NFA::compiler() .configure( NFA::config() .which_captures(WhichCaptures::None) .reverse(true) .shrink(false) .unanchored_prefix(false), ) .build(r"[\u1000\u2000\u3000]") .unwrap(); assert_eq!( nfa.states(), &[ s_union(&[3, 6, 9]), s_byte(0xE1, 10), s_byte(0x80, 1), s_byte(0x80, 2), s_byte(0xE2, 10), s_byte(0x80, 4), s_byte(0x80, 5), s_byte(0xE3, 10), s_byte(0x80, 7), s_byte(0x80, 8), s_match(0), ] ); } #[test] fn compile_many_start_pattern() { let nfa = NFA::compiler() .configure( NFA::config() .which_captures(WhichCaptures::None) .unanchored_prefix(false), ) .build_many(&["a", "b"]) .unwrap(); assert_eq!( nfa.states(), &[ s_byte(b'a', 1), s_match(0), s_byte(b'b', 3), s_match(1), s_bin_union(0, 2), ] ); assert_eq!(nfa.start_anchored().as_usize(), 4); assert_eq!(nfa.start_unanchored().as_usize(), 4); // Test that the start states for each individual pattern are correct. assert_eq!(nfa.start_pattern(pid(0)).unwrap(), sid(0)); assert_eq!(nfa.start_pattern(pid(1)).unwrap(), sid(2)); } // This tests that our compiler can handle an empty character class. At the // time of writing, the regex parser forbids it, so the only way to test it // is to provide a hand written HIR. 
#[test] fn empty_class_bytes() { use regex_syntax::hir::{Class, ClassBytes, Hir}; let hir = Hir::class(Class::Bytes(ClassBytes::new(vec![]))); let config = NFA::config() .which_captures(WhichCaptures::None) .unanchored_prefix(false); let nfa = NFA::compiler().configure(config).build_from_hir(&hir).unwrap(); assert_eq!(nfa.states(), &[s_fail(), s_match(0)]); } // Like empty_class_bytes, but for a Unicode class. #[test] fn empty_class_unicode() { use regex_syntax::hir::{Class, ClassUnicode, Hir}; let hir = Hir::class(Class::Unicode(ClassUnicode::new(vec![]))); let config = NFA::config() .which_captures(WhichCaptures::None) .unanchored_prefix(false); let nfa = NFA::compiler().configure(config).build_from_hir(&hir).unwrap(); assert_eq!(nfa.states(), &[s_fail(), s_match(0)]); } #[test] fn compile_captures_all() { let nfa = NFA::compiler() .configure( NFA::config() .unanchored_prefix(false) .which_captures(WhichCaptures::All), ) .build("a(b)c") .unwrap(); assert_eq!( nfa.states(), &[ s_cap(1, 0, 0, 0), s_byte(b'a', 2), s_cap(3, 0, 1, 2), s_byte(b'b', 4), s_cap(5, 0, 1, 3), s_byte(b'c', 6), s_cap(7, 0, 0, 1), s_match(0) ] ); let ginfo = nfa.group_info(); assert_eq!(2, ginfo.all_group_len()); } #[test] fn compile_captures_implicit() { let nfa = NFA::compiler() .configure( NFA::config() .unanchored_prefix(false) .which_captures(WhichCaptures::Implicit), ) .build("a(b)c") .unwrap(); assert_eq!( nfa.states(), &[ s_cap(1, 0, 0, 0), s_byte(b'a', 2), s_byte(b'b', 3), s_byte(b'c', 4), s_cap(5, 0, 0, 1), s_match(0) ] ); let ginfo = nfa.group_info(); assert_eq!(1, ginfo.all_group_len()); } #[test] fn compile_captures_none() { let nfa = NFA::compiler() .configure( NFA::config() .unanchored_prefix(false) .which_captures(WhichCaptures::None), ) .build("a(b)c") .unwrap(); assert_eq!( nfa.states(), &[s_byte(b'a', 1), s_byte(b'b', 2), s_byte(b'c', 3), s_match(0)] ); let ginfo = nfa.group_info(); assert_eq!(0, ginfo.all_group_len()); } } <file_sep>/regex-automata/src/hybrid/mod.rs /*! A module for building and searching with lazy deterministic finite automata (DFAs). Like other modules in this crate, lazy DFAs support a rich regex syntax with Unicode features. The key feature of a lazy DFA is that it builds itself incrementally during search, and never uses more than a configured capacity of memory. Thus, when searching with a lazy DFA, one must supply a mutable "cache" in which the actual DFA's transition table is stored. If you're looking for fully compiled DFAs, then please see the top-level [`dfa` module](crate::dfa). # Overview This section gives a brief overview of the primary types in this module: * A [`regex::Regex`] provides a way to search for matches of a regular expression using lazy DFAs. This includes iterating over matches with both the start and end positions of each match. * A [`dfa::DFA`] provides direct low level access to a lazy DFA. 
# Example: basic regex searching This example shows how to compile a regex using the default configuration and then use it to find matches in a byte string: ``` use regex_automata::{hybrid::regex::Regex, Match}; let re = Regex::new(r"[0-9]{4}-[0-9]{2}-[0-9]{2}")?; let mut cache = re.create_cache(); let haystack = "2018-12-24 2016-10-08"; let matches: Vec<Match> = re.find_iter(&mut cache, haystack).collect(); assert_eq!(matches, vec![ Match::must(0, 0..10), Match::must(0, 11..21), ]); # Ok::<(), Box<dyn std::error::Error>>(()) ``` # Example: searching with multiple regexes The lazy DFAs in this module all fully support searching with multiple regexes simultaneously. You can use this support with standard leftmost-first style searching to find non-overlapping matches: ``` # if cfg!(miri) { return Ok(()); } // miri takes too long use regex_automata::{hybrid::regex::Regex, Match}; let re = Regex::new_many(&[r"\w+", r"\S+"])?; let mut cache = re.create_cache(); let haystack = "@foo bar"; let matches: Vec<Match> = re.find_iter(&mut cache, haystack).collect(); assert_eq!(matches, vec![ Match::must(1, 0..4), Match::must(0, 5..8), ]); # Ok::<(), Box<dyn std::error::Error>>(()) ``` # When should I use this? Generally speaking, if you can abide the use of mutable state during search, and you don't need things like capturing groups or Unicode word boundary support in non-ASCII text, then a lazy DFA is likely a robust choice with respect to both search speed and memory usage. Note however that its speed may be worse than a general purpose regex engine if you don't select a good [prefilter](crate::util::prefilter). If you know ahead of time that your pattern would result in a very large DFA if it was fully compiled, it may be better to use an NFA simulation instead of a lazy DFA. Either that, or increase the cache capacity of your lazy DFA to something that is big enough to hold the state machine (likely through experimentation). The issue here is that if the cache is too small, then it could wind up being reset too frequently and this might decrease searching speed significantly. # Differences with fully compiled DFAs A [`hybrid::regex::Regex`](crate::hybrid::regex::Regex) and a [`dfa::regex::Regex`](crate::dfa::regex::Regex) both have the same capabilities (and similarly for their underlying DFAs), but they achieve them through different means. The main difference is that a hybrid or "lazy" regex builds its DFA lazily during search, where as a fully compiled regex will build its DFA at construction time. While building a DFA at search time might sound like it's slow, it tends to work out where most bytes seen during a search will reuse pre-built parts of the DFA and thus can be almost as fast as a fully compiled DFA. The main downside is that searching requires mutable space to store the DFA, and, in the worst case, a search can result in a new state being created for each byte seen, which would make searching quite a bit slower. A fully compiled DFA never has to worry about searches being slower once it's built. (Aside from, say, the transition table being so large that it is subject to harsh CPU cache effects.) However, of course, building a full DFA can be quite time consuming and memory hungry. Particularly when large Unicode character classes are used, which tend to translate into very large DFAs. A lazy DFA strikes a nice balance _in practice_, particularly in the presence of Unicode mode, by only building what is needed. 
It avoids the worst case exponential time complexity of DFA compilation by guaranteeing that it will only build at most one state per byte searched. While the worst case here can lead to a very high constant, it will never be exponential. # Syntax This module supports the same syntax as the `regex` crate, since they share the same parser. You can find an exhaustive list of supported syntax in the [documentation for the `regex` crate](https://docs.rs/regex/1/regex/#syntax). There are two things that are not supported by the lazy DFAs in this module: * Capturing groups. The DFAs (and [`Regex`](regex::Regex)es built on top of them) can only find the offsets of an entire match, but cannot resolve the offsets of each capturing group. This is because DFAs do not have the expressive power necessary. Note that it is okay to build a lazy DFA from an NFA that contains capture groups. The capture groups will simply be ignored. * Unicode word boundaries. These present particularly difficult challenges for DFA construction and would result in an explosion in the number of states. One can enable [`dfa::Config::unicode_word_boundary`] though, which provides heuristic support for Unicode word boundaries that only works on ASCII text. Otherwise, one can use `(?-u:\b)` for an ASCII word boundary, which will work on any input. There are no plans to lift either of these limitations. Note that these restrictions are identical to the restrictions on fully compiled DFAs. */ pub use self::{ error::{BuildError, CacheError}, id::LazyStateID, }; pub mod dfa; mod error; mod id; pub mod regex; mod search; <file_sep>/regex-lite/tests/string.rs use { anyhow::Result, regex_lite::{Regex, RegexBuilder}, regex_test::{ CompiledRegex, Match, RegexTest, Span, TestResult, TestRunner, }, }; /// Tests the default configuration of the hybrid NFA/DFA. #[test] fn default() -> Result<()> { let mut runner = TestRunner::new()?; runner .expand(&["is_match", "find", "captures"], |test| test.compiles()) .blacklist_iter(super::BLACKLIST) .test_iter(crate::suite()?.iter(), compiler) .assert(); Ok(()) } fn run_test(re: &Regex, test: &RegexTest) -> TestResult { let hay = match std::str::from_utf8(test.haystack()) { Ok(hay) => hay, Err(err) => { return TestResult::fail(&format!( "haystack is not valid UTF-8: {}", err )); } }; match test.additional_name() { "is_match" => TestResult::matched(re.is_match(hay)), "find" => TestResult::matches( re.find_iter(hay) .take(test.match_limit().unwrap_or(std::usize::MAX)) .map(|m| Match { id: 0, span: Span { start: m.start(), end: m.end() }, }), ), "captures" => { let it = re .captures_iter(hay) .take(test.match_limit().unwrap_or(std::usize::MAX)) .map(|caps| testify_captures(&caps)); TestResult::captures(it) } name => TestResult::fail(&format!("unrecognized test name: {}", name)), } } /// Converts the given regex test to a closure that searches with a /// `bytes::Regex`. If the test configuration is unsupported, then a /// `CompiledRegex` that skips the test is returned. fn compiler( test: &RegexTest, _patterns: &[String], ) -> anyhow::Result<CompiledRegex> { let Some(pattern) = skip_or_get_pattern(test) else { return Ok(CompiledRegex::skip()); }; let re = RegexBuilder::new(pattern) .case_insensitive(test.case_insensitive()) .build()?; Ok(CompiledRegex::compiled(move |test| run_test(&re, test))) } /// Whether we should skip the given test or not. If not, return the single /// pattern from the given test. 
fn skip_or_get_pattern(test: &RegexTest) -> Option<&str> { // We're only testing Regex here, which supports one pattern only. let pattern = match test.regexes().len() { 1 => &test.regexes()[0], _ => return None, }; // If the test name contains 'regex-lite', then we ALWAYS run it. Because // those tests are specifically designed for regex-lite. So if they fail, // then something needs attention. if test.full_name().contains("regex-lite/") { return Some(pattern); } // If the pattern has a \p in it, then we almost certainly don't support // it. This probably skips more than we intend, but there are likely very // few tests that contain a \p that isn't also a Unicode class. if pattern.contains(r"\p") || pattern.contains(r"\P") { return None; } // Similar deal for Perl classes, but we can abide them if the haystack // is ASCII-only. if !test.haystack().is_ascii() { if pattern.contains(r"\d") || pattern.contains(r"\D") { return None; } if pattern.contains(r"\s") || pattern.contains(r"\S") { return None; } if pattern.contains(r"\w") || pattern.contains(r"\W") { return None; } } // And also same deal for word boundaries. if !test.haystack().is_ascii() { if pattern.contains(r"\b") || pattern.contains(r"\B") { return None; } } // We only test is_match, find_iter and captures_iter. All of those are // leftmost searches. if !matches!(test.search_kind(), regex_test::SearchKind::Leftmost) { return None; } // The top-level single-pattern regex API always uses leftmost-first. if !matches!(test.match_kind(), regex_test::MatchKind::LeftmostFirst) { return None; } // The top-level regex API always runs unanchored searches. ... But we can // handle tests that are anchored but have only one match. if test.anchored() && test.match_limit() != Some(1) { return None; } // We don't support tests with explicit search bounds. We could probably // support this by using the 'find_at' (and such) APIs. let bounds = test.bounds(); if !(bounds.start == 0 && bounds.end == test.haystack().len()) { return None; } // The Regex API specifically does not support disabling UTF-8 mode because // it can only search &str which is always valid UTF-8. if !test.utf8() { return None; } // regex-lite doesn't support Unicode-aware case insensitive matching. if test.case_insensitive() && (!pattern.is_ascii() || !test.haystack().is_ascii()) { return None; } Some(pattern) } /// Convert `Captures` into the test suite's capture values. fn testify_captures(caps: &regex_lite::Captures<'_>) -> regex_test::Captures { let spans = caps.iter().map(|group| { group.map(|m| regex_test::Span { start: m.start(), end: m.end() }) }); // This unwrap is OK because we assume our 'caps' represents a match, and // a match always gives a non-zero number of groups with the first group // being non-None. 
regex_test::Captures::new(0, spans).unwrap() } <file_sep>/fuzz/fuzz_targets/ast_fuzz_regex.rs #![no_main] use { libfuzzer_sys::fuzz_target, regex::RegexBuilder, regex_syntax::ast::Ast, }; #[derive(Eq, PartialEq, arbitrary::Arbitrary)] struct FuzzData { ast: Ast, } impl std::fmt::Debug for FuzzData { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut builder = f.debug_struct("FuzzData"); builder.field("ast", &format!("{}", self.ast)); builder.finish() } } fuzz_target!(|data: FuzzData| { let _ = env_logger::try_init(); let pattern = format!("{}", data.ast); RegexBuilder::new(&pattern).size_limit(1 << 20).build().ok(); }); <file_sep>/fuzz/Cargo.toml [package] name = "regex-fuzz" version = "0.0.0" authors = [ "The Rust Project Developers", "<NAME> <<EMAIL>>", "<NAME> <<EMAIL>>", "<NAME> <<EMAIL>>", ] publish = false edition = "2021" [package.metadata] cargo-fuzz = true [dependencies] arbitrary = { version = "1.3.0", features = ["derive"] } libfuzzer-sys = { version = "0.4.1", features = ["arbitrary-derive"] } regex = { path = "..", features = ["logging"] } regex-automata = { path = "../regex-automata", features = ["logging"] } regex-lite = { path = "../regex-lite" } regex-syntax = { path = "../regex-syntax", features = ["arbitrary"] } [dependencies.env_logger] # Note that this is currently using an older version because of the dependency # tree explosion that happened in 0.10. version = "0.9.3" default-features = false features = ["atty", "humantime", "termcolor"] # Prevent this from interfering with workspaces [workspace] members = ["."] # NOTE: If you add a new fuzzer below, please make sure to add it to the # oss-fuzz-build.sh script, otherwise it won't get run in OSS-fuzz. [[bin]] name = "fuzz_regex_match" path = "fuzz_targets/fuzz_regex_match.rs" [[bin]] name = "fuzz_regex_lite_match" path = "fuzz_targets/fuzz_regex_lite_match.rs" [[bin]] name = "fuzz_regex_automata_deserialize_dense_dfa" path = "fuzz_targets/fuzz_regex_automata_deserialize_dense_dfa.rs" [[bin]] name = "fuzz_regex_automata_deserialize_sparse_dfa" path = "fuzz_targets/fuzz_regex_automata_deserialize_sparse_dfa.rs" [[bin]] name = "ast_roundtrip" path = "fuzz_targets/ast_roundtrip.rs" [[bin]] name = "ast_fuzz_match" path = "fuzz_targets/ast_fuzz_match.rs" [[bin]] name = "ast_fuzz_regex" path = "fuzz_targets/ast_fuzz_regex.rs" [[bin]] name = "ast_fuzz_match_bytes" path = "fuzz_targets/ast_fuzz_match_bytes.rs" [profile.release] opt-level = 3 debug = true [profile.dev] opt-level = 3 debug = true [profile.test] opt-level = 3 debug = true <file_sep>/regex-syntax/test #!/bin/bash set -e # cd to the directory containing this crate's Cargo.toml so that we don't need # to pass --manifest-path to every `cargo` command. cd "$(dirname "$0")" # This is a convenience script for running a broad swath of the syntax tests. echo "===== DEFAULT FEATURES ===" cargo test features=( std unicode unicode-age unicode-bool unicode-case unicode-gencat unicode-perl unicode-script unicode-segment ) for f in "${features[@]}"; do echo "=== FEATURE: $f ===" # We only run library tests because I couldn't figure out how to easily # make doc tests run in 'no_std' mode. In particular, without the Error # trait, using '?' in doc tests seems tricky. 
cargo test --no-default-features --lib --features "$f" done <file_sep>/regex-capi/Cargo.toml [package] name = "rure" version = "0.2.2" #:version authors = ["The Rust Project Developers"] license = "MIT OR Apache-2.0" readme = "README.md" repository = "https://github.com/rust-lang/regex" documentation = "https://github.com/rust-lang/regex/tree/master/regex-capi" homepage = "https://github.com/rust-lang/regex" description = """ A C API for Rust's regular expression library. """ workspace = ".." edition = "2018" [lib] name = "rure" crate-type = ["staticlib", "cdylib", "rlib"] [dependencies] libc = "0.2" regex = { version = "1", path = ".." } <file_sep>/regex-capi/README.md C API for RUst's REgex engine ============================= rure is a C API to Rust's regex library, which guarantees linear time searching using finite automata. In exchange, it must give up some common regex features such as backreferences and arbitrary lookaround. It does however include capturing groups, lazy matching, Unicode support and word boundary assertions. Its matching semantics generally correspond to Perl's, or "leftmost first." Namely, the match locations reported correspond to the first match that would be found by a backtracking engine. The header file (`includes/rure.h`) serves as the primary API documentation of this library. Types and flags are documented first, and functions follow. The syntax and possibly other useful things are documented in the Rust API documentation: https://docs.rs/regex Examples -------- There are readable examples in the `ctest` and `examples` sub-directories. Assuming you have [Rust and Cargo installed](https://www.rust-lang.org/downloads.html) (and a C compiler), then this should work to run the `iter` example: ``` $ git clone git://github.com/rust-lang/regex $ cd regex/regex-capi/examples $ ./compile $ LD_LIBRARY_PATH=../target/release ./iter ``` Performance ----------- It's fast. Its core matching engine is a lazy DFA, which is what GNU grep and RE2 use. Like GNU grep, this regex engine can detect multi byte literals in the regex and will use fast literal string searching to quickly skip through the input to find possible match locations. All memory usage is bounded and all searching takes linear time with respect to the input string. For more details, see the PERFORMANCE guide: https://github.com/rust-lang/regex/blob/master/PERFORMANCE.md Text encoding ------------- All regular expressions must be valid UTF-8. The text encoding of haystacks is more complicated. To a first approximation, haystacks should be UTF-8. In fact, UTF-8 (and, one supposes, ASCII) is the only well defined text encoding supported by this library. It is impossible to match UTF-16, UTF-32 or any other encoding without first transcoding it to UTF-8. With that said, haystacks do not need to be valid UTF-8, and if they aren't valid UTF-8, no performance penalty is paid. Whether invalid UTF-8 is matched or not depends on the regular expression. For example, with the `RURE_FLAG_UNICODE` flag enabled, the regex `.` is guaranteed to match a single UTF-8 encoding of a Unicode codepoint (sans LF). In particular, it will not match invalid UTF-8 such as `\xFF`, nor will it match surrogate codepoints or "alternate" (i.e., non-minimal) encodings of codepoints. However, with the `RURE_FLAG_UNICODE` flag disabled, the regex `.` will match any *single* arbitrary byte (sans LF), including `\xFF`. 
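To make the flag's effect concrete, here is a minimal sketch of the same
semantics using the underlying Rust `regex` crate's `bytes` API. (This README
documents the C API, so the snippet is purely illustrative of the behavior
that `rure` exposes.)

```
use regex::bytes::Regex;

// With Unicode mode disabled, `.` matches any single byte except LF,
// including bytes that are not valid UTF-8, such as 0xFF.
let re = Regex::new(r"(?-u)foo.bar").unwrap();
assert!(re.is_match(b"foo\xFFbar"));

// Matching a specific invalid UTF-8 byte requires an escape sequence,
// since the pattern itself must be valid UTF-8.
let re = Regex::new(r"(?-u)\xFF").unwrap();
assert!(re.is_match(b"\xFF"));
```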
This provides a useful invariant: wherever `RURE_FLAG_UNICODE` is set, the corresponding regex is guaranteed to match valid UTF-8. Invalid UTF-8 will always prevent a match from happening when the flag is set. Since flags can be toggled in the regular expression itself, this allows one to pick and choose which parts of the regular expression must match UTF-8 or not. Some good advice is to always enable the `RURE_FLAG_UNICODE` flag (which is enabled when using `rure_compile_must`) and selectively disable the flag when one wants to match arbitrary bytes. The flag can be disabled in a regular expression with `(?-u)`. Finally, if one wants to match specific invalid UTF-8 bytes, then you can use escape sequences. e.g., `(?-u)\\xFF` will match `\xFF`. It's not possible to use C literal escape sequences in this case since regular expressions must be valid UTF-8. Aborts ------ This library will abort your process if an unwinding panic is caught in the Rust code. Generally, a panic occurs when there is a bug in the program or if allocation failed. It is possible to cause this behavior by passing invalid inputs to some functions. For example, giving an invalid capture group index to `rure_captures_at` will cause Rust's bounds checks to fail, which will cause a panic, which will be caught and printed to stderr. The process will then `abort`. Missing ------- There are a few things missing from the C API that are present in the Rust API. There's no particular (known) reason why they don't, they just haven't been implemented yet. * Splitting a string by a regex. * Replacing regex matches in a string with some other text. <file_sep>/record/README.md This directory contains various recordings of results. These are committed to the repository so that they can be compared over time. (At the time of writing, there is no tooling for facilitating this comparison. It has to be done manually.) 
<file_sep>/testdata/crazy.toml [[test]] name = "nothing-empty" regex = [] haystack = "" matches = [] [[test]] name = "nothing-something" regex = [] haystack = "wat" matches = [] [[test]] name = "ranges" regex = '(?-u)\b(?:[0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\b' haystack = "num: 255" matches = [[5, 8]] [[test]] name = "ranges-not" regex = '(?-u)\b(?:[0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\b' haystack = "num: 256" matches = [] [[test]] name = "float1" regex = '[-+]?[0-9]*\.?[0-9]+' haystack = "0.1" matches = [[0, 3]] [[test]] name = "float2" regex = '[-+]?[0-9]*\.?[0-9]+' haystack = "0.1.2" matches = [[0, 3]] match-limit = 1 [[test]] name = "float3" regex = '[-+]?[0-9]*\.?[0-9]+' haystack = "a1.2" matches = [[1, 4]] [[test]] name = "float4" regex = '[-+]?[0-9]*\.?[0-9]+' haystack = "1.a" matches = [[0, 1]] [[test]] name = "float5" regex = '^[-+]?[0-9]*\.?[0-9]+$' haystack = "1.a" matches = [] [[test]] name = "email" regex = '(?i-u)\b[A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]{2,4}\b' haystack = "mine is <EMAIL> " matches = [[8, 26]] [[test]] name = "email-not" regex = '(?i-u)\b[A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]{2,4}\b' haystack = "mine is <EMAIL> " matches = [] [[test]] name = "email-big" regex = '''[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?''' haystack = "mine is <EMAIL> " matches = [[8, 26]] [[test]] name = "date1" regex = '^(?:19|20)\d\d[- /.](?:0[1-9]|1[012])[- /.](?:0[1-9]|[12][0-9]|3[01])$' haystack = "1900-01-01" matches = [[0, 10]] unicode = false [[test]] name = "date2" regex = '^(?:19|20)\d\d[- /.](?:0[1-9]|1[012])[- /.](?:0[1-9]|[12][0-9]|3[01])$' haystack = "1900-00-01" matches = [] unicode = false [[test]] name = "date3" regex = '^(?:19|20)\d\d[- /.](?:0[1-9]|1[012])[- /.](?:0[1-9]|[12][0-9]|3[01])$' haystack = "1900-13-01" matches = [] unicode = false [[test]] name = "start-end-empty" regex = '^$' haystack = "" matches = [[0, 0]] [[test]] name = "start-end-empty-rev" regex = '$^' haystack = "" matches = [[0, 0]] [[test]] name = "start-end-empty-many-1" regex = '^$^$^$' haystack = "" matches = [[0, 0]] [[test]] name = "start-end-empty-many-2" regex = '^^^$$$' haystack = "" matches = [[0, 0]] [[test]] name = "start-end-empty-rep" regex = '(?:^$)*' haystack = "a\nb\nc" matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]] [[test]] name = "start-end-empty-rep-rev" regex = '(?:$^)*' haystack = "a\nb\nc" matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]] [[test]] name = "neg-class-letter" regex = '[^ac]' haystack = "acx" matches = [[2, 3]] [[test]] name = "neg-class-letter-comma" regex = '[^a,]' haystack = "a,x" matches = [[2, 3]] [[test]] name = "neg-class-letter-space" regex = '[^a[:space:]]' haystack = "a x" matches = [[2, 3]] [[test]] name = "neg-class-comma" regex = '[^,]' haystack = ",,x" matches = [[2, 3]] [[test]] name = "neg-class-space" regex = '[^[:space:]]' haystack = " a" matches = [[1, 2]] [[test]] name = "neg-class-space-comma" regex = '[^,[:space:]]' haystack = ", a" matches = [[2, 3]] [[test]] name = "neg-class-comma-space" regex = '[^[:space:],]' haystack = " ,a" matches = [[2, 3]] [[test]] name = "neg-class-ascii" regex = '[^[:alpha:]Z]' haystack = "A1" matches = [[1, 2]] [[test]] name = "lazy-many-many" regex = '(?:(?:.*)*?)=' haystack = "a=b" matches = [[0, 2]] [[test]] name = "lazy-many-optional" regex = '(?:(?:.?)*?)=' haystack = "a=b" matches = [[0, 2]] [[test]] name = "lazy-one-many-many" regex = '(?:(?:.*)+?)=' haystack = "a=b" matches = [[0, 
2]] [[test]] name = "lazy-one-many-optional" regex = '(?:(?:.?)+?)=' haystack = "a=b" matches = [[0, 2]] [[test]] name = "lazy-range-min-many" regex = '(?:(?:.*){1,}?)=' haystack = "a=b" matches = [[0, 2]] [[test]] name = "lazy-range-many" regex = '(?:(?:.*){1,2}?)=' haystack = "a=b" matches = [[0, 2]] [[test]] name = "greedy-many-many" regex = '(?:(?:.*)*)=' haystack = "a=b" matches = [[0, 2]] [[test]] name = "greedy-many-optional" regex = '(?:(?:.?)*)=' haystack = "a=b" matches = [[0, 2]] [[test]] name = "greedy-one-many-many" regex = '(?:(?:.*)+)=' haystack = "a=b" matches = [[0, 2]] [[test]] name = "greedy-one-many-optional" regex = '(?:(?:.?)+)=' haystack = "a=b" matches = [[0, 2]] [[test]] name = "greedy-range-min-many" regex = '(?:(?:.*){1,})=' haystack = "a=b" matches = [[0, 2]] [[test]] name = "greedy-range-many" regex = '(?:(?:.*){1,2})=' haystack = "a=b" matches = [[0, 2]] [[test]] name = "empty1" regex = '' haystack = "" matches = [[0, 0]] [[test]] name = "empty2" regex = '' haystack = "abc" matches = [[0, 0], [1, 1], [2, 2], [3, 3]] [[test]] name = "empty3" regex = '(?:)' haystack = "abc" matches = [[0, 0], [1, 1], [2, 2], [3, 3]] [[test]] name = "empty4" regex = '(?:)*' haystack = "abc" matches = [[0, 0], [1, 1], [2, 2], [3, 3]] [[test]] name = "empty5" regex = '(?:)+' haystack = "abc" matches = [[0, 0], [1, 1], [2, 2], [3, 3]] [[test]] name = "empty6" regex = '(?:)?' haystack = "abc" matches = [[0, 0], [1, 1], [2, 2], [3, 3]] [[test]] name = "empty7" regex = '(?:)(?:)' haystack = "abc" matches = [[0, 0], [1, 1], [2, 2], [3, 3]] [[test]] name = "empty8" regex = '(?:)+|z' haystack = "abc" matches = [[0, 0], [1, 1], [2, 2], [3, 3]] [[test]] name = "empty9" regex = 'z|(?:)+' haystack = "abc" matches = [[0, 0], [1, 1], [2, 2], [3, 3]] [[test]] name = "empty10" regex = '(?:)+|b' haystack = "abc" matches = [[0, 0], [1, 1], [2, 2], [3, 3]] [[test]] name = "empty11" regex = 'b|(?:)+' haystack = "abc" matches = [[0, 0], [1, 2], [3, 3]] <file_sep>/regex-capi/src/macros.rs macro_rules! ffi_fn { (fn $name:ident($($arg:ident: $arg_ty:ty),*,) -> $ret:ty $body:block) => { ffi_fn!(fn $name($($arg: $arg_ty),*) -> $ret $body); }; (fn $name:ident($($arg:ident: $arg_ty:ty),*) -> $ret:ty $body:block) => { #[no_mangle] pub extern fn $name($($arg: $arg_ty),*) -> $ret { use ::std::io::{self, Write}; use ::std::panic::{self, AssertUnwindSafe}; use ::libc::abort; match panic::catch_unwind(AssertUnwindSafe(move || $body)) { Ok(v) => v, Err(err) => { let msg = if let Some(&s) = err.downcast_ref::<&str>() { s.to_owned() } else if let Some(s) = err.downcast_ref::<String>() { s.to_owned() } else { "UNABLE TO SHOW RESULT OF PANIC.".to_owned() }; let _ = writeln!( &mut io::stderr(), "panic unwind caught, aborting: {:?}", msg); unsafe { abort() } } } } }; (fn $name:ident($($arg:ident: $arg_ty:ty),*,) $body:block) => { ffi_fn!(fn $name($($arg: $arg_ty),*) -> () $body); }; (fn $name:ident($($arg:ident: $arg_ty:ty),*) $body:block) => { ffi_fn!(fn $name($($arg: $arg_ty),*) -> () $body); }; } <file_sep>/regex-automata/src/util/captures.rs /*! Provides types for dealing with capturing groups. Capturing groups refer to sub-patterns of regexes that some regex engines can report matching offsets for. For example, matching `[a-z]([0-9]+)` against `a789` would give `a789` as the overall match (for the implicit capturing group at index `0`) and `789` as the match for the capturing group `([0-9]+)` (an explicit capturing group at index `1`). Not all regex engines can report match offsets for capturing groups. 
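For engines in this crate that do support them, the `a789` example above can
be sketched as follows. This is a minimal illustration using the `PikeVM`,
one of the engines described in the overview below:

```
use regex_automata::{nfa::thompson::pikevm::PikeVM, Span};

let re = PikeVM::new(r"[a-z]([0-9]+)")?;
let (mut cache, mut caps) = (re.create_cache(), re.create_captures());
re.captures(&mut cache, "a789", &mut caps);
// Group 0 is the implicit group corresponding to the overall match.
assert_eq!(Some(Span::from(0..4)), caps.get_group(0));
// Group 1 is the explicit group `([0-9]+)`.
assert_eq!(Some(Span::from(1..4)), caps.get_group(1));
# Ok::<(), Box<dyn std::error::Error>>(())
```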
Indeed, to a first approximation, regex engines that can report capturing group offsets tend to be quite a bit slower than regex engines that can't. This is because tracking capturing groups at search time usually requires more "power" that in turn adds overhead. Other regex implementations might call capturing groups "submatches." # Overview The main types in this module are: * [`Captures`] records the capturing group offsets found during a search. It provides convenience routines for looking up capturing group offsets by either index or name. * [`GroupInfo`] records the mapping between capturing groups and "slots," where the latter are how capturing groups are recorded during a regex search. This also keeps a mapping from capturing group name to index, and capture group index to name. A `GroupInfo` is used by `Captures` internally to provide a convenient API. It is unlikely that you'll use a `GroupInfo` directly, but for example, if you've compiled an Thompson NFA, then you can use [`thompson::NFA::group_info`](crate::nfa::thompson::NFA::group_info) to get its underlying `GroupInfo`. */ use alloc::{string::String, sync::Arc, vec, vec::Vec}; use crate::util::{ interpolate, primitives::{ NonMaxUsize, PatternID, PatternIDError, PatternIDIter, SmallIndex, }, search::{Match, Span}, }; /// The span offsets of capturing groups after a match has been found. /// /// This type represents the output of regex engines that can report the /// offsets at which capturing groups matches or "submatches" occur. For /// example, the [`PikeVM`](crate::nfa::thompson::pikevm::PikeVM). When a match /// occurs, it will at minimum contain the [`PatternID`] of the pattern that /// matched. Depending upon how it was constructed, it may also contain the /// start/end offsets of the entire match of the pattern and the start/end /// offsets of each capturing group that participated in the match. /// /// Values of this type are always created for a specific [`GroupInfo`]. It is /// unspecified behavior to use a `Captures` value in a search with any regex /// engine that has a different `GroupInfo` than the one the `Captures` were /// created with. /// /// # Constructors /// /// There are three constructors for this type that control what kind of /// information is available upon a match: /// /// * [`Captures::all`]: Will store overall pattern match offsets in addition /// to the offsets of capturing groups that participated in the match. /// * [`Captures::matches`]: Will store only the overall pattern /// match offsets. The offsets of capturing groups (even ones that participated /// in the match) are not available. /// * [`Captures::empty`]: Will only store the pattern ID that matched. No /// match offsets are available at all. /// /// If you aren't sure which to choose, then pick the first one. The first one /// is what convenience routines like, /// [`PikeVM::create_captures`](crate::nfa::thompson::pikevm::PikeVM::create_captures), /// will use automatically. /// /// The main difference between these choices is performance. Namely, if you /// ask for _less_ information, then the execution of regex search may be able /// to run more quickly. /// /// # Notes /// /// It is worth pointing out that this type is not coupled to any one specific /// regex engine. Instead, its coupling is with [`GroupInfo`], which is the /// thing that is responsible for mapping capturing groups to "slot" offsets. 
/// Slot offsets are indices into a single sequence of memory at which matching /// haystack offsets for the corresponding group are written by regex engines. /// /// # Example /// /// This example shows how to parse a simple date and extract the components of /// the date via capturing groups: /// /// ``` /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Span}; /// /// let re = PikeVM::new(r"^([0-9]{4})-([0-9]{2})-([0-9]{2})$")?; /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); /// /// re.captures(&mut cache, "2010-03-14", &mut caps); /// assert!(caps.is_match()); /// assert_eq!(Some(Span::from(0..4)), caps.get_group(1)); /// assert_eq!(Some(Span::from(5..7)), caps.get_group(2)); /// assert_eq!(Some(Span::from(8..10)), caps.get_group(3)); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// # Example: named capturing groups /// /// This example is like the one above, but leverages the ability to name /// capturing groups in order to make the code a bit clearer: /// /// ``` /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Span}; /// /// let re = PikeVM::new(r"^(?P<y>[0-9]{4})-(?P<m>[0-9]{2})-(?P<d>[0-9]{2})$")?; /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); /// /// re.captures(&mut cache, "2010-03-14", &mut caps); /// assert!(caps.is_match()); /// assert_eq!(Some(Span::from(0..4)), caps.get_group_by_name("y")); /// assert_eq!(Some(Span::from(5..7)), caps.get_group_by_name("m")); /// assert_eq!(Some(Span::from(8..10)), caps.get_group_by_name("d")); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[derive(Clone)] pub struct Captures { /// The group info that these capture groups are coupled to. This is what /// gives the "convenience" of the `Captures` API. Namely, it provides the /// slot mapping and the name|-->index mapping for capture lookups by name. group_info: GroupInfo, /// The ID of the pattern that matched. Regex engines must set this to /// None when no match occurs. pid: Option<PatternID>, /// The slot values, i.e., submatch offsets. /// /// In theory, the smallest sequence of slots would be something like /// `max(groups(pattern) for pattern in regex) * 2`, but instead, we use /// `sum(groups(pattern) for pattern in regex) * 2`. Why? /// /// Well, the former could be used in theory, because we don't generally /// have any overlapping APIs that involve capturing groups. Therefore, /// there's technically never any need to have slots set for multiple /// patterns. However, this might change some day, in which case, we would /// need to have slots available. /// /// The other reason is that during the execution of some regex engines, /// there exists a point in time where multiple slots for different /// patterns may be written to before knowing which pattern has matched. /// Therefore, the regex engines themselves, in order to support multiple /// patterns correctly, must have all slots available. If `Captures` /// doesn't have all slots available, then regex engines can't write /// directly into the caller provided `Captures` and must instead write /// into some other storage and then copy the slots involved in the match /// at the end of the search. /// /// So overall, at least as of the time of writing, it seems like the path /// of least resistance is to just require allocating all possible slots /// instead of the conceptual minimum. 
Another way to justify this is that /// the most common case is a single pattern, in which case, there is no /// inefficiency here since the 'max' and 'sum' calculations above are /// equivalent in that case. /// /// N.B. The mapping from group index to slot is maintained by `GroupInfo` /// and is considered an API guarantee. See `GroupInfo` for more details on /// that mapping. /// /// N.B. `Option<NonMaxUsize>` has the same size as a `usize`. slots: Vec<Option<NonMaxUsize>>, } impl Captures { /// Create new storage for the offsets of all matching capturing groups. /// /// This routine provides the most information for matches---namely, the /// spans of matching capturing groups---but also requires the regex search /// routines to do the most work. /// /// It is unspecified behavior to use the returned `Captures` value in a /// search with a `GroupInfo` other than the one that is provided to this /// constructor. /// /// # Example /// /// This example shows that all capturing groups---but only ones that /// participated in a match---are available to query after a match has /// been found: /// /// ``` /// use regex_automata::{ /// nfa::thompson::pikevm::PikeVM, /// util::captures::Captures, /// Span, Match, /// }; /// /// let re = PikeVM::new( /// r"^(?:(?P<lower>[a-z]+)|(?P<upper>[A-Z]+))(?P<digits>[0-9]+)$", /// )?; /// let mut cache = re.create_cache(); /// let mut caps = Captures::all(re.get_nfa().group_info().clone()); /// /// re.captures(&mut cache, "ABC123", &mut caps); /// assert!(caps.is_match()); /// assert_eq!(Some(Match::must(0, 0..6)), caps.get_match()); /// // The 'lower' group didn't match, so it won't have any offsets. /// assert_eq!(None, caps.get_group_by_name("lower")); /// assert_eq!(Some(Span::from(0..3)), caps.get_group_by_name("upper")); /// assert_eq!(Some(Span::from(3..6)), caps.get_group_by_name("digits")); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn all(group_info: GroupInfo) -> Captures { let slots = group_info.slot_len(); Captures { group_info, pid: None, slots: vec![None; slots] } } /// Create new storage for only the full match spans of a pattern. This /// does not include any capturing group offsets. /// /// It is unspecified behavior to use the returned `Captures` value in a /// search with a `GroupInfo` other than the one that is provided to this /// constructor. /// /// # Example /// /// This example shows that only overall match offsets are reported when /// this constructor is used. Accessing any capturing groups other than /// the 0th will always return `None`. /// /// ``` /// use regex_automata::{ /// nfa::thompson::pikevm::PikeVM, /// util::captures::Captures, /// Match, /// }; /// /// let re = PikeVM::new( /// r"^(?:(?P<lower>[a-z]+)|(?P<upper>[A-Z]+))(?P<digits>[0-9]+)$", /// )?; /// let mut cache = re.create_cache(); /// let mut caps = Captures::matches(re.get_nfa().group_info().clone()); /// /// re.captures(&mut cache, "ABC123", &mut caps); /// assert!(caps.is_match()); /// assert_eq!(Some(Match::must(0, 0..6)), caps.get_match()); /// // We didn't ask for capturing group offsets, so they aren't available. 
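    /// // Only the span of the overall match (the implicit group 0) is
    /// // recorded, so every named group lookup returns None: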
/// assert_eq!(None, caps.get_group_by_name("lower")); /// assert_eq!(None, caps.get_group_by_name("upper")); /// assert_eq!(None, caps.get_group_by_name("digits")); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn matches(group_info: GroupInfo) -> Captures { // This is OK because we know there are at least this many slots, // and GroupInfo construction guarantees that the number of slots fits // into a usize. let slots = group_info.pattern_len().checked_mul(2).unwrap(); Captures { group_info, pid: None, slots: vec![None; slots] } } /// Create new storage for only tracking which pattern matched. No offsets /// are stored at all. /// /// It is unspecified behavior to use the returned `Captures` value in a /// search with a `GroupInfo` other than the one that is provided to this /// constructor. /// /// # Example /// /// This example shows that only the pattern that matched can be accessed /// from a `Captures` value created via this constructor. /// /// ``` /// use regex_automata::{ /// nfa::thompson::pikevm::PikeVM, /// util::captures::Captures, /// PatternID, /// }; /// /// let re = PikeVM::new_many(&[r"[a-z]+", r"[A-Z]+"])?; /// let mut cache = re.create_cache(); /// let mut caps = Captures::empty(re.get_nfa().group_info().clone()); /// /// re.captures(&mut cache, "aABCz", &mut caps); /// assert!(caps.is_match()); /// assert_eq!(Some(PatternID::must(0)), caps.pattern()); /// // We didn't ask for any offsets, so they aren't available. /// assert_eq!(None, caps.get_match()); /// /// re.captures(&mut cache, &"aABCz"[1..], &mut caps); /// assert!(caps.is_match()); /// assert_eq!(Some(PatternID::must(1)), caps.pattern()); /// // We didn't ask for any offsets, so they aren't available. /// assert_eq!(None, caps.get_match()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn empty(group_info: GroupInfo) -> Captures { Captures { group_info, pid: None, slots: vec![] } } /// Returns true if and only if this capturing group represents a match. /// /// This is a convenience routine for `caps.pattern().is_some()`. /// /// # Example /// /// When using the PikeVM (for example), the lightest weight way of /// detecting whether a match exists is to create capturing groups that /// only track the ID of the pattern that match (if any): /// /// ``` /// use regex_automata::{ /// nfa::thompson::pikevm::PikeVM, /// util::captures::Captures, /// }; /// /// let re = PikeVM::new(r"[a-z]+")?; /// let mut cache = re.create_cache(); /// let mut caps = Captures::empty(re.get_nfa().group_info().clone()); /// /// re.captures(&mut cache, "aABCz", &mut caps); /// assert!(caps.is_match()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn is_match(&self) -> bool { self.pid.is_some() } /// Returns the identifier of the pattern that matched when this /// capturing group represents a match. If no match was found, then this /// always returns `None`. /// /// This returns a pattern ID in precisely the cases in which `is_match` /// returns `true`. Similarly, the pattern ID returned is always the /// same pattern ID found in the `Match` returned by `get_match`. 
/// /// # Example /// /// When using the PikeVM (for example), the lightest weight way of /// detecting which pattern matched is to create capturing groups that only /// track the ID of the pattern that match (if any): /// /// ``` /// use regex_automata::{ /// nfa::thompson::pikevm::PikeVM, /// util::captures::Captures, /// PatternID, /// }; /// /// let re = PikeVM::new_many(&[r"[a-z]+", r"[A-Z]+"])?; /// let mut cache = re.create_cache(); /// let mut caps = Captures::empty(re.get_nfa().group_info().clone()); /// /// re.captures(&mut cache, "ABC", &mut caps); /// assert_eq!(Some(PatternID::must(1)), caps.pattern()); /// // Recall that offsets are only available when using a non-empty /// // Captures value. So even though a match occurred, this returns None! /// assert_eq!(None, caps.get_match()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn pattern(&self) -> Option<PatternID> { self.pid } /// Returns the pattern ID and the span of the match, if one occurred. /// /// This always returns `None` when `Captures` was created with /// [`Captures::empty`], even if a match was found. /// /// If this routine returns a non-`None` value, then `is_match` is /// guaranteed to return `true` and `pattern` is also guaranteed to return /// a non-`None` value. /// /// # Example /// /// This example shows how to get the full match from a search: /// /// ``` /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Match}; /// /// let re = PikeVM::new_many(&[r"[a-z]+", r"[A-Z]+"])?; /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); /// /// re.captures(&mut cache, "ABC", &mut caps); /// assert_eq!(Some(Match::must(1, 0..3)), caps.get_match()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn get_match(&self) -> Option<Match> { Some(Match::new(self.pattern()?, self.get_group(0)?)) } /// Returns the span of a capturing group match corresponding to the group /// index given, only if both the overall pattern matched and the capturing /// group participated in that match. /// /// This returns `None` if `index` is invalid. `index` is valid if and only /// if it's less than [`Captures::group_len`] for the matching pattern. /// /// This always returns `None` when `Captures` was created with /// [`Captures::empty`], even if a match was found. This also always /// returns `None` for any `index > 0` when `Captures` was created with /// [`Captures::matches`]. /// /// If this routine returns a non-`None` value, then `is_match` is /// guaranteed to return `true`, `pattern` is guaranteed to return a /// non-`None` value and `get_match` is guaranteed to return a non-`None` /// value. /// /// By convention, the 0th capture group will always return the same /// span as the span returned by `get_match`. This is because the 0th /// capture group always corresponds to the entirety of the pattern's /// match. (It is similarly always unnamed because it is implicit.) This /// isn't necessarily true of all regex engines. For example, one can /// hand-compile a [`thompson::NFA`](crate::nfa::thompson::NFA) via a /// [`thompson::Builder`](crate::nfa::thompson::Builder), which isn't /// technically forced to make the 0th capturing group always correspond to /// the entire match. 
/// /// # Example /// /// This example shows how to get the capturing groups, by index, from a /// match: /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// # if !cfg!(target_pointer_width = "64") { return Ok(()); } // see #1039 /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Span, Match}; /// /// let re = PikeVM::new(r"^(?P<first>\pL+)\s+(?P<last>\pL+)$")?; /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); /// /// re.captures(&mut cache, "<NAME>", &mut caps); /// assert_eq!(Some(Match::must(0, 0..17)), caps.get_match()); /// assert_eq!(Some(Span::from(0..5)), caps.get_group(1)); /// assert_eq!(Some(Span::from(6..17)), caps.get_group(2)); /// // Looking for a non-existent capturing group will return None: /// assert_eq!(None, caps.get_group(3)); /// assert_eq!(None, caps.get_group(9944060567225171988)); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn get_group(&self, index: usize) -> Option<Span> { let pid = self.pattern()?; // There's a little bit of work needed to map captures to slots in the // fully general case. But in the overwhelming common case of a single // pattern, we can just do some simple arithmetic. let (slot_start, slot_end) = if self.group_info().pattern_len() == 1 { (index.checked_mul(2)?, index.checked_mul(2)?.checked_add(1)?) } else { self.group_info().slots(pid, index)? }; let start = self.slots.get(slot_start).copied()??; let end = self.slots.get(slot_end).copied()??; Some(Span { start: start.get(), end: end.get() }) } /// Returns the span of a capturing group match corresponding to the group /// name given, only if both the overall pattern matched and the capturing /// group participated in that match. /// /// This returns `None` if `name` does not correspond to a valid capturing /// group for the pattern that matched. /// /// This always returns `None` when `Captures` was created with /// [`Captures::empty`], even if a match was found. This also always /// returns `None` for any `index > 0` when `Captures` was created with /// [`Captures::matches`]. /// /// If this routine returns a non-`None` value, then `is_match` is /// guaranteed to return `true`, `pattern` is guaranteed to return a /// non-`None` value and `get_match` is guaranteed to return a non-`None` /// value. /// /// # Example /// /// This example shows how to get the capturing groups, by name, from a /// match: /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Span, Match}; /// /// let re = PikeVM::new(r"^(?P<first>\pL+)\s+(?P<last>\pL+)$")?; /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); /// /// re.captures(&mut cache, "<NAME>", &mut caps); /// assert_eq!(Some(Match::must(0, 0..17)), caps.get_match()); /// assert_eq!(Some(Span::from(0..5)), caps.get_group_by_name("first")); /// assert_eq!(Some(Span::from(6..17)), caps.get_group_by_name("last")); /// // Looking for a non-existent capturing group will return None: /// assert_eq!(None, caps.get_group_by_name("middle")); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn get_group_by_name(&self, name: &str) -> Option<Span> { let index = self.group_info().to_index(self.pattern()?, name)?; self.get_group(index) } /// Returns an iterator of possible spans for every capturing group in the /// matching pattern. /// /// If this `Captures` value does not correspond to a match, then the /// iterator returned yields no elements. 
/// /// Note that the iterator returned yields elements of type `Option<Span>`. /// A span is present if and only if it corresponds to a capturing group /// that participated in a match. /// /// # Example /// /// This example shows how to collect all capturing groups: /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Span}; /// /// let re = PikeVM::new( /// // Matches first/last names, with an optional middle name. /// r"^(?P<first>\pL+)\s+(?:(?P<middle>\pL+)\s+)?(?P<last>\pL+)$", /// )?; /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); /// /// re.captures(&mut cache, "<NAME>", &mut caps); /// assert!(caps.is_match()); /// let groups: Vec<Option<Span>> = caps.iter().collect(); /// assert_eq!(groups, vec![ /// Some(Span::from(0..18)), /// Some(Span::from(0..5)), /// Some(Span::from(6..11)), /// Some(Span::from(12..18)), /// ]); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// This example uses the same regex as the previous example, but with a /// haystack that omits the middle name. This results in a capturing group /// that is present in the elements yielded by the iterator but without a /// match: /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Span}; /// /// let re = PikeVM::new( /// // Matches first/last names, with an optional middle name. /// r"^(?P<first>\pL+)\s+(?:(?P<middle>\pL+)\s+)?(?P<last>\pL+)$", /// )?; /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); /// /// re.captures(&mut cache, "<NAME>", &mut caps); /// assert!(caps.is_match()); /// let groups: Vec<Option<Span>> = caps.iter().collect(); /// assert_eq!(groups, vec![ /// Some(Span::from(0..12)), /// Some(Span::from(0..5)), /// None, /// Some(Span::from(6..12)), /// ]); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn iter(&self) -> CapturesPatternIter<'_> { let names = self .pattern() .map_or(GroupInfoPatternNames::empty().enumerate(), |pid| { self.group_info().pattern_names(pid).enumerate() }); CapturesPatternIter { caps: self, names } } /// Return the total number of capturing groups for the matching pattern. /// /// If this `Captures` value does not correspond to a match, then this /// always returns `0`. /// /// This always returns the same number of elements yielded by /// [`Captures::iter`]. That is, the number includes capturing groups even /// if they don't participate in the match. /// /// # Example /// /// This example shows how to count the total number of capturing groups /// associated with a pattern. Notice that it includes groups that did not /// participate in a match (just like `Captures::iter` does). /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::nfa::thompson::pikevm::PikeVM; /// /// let re = PikeVM::new( /// // Matches first/last names, with an optional middle name. /// r"^(?P<first>\pL+)\s+(?:(?P<middle>\pL+)\s+)?(?P<last>\pL+)$", /// )?; /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); /// /// re.captures(&mut cache, "<NAME>", &mut caps); /// assert_eq!(4, caps.group_len()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn group_len(&self) -> usize { let pid = match self.pattern() { None => return 0, Some(pid) => pid, }; self.group_info().group_len(pid) } /// Returns a reference to the underlying group info on which these /// captures are based. 
/// /// The difference between `GroupInfo` and `Captures` is that the former /// defines the structure of capturing groups where as the latter is what /// stores the actual match information. So where as `Captures` only gives /// you access to the current match, `GroupInfo` lets you query any /// information about all capturing groups, even ones for patterns that /// weren't involved in a match. /// /// Note that a `GroupInfo` uses reference counting internally, so it may /// be cloned cheaply. /// /// # Example /// /// This example shows how to get all capturing group names from the /// underlying `GroupInfo`. Notice that we don't even need to run a /// search. /// /// ``` /// use regex_automata::{nfa::thompson::pikevm::PikeVM, PatternID}; /// /// let re = PikeVM::new_many(&[ /// r"(?P<foo>a)", /// r"(a)(b)", /// r"ab", /// r"(?P<bar>a)(?P<quux>a)", /// r"(?P<foo>z)", /// ])?; /// let caps = re.create_captures(); /// /// let expected = vec![ /// (PatternID::must(0), 0, None), /// (PatternID::must(0), 1, Some("foo")), /// (PatternID::must(1), 0, None), /// (PatternID::must(1), 1, None), /// (PatternID::must(1), 2, None), /// (PatternID::must(2), 0, None), /// (PatternID::must(3), 0, None), /// (PatternID::must(3), 1, Some("bar")), /// (PatternID::must(3), 2, Some("quux")), /// (PatternID::must(4), 0, None), /// (PatternID::must(4), 1, Some("foo")), /// ]; /// // We could also just use 're.get_nfa().group_info()'. /// let got: Vec<(PatternID, usize, Option<&str>)> = /// caps.group_info().all_names().collect(); /// assert_eq!(expected, got); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn group_info(&self) -> &GroupInfo { &self.group_info } /// Interpolates the capture references in `replacement` with the /// corresponding substrings in `haystack` matched by each reference. The /// interpolated string is returned. /// /// See the [`interpolate` module](interpolate) for documentation on the /// format of the replacement string. /// /// # Example /// /// This example shows how to use interpolation, and also shows how it /// can work with multi-pattern regexes. /// /// ``` /// use regex_automata::{nfa::thompson::pikevm::PikeVM, PatternID}; /// /// let re = PikeVM::new_many(&[ /// r"(?<day>[0-9]{2})-(?<month>[0-9]{2})-(?<year>[0-9]{4})", /// r"(?<year>[0-9]{4})-(?<month>[0-9]{2})-(?<day>[0-9]{2})", /// ])?; /// let mut cache = re.create_cache(); /// let mut caps = re.create_captures(); /// /// let replacement = "year=$year, month=$month, day=$day"; /// /// // This matches the first pattern. /// let hay = "On 14-03-2010, I became a Tenneessee lamb."; /// re.captures(&mut cache, hay, &mut caps); /// let result = caps.interpolate_string(hay, replacement); /// assert_eq!("year=2010, month=03, day=14", result); /// /// // And this matches the second pattern. /// let hay = "On 2010-03-14, I became a Tenneessee lamb."; /// re.captures(&mut cache, hay, &mut caps); /// let result = caps.interpolate_string(hay, replacement); /// assert_eq!("year=2010, month=03, day=14", result); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn interpolate_string( &self, haystack: &str, replacement: &str, ) -> String { let mut dst = String::new(); self.interpolate_string_into(haystack, replacement, &mut dst); dst } /// Interpolates the capture references in `replacement` with the /// corresponding substrings in `haystack` matched by each reference. The /// interpolated string is written to `dst`. 
/// /// See the [`interpolate` module](interpolate) for documentation on the /// format of the replacement string. /// /// # Example /// /// This example shows how to use interpolation, and also shows how it /// can work with multi-pattern regexes. /// /// ``` /// use regex_automata::{nfa::thompson::pikevm::PikeVM, PatternID}; /// /// let re = PikeVM::new_many(&[ /// r"(?<day>[0-9]{2})-(?<month>[0-9]{2})-(?<year>[0-9]{4})", /// r"(?<year>[0-9]{4})-(?<month>[0-9]{2})-(?<day>[0-9]{2})", /// ])?; /// let mut cache = re.create_cache(); /// let mut caps = re.create_captures(); /// /// let replacement = "year=$year, month=$month, day=$day"; /// /// // This matches the first pattern. /// let hay = "On 14-03-2010, I became a Tenneessee lamb."; /// re.captures(&mut cache, hay, &mut caps); /// let mut dst = String::new(); /// caps.interpolate_string_into(hay, replacement, &mut dst); /// assert_eq!("year=2010, month=03, day=14", dst); /// /// // And this matches the second pattern. /// let hay = "On 2010-03-14, I became a Tenneessee lamb."; /// re.captures(&mut cache, hay, &mut caps); /// let mut dst = String::new(); /// caps.interpolate_string_into(hay, replacement, &mut dst); /// assert_eq!("year=2010, month=03, day=14", dst); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn interpolate_string_into( &self, haystack: &str, replacement: &str, dst: &mut String, ) { interpolate::string( replacement, |index, dst| { let span = match self.get_group(index) { None => return, Some(span) => span, }; dst.push_str(&haystack[span]); }, |name| self.group_info().to_index(self.pattern()?, name), dst, ); } /// Interpolates the capture references in `replacement` with the /// corresponding substrings in `haystack` matched by each reference. The /// interpolated byte string is returned. /// /// See the [`interpolate` module](interpolate) for documentation on the /// format of the replacement string. /// /// # Example /// /// This example shows how to use interpolation, and also shows how it /// can work with multi-pattern regexes. /// /// ``` /// use regex_automata::{nfa::thompson::pikevm::PikeVM, PatternID}; /// /// let re = PikeVM::new_many(&[ /// r"(?<day>[0-9]{2})-(?<month>[0-9]{2})-(?<year>[0-9]{4})", /// r"(?<year>[0-9]{4})-(?<month>[0-9]{2})-(?<day>[0-9]{2})", /// ])?; /// let mut cache = re.create_cache(); /// let mut caps = re.create_captures(); /// /// let replacement = b"year=$year, month=$month, day=$day"; /// /// // This matches the first pattern. /// let hay = b"On 14-03-2010, I became a Tenneessee lamb."; /// re.captures(&mut cache, hay, &mut caps); /// let result = caps.interpolate_bytes(hay, replacement); /// assert_eq!(&b"year=2010, month=03, day=14"[..], result); /// /// // And this matches the second pattern. /// let hay = b"On 2010-03-14, I became a Tenneessee lamb."; /// re.captures(&mut cache, hay, &mut caps); /// let result = caps.interpolate_bytes(hay, replacement); /// assert_eq!(&b"year=2010, month=03, day=14"[..], result); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn interpolate_bytes( &self, haystack: &[u8], replacement: &[u8], ) -> Vec<u8> { let mut dst = vec![]; self.interpolate_bytes_into(haystack, replacement, &mut dst); dst } /// Interpolates the capture references in `replacement` with the /// corresponding substrings in `haystack` matched by each reference. The /// interpolated byte string is written to `dst`. /// /// See the [`interpolate` module](interpolate) for documentation on the /// format of the replacement string. 
/// /// # Example /// /// This example shows how to use interpolation, and also shows how it /// can work with multi-pattern regexes. /// /// ``` /// use regex_automata::{nfa::thompson::pikevm::PikeVM, PatternID}; /// /// let re = PikeVM::new_many(&[ /// r"(?<day>[0-9]{2})-(?<month>[0-9]{2})-(?<year>[0-9]{4})", /// r"(?<year>[0-9]{4})-(?<month>[0-9]{2})-(?<day>[0-9]{2})", /// ])?; /// let mut cache = re.create_cache(); /// let mut caps = re.create_captures(); /// /// let replacement = b"year=$year, month=$month, day=$day"; /// /// // This matches the first pattern. /// let hay = b"On 14-03-2010, I became a Tenneessee lamb."; /// re.captures(&mut cache, hay, &mut caps); /// let mut dst = vec![]; /// caps.interpolate_bytes_into(hay, replacement, &mut dst); /// assert_eq!(&b"year=2010, month=03, day=14"[..], dst); /// /// // And this matches the second pattern. /// let hay = b"On 2010-03-14, I became a Tenneessee lamb."; /// re.captures(&mut cache, hay, &mut caps); /// let mut dst = vec![]; /// caps.interpolate_bytes_into(hay, replacement, &mut dst); /// assert_eq!(&b"year=2010, month=03, day=14"[..], dst); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn interpolate_bytes_into( &self, haystack: &[u8], replacement: &[u8], dst: &mut Vec<u8>, ) { interpolate::bytes( replacement, |index, dst| { let span = match self.get_group(index) { None => return, Some(span) => span, }; dst.extend_from_slice(&haystack[span]); }, |name| self.group_info().to_index(self.pattern()?, name), dst, ); } /// This is a convenience routine for extracting the substrings /// corresponding to matching capture groups in the given `haystack`. The /// `haystack` should be the same substring used to find the match spans in /// this `Captures` value. /// /// This is identical to [`Captures::extract_bytes`], except it works with /// `&str` instead of `&[u8]`. /// /// # Panics /// /// This panics if the number of explicit matching groups in this /// `Captures` value is less than `N`. This also panics if this `Captures` /// value does not correspond to a match. /// /// Note that this does *not* panic if the number of explicit matching /// groups is bigger than `N`. In that case, only the first `N` matching /// groups are extracted. /// /// # Example /// /// ``` /// use regex_automata::nfa::thompson::pikevm::PikeVM; /// /// let re = PikeVM::new(r"([0-9]{4})-([0-9]{2})-([0-9]{2})")?; /// let mut cache = re.create_cache(); /// let mut caps = re.create_captures(); /// /// let hay = "On 2010-03-14, I became a Tenneessee lamb."; /// re.captures(&mut cache, hay, &mut caps); /// assert!(caps.is_match()); /// let (full, [year, month, day]) = caps.extract(hay); /// assert_eq!("2010-03-14", full); /// assert_eq!("2010", year); /// assert_eq!("03", month); /// assert_eq!("14", day); /// /// // We can also ask for fewer than all capture groups. /// let (full, [year]) = caps.extract(hay); /// assert_eq!("2010-03-14", full); /// assert_eq!("2010", year); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn extract<'h, const N: usize>( &self, haystack: &'h str, ) -> (&'h str, [&'h str; N]) { let mut matched = self.iter().flatten(); let whole_match = &haystack[matched.next().expect("a match")]; let group_matches = [0; N].map(|_| { let sp = matched.next().expect("too few matching groups"); &haystack[sp] }); (whole_match, group_matches) } /// This is a convenience routine for extracting the substrings /// corresponding to matching capture groups in the given `haystack`. 
The /// `haystack` should be the same substring used to find the match spans in /// this `Captures` value. /// /// This is identical to [`Captures::extract`], except it works with /// `&[u8]` instead of `&str`. /// /// # Panics /// /// This panics if the number of explicit matching groups in this /// `Captures` value is less than `N`. This also panics if this `Captures` /// value does not correspond to a match. /// /// Note that this does *not* panic if the number of explicit matching /// groups is bigger than `N`. In that case, only the first `N` matching /// groups are extracted. /// /// # Example /// /// ``` /// use regex_automata::nfa::thompson::pikevm::PikeVM; /// /// let re = PikeVM::new(r"([0-9]{4})-([0-9]{2})-([0-9]{2})")?; /// let mut cache = re.create_cache(); /// let mut caps = re.create_captures(); /// /// let hay = b"On 2010-03-14, I became a Tenneessee lamb."; /// re.captures(&mut cache, hay, &mut caps); /// assert!(caps.is_match()); /// let (full, [year, month, day]) = caps.extract_bytes(hay); /// assert_eq!(b"2010-03-14", full); /// assert_eq!(b"2010", year); /// assert_eq!(b"03", month); /// assert_eq!(b"14", day); /// /// // We can also ask for fewer than all capture groups. /// let (full, [year]) = caps.extract_bytes(hay); /// assert_eq!(b"2010-03-14", full); /// assert_eq!(b"2010", year); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn extract_bytes<'h, const N: usize>( &self, haystack: &'h [u8], ) -> (&'h [u8], [&'h [u8]; N]) { let mut matched = self.iter().flatten(); let whole_match = &haystack[matched.next().expect("a match")]; let group_matches = [0; N].map(|_| { let sp = matched.next().expect("too few matching groups"); &haystack[sp] }); (whole_match, group_matches) } } /// Lower level "slot" oriented APIs. One does not typically need to use these /// when executing a search. They are instead mostly intended for folks that /// are writing their own regex engine while reusing this `Captures` type. impl Captures { /// Clear this `Captures` value. /// /// After clearing, all slots inside this `Captures` value will be set to /// `None`. Similarly, any pattern ID that it was previously associated /// with (for a match) is erased. /// /// It is not usually necessary to call this routine. Namely, a `Captures` /// value only provides high level access to the capturing groups of the /// pattern that matched, and only low level access to individual slots. /// Thus, even if slots corresponding to groups that aren't associated /// with the matching pattern are set, then it won't impact the higher /// level APIs. Namely, higher level APIs like [`Captures::get_group`] will /// return `None` if no pattern ID is present, even if there are spans set /// in the underlying slots. /// /// Thus, to "clear" a `Captures` value of a match, it is usually only /// necessary to call [`Captures::set_pattern`] with `None`. /// /// # Example /// /// This example shows what happens when a `Captures` value is cleared. /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::nfa::thompson::pikevm::PikeVM; /// /// let re = PikeVM::new(r"^(?P<first>\pL+)\s+(?P<last>\pL+)$")?; /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); /// /// re.captures(&mut cache, "<NAME>", &mut caps); /// assert!(caps.is_match()); /// let slots: Vec<Option<usize>> = /// caps.slots().iter().map(|s| s.map(|x| x.get())).collect(); /// // Note that the following ordering is considered an API guarantee. 
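    /// // Slots 0/1 hold group 0, slots 2/3 hold 'first', slots 4/5 hold 'last'.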
/// assert_eq!(slots, vec![ /// Some(0), /// Some(17), /// Some(0), /// Some(5), /// Some(6), /// Some(17), /// ]); /// /// // Now clear the slots. Everything is gone and it is no longer a match. /// caps.clear(); /// assert!(!caps.is_match()); /// let slots: Vec<Option<usize>> = /// caps.slots().iter().map(|s| s.map(|x| x.get())).collect(); /// assert_eq!(slots, vec![ /// None, /// None, /// None, /// None, /// None, /// None, /// ]); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn clear(&mut self) { self.pid = None; for slot in self.slots.iter_mut() { *slot = None; } } /// Set the pattern on this `Captures` value. /// /// When the pattern ID is `None`, then this `Captures` value does not /// correspond to a match (`is_match` will return `false`). Otherwise, it /// corresponds to a match. /// /// This is useful in search implementations where you might want to /// initially call `set_pattern(None)` in order to avoid the cost of /// calling `clear()` if it turns out to not be necessary. /// /// # Example /// /// This example shows that `set_pattern` merely overwrites the pattern ID. /// It does not actually change the underlying slot values. /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::nfa::thompson::pikevm::PikeVM; /// /// let re = PikeVM::new(r"^(?P<first>\pL+)\s+(?P<last>\pL+)$")?; /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); /// /// re.captures(&mut cache, "<NAME>", &mut caps); /// assert!(caps.is_match()); /// assert!(caps.pattern().is_some()); /// let slots: Vec<Option<usize>> = /// caps.slots().iter().map(|s| s.map(|x| x.get())).collect(); /// // Note that the following ordering is considered an API guarantee. /// assert_eq!(slots, vec![ /// Some(0), /// Some(17), /// Some(0), /// Some(5), /// Some(6), /// Some(17), /// ]); /// /// // Now set the pattern to None. Note that the slot values remain. /// caps.set_pattern(None); /// assert!(!caps.is_match()); /// assert!(!caps.pattern().is_some()); /// let slots: Vec<Option<usize>> = /// caps.slots().iter().map(|s| s.map(|x| x.get())).collect(); /// // Note that the following ordering is considered an API guarantee. /// assert_eq!(slots, vec![ /// Some(0), /// Some(17), /// Some(0), /// Some(5), /// Some(6), /// Some(17), /// ]); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn set_pattern(&mut self, pid: Option<PatternID>) { self.pid = pid; } /// Returns the underlying slots, where each slot stores a single offset. /// /// Every matching capturing group generally corresponds to two slots: one /// slot for the starting position and another for the ending position. /// Typically, either both are present or neither are. (The weasel word /// "typically" is used here because it really depends on the regex engine /// implementation. Every sensible regex engine likely adheres to this /// invariant, and every regex engine in this crate is sensible.) /// /// Generally speaking, callers should prefer to use higher level routines /// like [`Captures::get_match`] or [`Captures::get_group`]. /// /// An important note here is that a regex engine may not reset all of the /// slots to `None` values when no match occurs, or even when a match of /// a different pattern occurs. But this depends on how the regex engine /// implementation deals with slots. /// /// # Example /// /// This example shows how to get the underlying slots from a regex match. 
/// /// ``` /// use regex_automata::{ /// nfa::thompson::pikevm::PikeVM, /// util::primitives::{PatternID, NonMaxUsize}, /// }; /// /// let re = PikeVM::new_many(&[ /// r"[a-z]+", /// r"[0-9]+", /// ])?; /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); /// /// re.captures(&mut cache, "123", &mut caps); /// assert_eq!(Some(PatternID::must(1)), caps.pattern()); /// // Note that the only guarantee we have here is that slots 2 and 3 /// // are set to correct values. The contents of the first two slots are /// // unspecified since the 0th pattern did not match. /// let expected = &[ /// None, /// None, /// NonMaxUsize::new(0), /// NonMaxUsize::new(3), /// ]; /// assert_eq!(expected, caps.slots()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn slots(&self) -> &[Option<NonMaxUsize>] { &self.slots } /// Returns the underlying slots as a mutable slice, where each slot stores /// a single offset. /// /// This tends to be most useful for regex engine implementations for /// writing offsets for matching capturing groups to slots. /// /// See [`Captures::slots`] for more information about slots. #[inline] pub fn slots_mut(&mut self) -> &mut [Option<NonMaxUsize>] { &mut self.slots } } impl core::fmt::Debug for Captures { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { let mut dstruct = f.debug_struct("Captures"); dstruct.field("pid", &self.pid); if let Some(pid) = self.pid { dstruct.field("spans", &CapturesDebugMap { pid, caps: self }); } dstruct.finish() } } /// A little helper type to provide a nice map-like debug representation for /// our capturing group spans. struct CapturesDebugMap<'a> { pid: PatternID, caps: &'a Captures, } impl<'a> core::fmt::Debug for CapturesDebugMap<'a> { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { struct Key<'a>(usize, Option<&'a str>); impl<'a> core::fmt::Debug for Key<'a> { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { write!(f, "{}", self.0)?; if let Some(name) = self.1 { write!(f, "/{:?}", name)?; } Ok(()) } } let mut map = f.debug_map(); let names = self.caps.group_info().pattern_names(self.pid); for (group_index, maybe_name) in names.enumerate() { let key = Key(group_index, maybe_name); match self.caps.get_group(group_index) { None => map.entry(&key, &None::<()>), Some(span) => map.entry(&key, &span), }; } map.finish() } } /// An iterator over all capturing groups in a `Captures` value. /// /// This iterator includes capturing groups that did not participate in a /// match. See the [`Captures::iter`] method documentation for more details /// and examples. /// /// The lifetime parameter `'a` refers to the lifetime of the underlying /// `Captures` value. #[derive(Clone, Debug)] pub struct CapturesPatternIter<'a> { caps: &'a Captures, names: core::iter::Enumerate<GroupInfoPatternNames<'a>>, } impl<'a> Iterator for CapturesPatternIter<'a> { type Item = Option<Span>; fn next(&mut self) -> Option<Option<Span>> { let (group_index, _) = self.names.next()?; Some(self.caps.get_group(group_index)) } fn size_hint(&self) -> (usize, Option<usize>) { self.names.size_hint() } fn count(self) -> usize { self.names.count() } } impl<'a> ExactSizeIterator for CapturesPatternIter<'a> {} impl<'a> core::iter::FusedIterator for CapturesPatternIter<'a> {} /// Represents information about capturing groups in a compiled regex. /// /// The information encapsulated by this type consists of the following. 
For /// each pattern: /// /// * A map from every capture group name to its corresponding capture group /// index. /// * A map from every capture group index to its corresponding capture group /// name. /// * A map from capture group index to its corresponding slot index. A slot /// refers to one half of a capturing group. That is, a capture slot is either /// the start or end of a capturing group. A slot is usually the mechanism /// by which a regex engine records offsets for each capturing group during a /// search. /// /// A `GroupInfo` uses reference counting internally and is thus cheap to /// clone. /// /// # Mapping from capture groups to slots /// /// One of the main responsibilities of a `GroupInfo` is to build a mapping /// from `(PatternID, u32)` (where the `u32` is a capture index) to something /// called a "slot." As mentioned above, a slot refers to one half of a /// capturing group. Both combined provide the start and end offsets of /// a capturing group that participated in a match. /// /// **The mapping between group indices and slots is an API guarantee.** That /// is, the mapping won't change within a semver compatible release. /// /// Slots exist primarily because this is a convenient mechanism by which /// regex engines report group offsets at search time. For example, the /// [`nfa::thompson::State::Capture`](crate::nfa::thompson::State::Capture) /// NFA state includes the slot index. When a regex engine transitions through /// this state, it will likely use the slot index to write the current haystack /// offset to some region of memory. When a match is found, those slots are /// then reported to the caller, typically via a convenient abstraction like a /// [`Captures`] value. /// /// Because this crate provides first class support for multi-pattern regexes, /// and because of some performance related reasons, the mapping between /// capturing groups and slots is a little complex. However, in the case of a /// single pattern, the mapping can be described very simply: for all capture /// group indices `i`, its corresponding slots are at `i * 2` and `i * 2 + 1`. /// Notice that the pattern ID isn't involved at all here, because it only /// applies to a single-pattern regex, it is therefore always `0`. /// /// In the multi-pattern case, the mapping is a bit more complicated. To talk /// about it, we must define what we mean by "implicit" vs "explicit" /// capturing groups: /// /// * An **implicit** capturing group refers to the capturing group that is /// present for every pattern automatically, and corresponds to the overall /// match of a pattern. Every pattern has precisely one implicit capturing /// group. It is always unnamed and it always corresponds to the capture group /// index `0`. /// * An **explicit** capturing group refers to any capturing group that /// appears in the concrete syntax of the pattern. (Or, if an NFA was hand /// built without any concrete syntax, it refers to any capturing group with an /// index greater than `0`.) /// /// Some examples: /// /// * `\w+` has one implicit capturing group and zero explicit capturing /// groups. /// * `(\w+)` has one implicit group and one explicit group. /// * `foo(\d+)(?:\pL+)(\d+)` has one implicit group and two explicit groups. /// /// Turning back to the slot mapping, we can now state it as follows: /// /// * Given a pattern ID `pid`, the slots for its implicit group are always /// at `pid * 2` and `pid * 2 + 1`. 
/// * Given a pattern ID `0`, the slots for its explicit groups start /// at `group_info.pattern_len() * 2`. /// * Given a pattern ID `pid > 0`, the slots for its explicit groups start /// immediately following where the slots for the explicit groups of `pid - 1` /// end. /// /// In particular, while there is a concrete formula one can use to determine /// where the slots for the implicit group of any pattern are, there is no /// general formula for determining where the slots for explicit capturing /// groups are. This is because each pattern can contain a different number /// of groups. /// /// The intended way of getting the slots for a particular capturing group /// (whether implicit or explicit) is via the [`GroupInfo::slot`] or /// [`GroupInfo::slots`] method. /// /// See below for a concrete example of how capturing groups get mapped to /// slots. /// /// # Example /// /// This example shows how to build a new `GroupInfo` and query it for /// information. /// /// ``` /// use regex_automata::util::{captures::GroupInfo, primitives::PatternID}; /// /// let info = GroupInfo::new(vec![ /// vec![None, Some("foo")], /// vec![None], /// vec![None, None, None, Some("bar"), None], /// vec![None, None, Some("foo")], /// ])?; /// // The number of patterns being tracked. /// assert_eq!(4, info.pattern_len()); /// // We can query the number of groups for any pattern. /// assert_eq!(2, info.group_len(PatternID::must(0))); /// assert_eq!(1, info.group_len(PatternID::must(1))); /// assert_eq!(5, info.group_len(PatternID::must(2))); /// assert_eq!(3, info.group_len(PatternID::must(3))); /// // An invalid pattern always has zero groups. /// assert_eq!(0, info.group_len(PatternID::must(999))); /// // 2 slots per group /// assert_eq!(22, info.slot_len()); /// /// // We can map a group index for a particular pattern to its name, if /// // one exists. /// assert_eq!(Some("foo"), info.to_name(PatternID::must(3), 2)); /// assert_eq!(None, info.to_name(PatternID::must(2), 4)); /// // Or map a name to its group index. /// assert_eq!(Some(1), info.to_index(PatternID::must(0), "foo")); /// assert_eq!(Some(2), info.to_index(PatternID::must(3), "foo")); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// # Example: mapping from capture groups to slots /// /// This example shows the specific mapping from capture group indices for /// each pattern to their corresponding slots. The slot values shown in this /// example are considered an API guarantee. /// /// ``` /// use regex_automata::util::{captures::GroupInfo, primitives::PatternID}; /// /// let info = GroupInfo::new(vec![ /// vec![None, Some("foo")], /// vec![None], /// vec![None, None, None, Some("bar"), None], /// vec![None, None, Some("foo")], /// ])?; /// /// // We first show the slots for each pattern's implicit group. /// assert_eq!(Some((0, 1)), info.slots(PatternID::must(0), 0)); /// assert_eq!(Some((2, 3)), info.slots(PatternID::must(1), 0)); /// assert_eq!(Some((4, 5)), info.slots(PatternID::must(2), 0)); /// assert_eq!(Some((6, 7)), info.slots(PatternID::must(3), 0)); /// /// // And now we show the slots for each pattern's explicit group. 
/// assert_eq!(Some((8, 9)), info.slots(PatternID::must(0), 1)); /// assert_eq!(Some((10, 11)), info.slots(PatternID::must(2), 1)); /// assert_eq!(Some((12, 13)), info.slots(PatternID::must(2), 2)); /// assert_eq!(Some((14, 15)), info.slots(PatternID::must(2), 3)); /// assert_eq!(Some((16, 17)), info.slots(PatternID::must(2), 4)); /// assert_eq!(Some((18, 19)), info.slots(PatternID::must(3), 1)); /// assert_eq!(Some((20, 21)), info.slots(PatternID::must(3), 2)); /// /// // Asking for the slots for an invalid pattern ID or even for an invalid /// // group index for a specific pattern will return None. So for example, /// // you're guaranteed to not get the slots for a different pattern than the /// // one requested. /// assert_eq!(None, info.slots(PatternID::must(5), 0)); /// assert_eq!(None, info.slots(PatternID::must(1), 1)); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[derive(Clone, Debug, Default)] pub struct GroupInfo(Arc<GroupInfoInner>); impl GroupInfo { /// Creates a new group info from a sequence of patterns, where each /// sequence of patterns yields a sequence of possible group names. The /// index of each pattern in the sequence corresponds to its `PatternID`, /// and the index of each group in each pattern's sequence corresponds to /// its corresponding group index. /// /// While this constructor is very generic and therefore perhaps hard to /// chew on, an example of a valid concrete type that can be passed to /// this constructor is `Vec<Vec<Option<String>>>`. The outer `Vec` /// corresponds to the patterns, i.e., one `Vec<Option<String>>` per /// pattern. The inner `Vec` corresponds to the capturing groups for /// each pattern. The `Option<String>` corresponds to the name of the /// capturing group, if present. /// /// It is legal to pass an empty iterator to this constructor. It will /// return an empty group info with zero slots. An empty group info is /// useful for cases where you have no patterns or for cases where slots /// aren't being used at all (e.g., for most DFAs in this crate). /// /// # Errors /// /// This constructor returns an error if the given capturing groups are /// invalid in some way. Those reasons include, but are not necessarily /// limited to: /// /// * Too many patterns (i.e., `PatternID` would overflow). /// * Too many capturing groups (e.g., `u32` would overflow). /// * A pattern is given that has no capturing groups. (All patterns must /// have at least an implicit capturing group at index `0`.) /// * The capturing group at index `0` has a name. It must be unnamed. /// * There are duplicate capturing group names within the same pattern. /// (Multiple capturing groups with the same name may exist, but they /// must be in different patterns.) /// /// An example below shows how to trigger some of the above error /// conditions. /// /// # Example /// /// This example shows how to build a new `GroupInfo` and query it for /// information. /// /// ``` /// use regex_automata::util::captures::GroupInfo; /// /// let info = GroupInfo::new(vec![ /// vec![None, Some("foo")], /// vec![None], /// vec![None, None, None, Some("bar"), None], /// vec![None, None, Some("foo")], /// ])?; /// // The number of patterns being tracked. /// assert_eq!(4, info.pattern_len()); /// // 2 slots per group /// assert_eq!(22, info.slot_len()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// # Example: empty `GroupInfo` /// /// This example shows how to build a new `GroupInfo` and query it for /// information. 
/// /// ``` /// use regex_automata::util::captures::GroupInfo; /// /// let info = GroupInfo::empty(); /// // Everything is zero. /// assert_eq!(0, info.pattern_len()); /// assert_eq!(0, info.slot_len()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// # Example: error conditions /// /// This example shows how to provoke some of the ways in which building /// a `GroupInfo` can fail. /// /// ``` /// use regex_automata::util::captures::GroupInfo; /// /// // Either the group info is empty, or all patterns must have at least /// // one capturing group. /// assert!(GroupInfo::new(vec![ /// vec![None, Some("a")], // ok /// vec![None], // ok /// vec![], // not ok /// ]).is_err()); /// // Note that building an empty group info is OK. /// assert!(GroupInfo::new(Vec::<Vec<Option<String>>>::new()).is_ok()); /// /// // The first group in each pattern must correspond to an implicit /// // anonymous group. i.e., One that is not named. By convention, this /// // group corresponds to the overall match of a regex. Every other group /// // in a pattern is explicit and optional. /// assert!(GroupInfo::new(vec![vec![Some("foo")]]).is_err()); /// /// // There must not be duplicate group names within the same pattern. /// assert!(GroupInfo::new(vec![ /// vec![None, Some("foo"), Some("foo")], /// ]).is_err()); /// // But duplicate names across distinct patterns is OK. /// assert!(GroupInfo::new(vec![ /// vec![None, Some("foo")], /// vec![None, Some("foo")], /// ]).is_ok()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// There are other ways for building a `GroupInfo` to fail but are /// difficult to show. For example, if the number of patterns given would /// overflow `PatternID`. pub fn new<P, G, N>(pattern_groups: P) -> Result<GroupInfo, GroupInfoError> where P: IntoIterator<Item = G>, G: IntoIterator<Item = Option<N>>, N: AsRef<str>, { let mut group_info = GroupInfoInner { slot_ranges: vec![], name_to_index: vec![], index_to_name: vec![], memory_extra: 0, }; for (pattern_index, groups) in pattern_groups.into_iter().enumerate() { // If we can't convert the pattern index to an ID, then the caller // tried to build capture info for too many patterns. let pid = PatternID::new(pattern_index) .map_err(GroupInfoError::too_many_patterns)?; let mut groups_iter = groups.into_iter().enumerate(); match groups_iter.next() { None => return Err(GroupInfoError::missing_groups(pid)), Some((_, Some(_))) => { return Err(GroupInfoError::first_must_be_unnamed(pid)) } Some((_, None)) => {} } group_info.add_first_group(pid); // Now iterate over the rest, which correspond to all of the // (conventionally) explicit capture groups in a regex pattern. for (group_index, maybe_name) in groups_iter { // Just like for patterns, if the group index can't be // converted to a "small" index, then the caller has given too // many groups for a particular pattern. let group = SmallIndex::new(group_index).map_err(|_| { GroupInfoError::too_many_groups(pid, group_index) })?; group_info.add_explicit_group(pid, group, maybe_name)?; } } group_info.fixup_slot_ranges()?; Ok(GroupInfo(Arc::new(group_info))) } /// This creates an empty `GroupInfo`. /// /// This is a convenience routine for calling `GroupInfo::new` with an /// iterator that yields no elements. /// /// # Example /// /// This example shows how to build a new empty `GroupInfo` and query it /// for information. /// /// ``` /// use regex_automata::util::captures::GroupInfo; /// /// let info = GroupInfo::empty(); /// // Everything is zero. 
    /// assert_eq!(0, info.pattern_len());
    /// assert_eq!(0, info.all_group_len());
    /// assert_eq!(0, info.slot_len());
    ///
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```
    pub fn empty() -> GroupInfo {
        GroupInfo::new(core::iter::empty::<[Option<&str>; 0]>())
            .expect("empty group info is always valid")
    }

    /// Return the capture group index corresponding to the given name in the
    /// given pattern. If no such capture group name exists in the given
    /// pattern, then this returns `None`.
    ///
    /// If the given pattern ID is invalid, then this returns `None`.
    ///
    /// This also returns `None` for all inputs if this `GroupInfo` is empty
    /// (e.g., built by [`GroupInfo::empty`]). To check whether captures are
    /// present for a specific pattern, use [`GroupInfo::group_len`].
    ///
    /// # Example
    ///
    /// This example shows how to find the capture index for the given pattern
    /// and group name.
    ///
    /// Remember that capture indices are relative to the pattern, such that
    /// the same capture index value may refer to different capturing groups
    /// for distinct patterns.
    ///
    /// ```
    /// # if cfg!(miri) { return Ok(()); } // miri takes too long
    /// use regex_automata::{nfa::thompson::NFA, PatternID};
    ///
    /// let (pid0, pid1) = (PatternID::must(0), PatternID::must(1));
    ///
    /// let nfa = NFA::new_many(&[
    ///     r"a(?P<quux>\w+)z(?P<foo>\s+)",
    ///     r"a(?P<foo>\d+)z",
    /// ])?;
    /// let groups = nfa.group_info();
    /// assert_eq!(Some(2), groups.to_index(pid0, "foo"));
    /// // Recall that capture index 0 is always unnamed and refers to the
    /// // entire pattern. So the first capturing group present in the pattern
    /// // itself always starts at index 1.
    /// assert_eq!(Some(1), groups.to_index(pid1, "foo"));
    ///
    /// // And if a name does not exist for a particular pattern, None is
    /// // returned.
    /// assert!(groups.to_index(pid0, "quux").is_some());
    /// assert!(groups.to_index(pid1, "quux").is_none());
    ///
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```
    #[inline]
    pub fn to_index(&self, pid: PatternID, name: &str) -> Option<usize> {
        let indices = self.0.name_to_index.get(pid.as_usize())?;
        indices.get(name).cloned().map(|i| i.as_usize())
    }

    /// Return the capture name for the given index and given pattern. If the
    /// corresponding group does not have a name, then this returns `None`.
    ///
    /// If the pattern ID is invalid, then this returns `None`.
    ///
    /// If the group index is invalid for the given pattern, then this returns
    /// `None`. A group `index` is valid for a pattern `pid` in an `nfa` if and
    /// only if `index < nfa.pattern_capture_len(pid)`.
    ///
    /// This also returns `None` for all inputs if this `GroupInfo` is empty
    /// (e.g., built by [`GroupInfo::empty`]). To check whether captures are
    /// present for a specific pattern, use [`GroupInfo::group_len`].
    ///
    /// # Example
    ///
    /// This example shows how to find the capture group name for the given
    /// pattern and group index.
    ///
    /// ```
    /// # if cfg!(miri) { return Ok(()); } // miri takes too long
    /// use regex_automata::{nfa::thompson::NFA, PatternID};
    ///
    /// let (pid0, pid1) = (PatternID::must(0), PatternID::must(1));
    ///
    /// let nfa = NFA::new_many(&[
    ///     r"a(?P<foo>\w+)z(\s+)x(\d+)",
    ///     r"a(\d+)z(?P<foo>\s+)",
    /// ])?;
    /// let groups = nfa.group_info();
    /// assert_eq!(None, groups.to_name(pid0, 0));
    /// assert_eq!(Some("foo"), groups.to_name(pid0, 1));
    /// assert_eq!(None, groups.to_name(pid0, 2));
    /// assert_eq!(None, groups.to_name(pid0, 3));
    ///
    /// assert_eq!(None, groups.to_name(pid1, 0));
    /// assert_eq!(None, groups.to_name(pid1, 1));
    /// assert_eq!(Some("foo"), groups.to_name(pid1, 2));
    /// // '3' is not a valid capture index for the second pattern.
    /// assert_eq!(None, groups.to_name(pid1, 3));
    ///
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```
    #[inline]
    pub fn to_name(&self, pid: PatternID, group_index: usize) -> Option<&str> {
        let pattern_names = self.0.index_to_name.get(pid.as_usize())?;
        pattern_names.get(group_index)?.as_deref()
    }

    /// Return an iterator of all capture groups and their names (if present)
    /// for a particular pattern.
    ///
    /// If the given pattern ID is invalid or if this `GroupInfo` is empty,
    /// then the iterator yields no elements.
    ///
    /// The number of elements yielded by this iterator is always equal to
    /// the result of calling [`GroupInfo::group_len`] with the same
    /// `PatternID`.
    ///
    /// # Example
    ///
    /// This example shows how to get a list of all capture group names for
    /// a particular pattern.
    ///
    /// ```
    /// use regex_automata::{nfa::thompson::NFA, PatternID};
    ///
    /// let nfa = NFA::new(r"(a)(?P<foo>b)(c)(d)(?P<bar>e)")?;
    /// // The first is the implicit group that is always unnamed. The next
    /// // 5 groups are the explicit groups found in the concrete syntax above.
    /// let expected = vec![None, None, Some("foo"), None, None, Some("bar")];
    /// let got: Vec<Option<&str>> =
    ///     nfa.group_info().pattern_names(PatternID::ZERO).collect();
    /// assert_eq!(expected, got);
    ///
    /// // Using an invalid pattern ID will result in nothing yielded.
    /// let got = nfa.group_info().pattern_names(PatternID::must(999)).count();
    /// assert_eq!(0, got);
    ///
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```
    #[inline]
    pub fn pattern_names(&self, pid: PatternID) -> GroupInfoPatternNames<'_> {
        GroupInfoPatternNames {
            it: self
                .0
                .index_to_name
                .get(pid.as_usize())
                .map(|indices| indices.iter())
                .unwrap_or([].iter()),
        }
    }

    /// Return an iterator of all capture groups for all patterns supported by
    /// this `GroupInfo`. Each item yielded is a triple of the group's pattern
    /// ID, index in the pattern and the group's name, if present.
    ///
    /// # Example
    ///
    /// This example shows how to get a list of all capture groups found in
    /// one NFA, potentially spanning multiple patterns.
/// /// ``` /// use regex_automata::{nfa::thompson::NFA, PatternID}; /// /// let nfa = NFA::new_many(&[ /// r"(?P<foo>a)", /// r"a", /// r"(a)", /// ])?; /// let expected = vec![ /// (PatternID::must(0), 0, None), /// (PatternID::must(0), 1, Some("foo")), /// (PatternID::must(1), 0, None), /// (PatternID::must(2), 0, None), /// (PatternID::must(2), 1, None), /// ]; /// let got: Vec<(PatternID, usize, Option<&str>)> = /// nfa.group_info().all_names().collect(); /// assert_eq!(expected, got); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// Unlike other capturing group related routines, this routine doesn't /// panic even if captures aren't enabled on this NFA: /// /// ``` /// use regex_automata::nfa::thompson::{NFA, WhichCaptures}; /// /// let nfa = NFA::compiler() /// .configure(NFA::config().which_captures(WhichCaptures::None)) /// .build_many(&[ /// r"(?P<foo>a)", /// r"a", /// r"(a)", /// ])?; /// // When captures aren't enabled, there's nothing to return. /// assert_eq!(0, nfa.group_info().all_names().count()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn all_names(&self) -> GroupInfoAllNames<'_> { GroupInfoAllNames { group_info: self, pids: PatternID::iter(self.pattern_len()), current_pid: None, names: None, } } /// Returns the starting and ending slot corresponding to the given /// capturing group for the given pattern. The ending slot is always one /// more than the starting slot returned. /// /// Note that this is like [`GroupInfo::slot`], except that it also returns /// the ending slot value for convenience. /// /// If either the pattern ID or the capture index is invalid, then this /// returns None. /// /// # Example /// /// This example shows that the starting slots for the first capturing /// group of each pattern are distinct. /// /// ``` /// use regex_automata::{nfa::thompson::NFA, PatternID}; /// /// let nfa = NFA::new_many(&["a", "b"])?; /// assert_ne!( /// nfa.group_info().slots(PatternID::must(0), 0), /// nfa.group_info().slots(PatternID::must(1), 0), /// ); /// /// // Also, the start and end slot values are never equivalent. /// let (start, end) = nfa.group_info().slots(PatternID::ZERO, 0).unwrap(); /// assert_ne!(start, end); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn slots( &self, pid: PatternID, group_index: usize, ) -> Option<(usize, usize)> { // Since 'slot' only even returns valid starting slots, we know that // there must also be an end slot and that end slot is always one more // than the start slot. self.slot(pid, group_index).map(|start| (start, start + 1)) } /// Returns the starting slot corresponding to the given capturing group /// for the given pattern. The ending slot is always one more than the /// value returned. /// /// If either the pattern ID or the capture index is invalid, then this /// returns None. /// /// # Example /// /// This example shows that the starting slots for the first capturing /// group of each pattern are distinct. /// /// ``` /// use regex_automata::{nfa::thompson::NFA, PatternID}; /// /// let nfa = NFA::new_many(&["a", "b"])?; /// assert_ne!( /// nfa.group_info().slot(PatternID::must(0), 0), /// nfa.group_info().slot(PatternID::must(1), 0), /// ); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn slot(&self, pid: PatternID, group_index: usize) -> Option<usize> { if group_index >= self.group_len(pid) { return None; } // At this point, we know that 'pid' refers to a real pattern and that // 'group_index' refers to a real group. 
We therefore also know that // the pattern and group can be combined to return a correct slot. // That's why we don't need to use checked arithmetic below. if group_index == 0 { Some(pid.as_usize() * 2) } else { // As above, we don't need to check that our slot is less than the // end of our range since we already know the group index is a // valid index for the given pattern. let (start, _) = self.0.slot_ranges[pid]; Some(start.as_usize() + ((group_index - 1) * 2)) } } /// Returns the total number of patterns in this `GroupInfo`. /// /// This may return zero if the `GroupInfo` was constructed with no /// patterns. /// /// This is guaranteed to be no bigger than [`PatternID::LIMIT`] because /// `GroupInfo` construction will fail if too many patterns are added. /// /// # Example /// /// ``` /// use regex_automata::nfa::thompson::NFA; /// /// let nfa = NFA::new_many(&["[0-9]+", "[a-z]+", "[A-Z]+"])?; /// assert_eq!(3, nfa.group_info().pattern_len()); /// /// let nfa = NFA::never_match(); /// assert_eq!(0, nfa.group_info().pattern_len()); /// /// let nfa = NFA::always_match(); /// assert_eq!(1, nfa.group_info().pattern_len()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn pattern_len(&self) -> usize { self.0.pattern_len() } /// Return the number of capture groups in a pattern. /// /// If the pattern ID is invalid, then this returns `0`. /// /// # Example /// /// This example shows how the values returned by this routine may vary /// for different patterns and NFA configurations. /// /// ``` /// use regex_automata::{nfa::thompson::{NFA, WhichCaptures}, PatternID}; /// /// let nfa = NFA::new(r"(a)(b)(c)")?; /// // There are 3 explicit groups in the pattern's concrete syntax and /// // 1 unnamed and implicit group spanning the entire pattern. /// assert_eq!(4, nfa.group_info().group_len(PatternID::ZERO)); /// /// let nfa = NFA::new(r"abc")?; /// // There is just the unnamed implicit group. /// assert_eq!(1, nfa.group_info().group_len(PatternID::ZERO)); /// /// let nfa = NFA::compiler() /// .configure(NFA::config().which_captures(WhichCaptures::None)) /// .build(r"abc")?; /// // We disabled capturing groups, so there are none. /// assert_eq!(0, nfa.group_info().group_len(PatternID::ZERO)); /// /// let nfa = NFA::compiler() /// .configure(NFA::config().which_captures(WhichCaptures::None)) /// .build(r"(a)(b)(c)")?; /// // We disabled capturing groups, so there are none, even if there are /// // explicit groups in the concrete syntax. /// assert_eq!(0, nfa.group_info().group_len(PatternID::ZERO)); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn group_len(&self, pid: PatternID) -> usize { self.0.group_len(pid) } /// Return the total number of capture groups across all patterns. /// /// This includes implicit groups that represent the entire match of a /// pattern. /// /// # Example /// /// This example shows how the values returned by this routine may vary /// for different patterns and NFA configurations. /// /// ``` /// use regex_automata::{nfa::thompson::{NFA, WhichCaptures}, PatternID}; /// /// let nfa = NFA::new(r"(a)(b)(c)")?; /// // There are 3 explicit groups in the pattern's concrete syntax and /// // 1 unnamed and implicit group spanning the entire pattern. /// assert_eq!(4, nfa.group_info().all_group_len()); /// /// let nfa = NFA::new(r"abc")?; /// // There is just the unnamed implicit group. 
    /// assert_eq!(1, nfa.group_info().all_group_len());
    ///
    /// let nfa = NFA::new_many(&["(a)", "b", "(c)"])?;
    /// // Each pattern has one implicit group, and two
    /// // patterns have one explicit group each.
    /// assert_eq!(5, nfa.group_info().all_group_len());
    ///
    /// let nfa = NFA::compiler()
    ///     .configure(NFA::config().which_captures(WhichCaptures::None))
    ///     .build(r"abc")?;
    /// // We disabled capturing groups, so there are none.
    /// assert_eq!(0, nfa.group_info().all_group_len());
    ///
    /// let nfa = NFA::compiler()
    ///     .configure(NFA::config().which_captures(WhichCaptures::None))
    ///     .build(r"(a)(b)(c)")?;
    /// // We disabled capturing groups, so there are none, even if there are
    /// // explicit groups in the concrete syntax.
    /// assert_eq!(0, nfa.group_info().group_len(PatternID::ZERO));
    ///
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```
    #[inline]
    pub fn all_group_len(&self) -> usize {
        self.slot_len() / 2
    }

    /// Returns the total number of slots in this `GroupInfo` across all
    /// patterns.
    ///
    /// The total number of slots is always twice the total number of capturing
    /// groups, including both implicit and explicit groups.
    ///
    /// # Example
    ///
    /// This example shows the relationship between the number of capturing
    /// groups and slots.
    ///
    /// ```
    /// use regex_automata::util::captures::GroupInfo;
    ///
    /// // There are 11 total groups here.
    /// let info = GroupInfo::new(vec![
    ///     vec![None, Some("foo")],
    ///     vec![None],
    ///     vec![None, None, None, Some("bar"), None],
    ///     vec![None, None, Some("foo")],
    /// ])?;
    /// // 2 slots per group gives us 11*2=22 slots.
    /// assert_eq!(22, info.slot_len());
    ///
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```
    #[inline]
    pub fn slot_len(&self) -> usize {
        self.0.small_slot_len().as_usize()
    }

    /// Returns the total number of slots for implicit capturing groups.
    ///
    /// This is like [`GroupInfo::slot_len`], except it doesn't include the
    /// explicit slots for each pattern. Since there are always exactly 2
    /// implicit slots for each pattern, the number of implicit slots is always
    /// equal to twice the number of patterns.
    ///
    /// # Example
    ///
    /// This example shows the relationship between the number of capturing
    /// groups, implicit slots and explicit slots.
    ///
    /// ```
    /// use regex_automata::util::captures::GroupInfo;
    ///
    /// // There are 3 total groups here.
    /// let info = GroupInfo::new(vec![vec![None, Some("foo"), Some("bar")]])?;
    /// // 2 slots per group gives us 3*2=6 slots.
    /// assert_eq!(6, info.slot_len());
    /// // 2 implicit slots per pattern gives us 2 implicit slots since there
    /// // is 1 pattern.
    /// assert_eq!(2, info.implicit_slot_len());
    /// // 2 explicit capturing groups gives us 2*2=4 explicit slots.
    /// assert_eq!(4, info.explicit_slot_len());
    ///
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```
    #[inline]
    pub fn implicit_slot_len(&self) -> usize {
        self.pattern_len() * 2
    }

    /// Returns the total number of slots for explicit capturing groups.
    ///
    /// This is like [`GroupInfo::slot_len`], except it doesn't include the
    /// implicit slots for each pattern. (There are always 2 implicit slots for
    /// each pattern.)
    ///
    /// For a non-empty `GroupInfo`, it is always the case that `slot_len` is
    /// strictly greater than `explicit_slot_len`. For an empty `GroupInfo`,
    /// both the total number of slots and the number of explicit slots are
    /// `0`.
    ///
    /// # Example
    ///
    /// This example shows the relationship between the number of capturing
    /// groups, implicit slots and explicit slots.
    ///
    /// ```
    /// use regex_automata::util::captures::GroupInfo;
    ///
    /// // There are 3 total groups here.
    /// let info = GroupInfo::new(vec![vec![None, Some("foo"), Some("bar")]])?;
    /// // 2 slots per group gives us 3*2=6 slots.
    /// assert_eq!(6, info.slot_len());
    /// // 2 implicit slots per pattern gives us 2 implicit slots since there
    /// // is 1 pattern.
    /// assert_eq!(2, info.implicit_slot_len());
    /// // 2 explicit capturing groups gives us 2*2=4 explicit slots.
    /// assert_eq!(4, info.explicit_slot_len());
    ///
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```
    #[inline]
    pub fn explicit_slot_len(&self) -> usize {
        self.slot_len().saturating_sub(self.implicit_slot_len())
    }

    /// Returns the memory usage, in bytes, of this `GroupInfo`.
    ///
    /// This does **not** include the stack size used up by this `GroupInfo`.
    /// To compute that, use `std::mem::size_of::<GroupInfo>()`.
    #[inline]
    pub fn memory_usage(&self) -> usize {
        use core::mem::size_of as s;

        s::<GroupInfoInner>()
            + self.0.slot_ranges.len() * s::<(SmallIndex, SmallIndex)>()
            + self.0.name_to_index.len() * s::<CaptureNameMap>()
            + self.0.index_to_name.len() * s::<Vec<Option<Arc<str>>>>()
            + self.0.memory_extra
    }
}

/// A map from capture group name to its corresponding capture group index.
///
/// This type is actually wrapped inside a Vec indexed by pattern ID on a
/// `GroupInfo`, since multiple patterns may have the same capture group name.
/// That is, each pattern gets its own namespace of capture group names.
///
/// Perhaps a more memory efficient representation would be
/// HashMap<(PatternID, Arc<str>), usize>, but this makes it difficult to look
/// up a capture index by name without producing an `Arc<str>`, which requires
/// an allocation. To fix this, I think we'd need to define our own unsized
/// type or something? Anyway, I didn't give this much thought since it
/// probably doesn't matter much in the grand scheme of things. But it did
/// stand out to me as mildly wasteful.
#[cfg(feature = "std")]
type CaptureNameMap = std::collections::HashMap<Arc<str>, SmallIndex>;
#[cfg(not(feature = "std"))]
type CaptureNameMap = alloc::collections::BTreeMap<Arc<str>, SmallIndex>;

/// The inner guts of `GroupInfo`. This type only exists so that it can
/// be wrapped in an `Arc` to make `GroupInfo` reference counted.
#[derive(Debug, Default)]
struct GroupInfoInner {
    slot_ranges: Vec<(SmallIndex, SmallIndex)>,
    name_to_index: Vec<CaptureNameMap>,
    index_to_name: Vec<Vec<Option<Arc<str>>>>,
    memory_extra: usize,
}

impl GroupInfoInner {
    /// This adds the first unnamed group for the given pattern ID. The given
    /// pattern ID must be zero if this is the first time this method is
    /// called, or must be exactly one more than the pattern ID supplied to the
    /// previous call to this method. (This method panics if this rule is
    /// violated.)
    ///
    /// This can be thought of as initializing the GroupInfo state for the
    /// given pattern and closing off the state for any previous pattern.
    fn add_first_group(&mut self, pid: PatternID) {
        assert_eq!(pid.as_usize(), self.slot_ranges.len());
        assert_eq!(pid.as_usize(), self.name_to_index.len());
        assert_eq!(pid.as_usize(), self.index_to_name.len());
        // This is the start of our slots for the explicit capturing groups.
        // Note that since the slots for the 0th group for every pattern appear
        // before any slots for the nth group (where n > 0) in any pattern, we
        // will have to fix up the slot ranges once we know how many patterns
        // we've added capture groups for.
let slot_start = self.small_slot_len(); self.slot_ranges.push((slot_start, slot_start)); self.name_to_index.push(CaptureNameMap::new()); self.index_to_name.push(vec![None]); self.memory_extra += core::mem::size_of::<Option<Arc<str>>>(); } /// Add an explicit capturing group for the given pattern with the given /// index. If the group has a name, then that must be given as well. /// /// Note that every capturing group except for the first or zeroth group is /// explicit. /// /// This returns an error if adding this group would result in overflowing /// slot indices or if a capturing group with the same name for this /// pattern has already been added. fn add_explicit_group<N: AsRef<str>>( &mut self, pid: PatternID, group: SmallIndex, maybe_name: Option<N>, ) -> Result<(), GroupInfoError> { // We also need to check that the slot index generated for // this group is also valid. Although, this is a little weird // because we offset these indices below, at which point, we'll // have to recheck them. Gosh this is annoying. Note that // the '+2' below is OK because 'end' is guaranteed to be less // than isize::MAX. let end = &mut self.slot_ranges[pid].1; *end = SmallIndex::new(end.as_usize() + 2).map_err(|_| { GroupInfoError::too_many_groups(pid, group.as_usize()) })?; if let Some(name) = maybe_name { let name = Arc::<str>::from(name.as_ref()); if self.name_to_index[pid].contains_key(&*name) { return Err(GroupInfoError::duplicate(pid, &name)); } let len = name.len(); self.name_to_index[pid].insert(Arc::clone(&name), group); self.index_to_name[pid].push(Some(name)); // Adds the memory used by the Arc<str> in both maps. self.memory_extra += 2 * (len + core::mem::size_of::<Option<Arc<str>>>()); // And also the value entry for the 'name_to_index' map. // This is probably an underestimate for 'name_to_index' since // hashmaps/btrees likely have some non-zero overhead, but we // assume here that they have zero overhead. self.memory_extra += core::mem::size_of::<SmallIndex>(); } else { self.index_to_name[pid].push(None); self.memory_extra += core::mem::size_of::<Option<Arc<str>>>(); } // This is a sanity assert that checks that our group index // is in line with the number of groups added so far for this // pattern. assert_eq!(group.one_more(), self.group_len(pid)); // And is also in line with the 'index_to_name' map. assert_eq!(group.one_more(), self.index_to_name[pid].len()); Ok(()) } /// This corrects the slot ranges to account for the slots corresponding /// to the zeroth group of each pattern. That is, every slot range is /// offset by 'pattern_len() * 2', since each pattern uses two slots to /// represent the zeroth group. fn fixup_slot_ranges(&mut self) -> Result<(), GroupInfoError> { use crate::util::primitives::IteratorIndexExt; // Since we know number of patterns fits in PatternID and // PatternID::MAX < isize::MAX, it follows that multiplying by 2 will // never overflow usize. let offset = self.pattern_len().checked_mul(2).unwrap(); for (pid, &mut (ref mut start, ref mut end)) in self.slot_ranges.iter_mut().with_pattern_ids() { let group_len = 1 + ((end.as_usize() - start.as_usize()) / 2); let new_end = match end.as_usize().checked_add(offset) { Some(new_end) => new_end, None => { return Err(GroupInfoError::too_many_groups( pid, group_len, )) } }; *end = SmallIndex::new(new_end).map_err(|_| { GroupInfoError::too_many_groups(pid, group_len) })?; // Since start <= end, if end is valid then start must be too. 
*start = SmallIndex::new(start.as_usize() + offset).unwrap(); } Ok(()) } /// Return the total number of patterns represented by this capture slot /// info. fn pattern_len(&self) -> usize { self.slot_ranges.len() } /// Return the total number of capturing groups for the given pattern. If /// the given pattern isn't valid for this capture slot info, then 0 is /// returned. fn group_len(&self, pid: PatternID) -> usize { let (start, end) = match self.slot_ranges.get(pid.as_usize()) { None => return 0, Some(range) => range, }; // The difference between any two SmallIndex values always fits in a // usize since we know that SmallIndex::MAX <= isize::MAX-1. We also // know that start<=end by construction and that the number of groups // never exceeds SmallIndex and thus never overflows usize. 1 + ((end.as_usize() - start.as_usize()) / 2) } /// Return the total number of slots in this capture slot info as a /// "small index." fn small_slot_len(&self) -> SmallIndex { // Since slots are allocated in order of pattern (starting at 0) and // then in order of capture group, it follows that the number of slots // is the end of the range of slots for the last pattern. This is // true even when the last pattern has no capturing groups, since // 'slot_ranges' will still represent it explicitly with an empty // range. self.slot_ranges.last().map_or(SmallIndex::ZERO, |&(_, end)| end) } } /// An error that may occur when building a `GroupInfo`. /// /// Building a `GroupInfo` does a variety of checks to make sure the /// capturing groups satisfy a number of invariants. This includes, but is not /// limited to, ensuring that the first capturing group is unnamed and that /// there are no duplicate capture groups for a specific pattern. #[derive(Clone, Debug)] pub struct GroupInfoError { kind: GroupInfoErrorKind, } /// The kind of error that occurs when building a `GroupInfo` fails. /// /// We keep this un-exported because it's not clear how useful it is to /// export it. #[derive(Clone, Debug)] enum GroupInfoErrorKind { /// This occurs when too many patterns have been added. i.e., It would /// otherwise overflow a `PatternID`. TooManyPatterns { err: PatternIDError }, /// This occurs when too many capturing groups have been added for a /// particular pattern. TooManyGroups { /// The ID of the pattern that had too many groups. pattern: PatternID, /// The minimum number of groups that the caller has tried to add for /// a pattern. minimum: usize, }, /// An error that occurs when a pattern has no capture groups. Either the /// group info must be empty, or all patterns must have at least one group /// (corresponding to the unnamed group for the entire pattern). MissingGroups { /// The ID of the pattern that had no capturing groups. pattern: PatternID, }, /// An error that occurs when one tries to provide a name for the capture /// group at index 0. This capturing group must currently always be /// unnamed. FirstMustBeUnnamed { /// The ID of the pattern that was found to have a named first /// capturing group. pattern: PatternID, }, /// An error that occurs when duplicate capture group names for the same /// pattern are added. /// /// NOTE: At time of writing, this error can never occur if you're using /// regex-syntax, since the parser itself will reject patterns with /// duplicate capture group names. This error can only occur when the /// builder is used to hand construct NFAs. Duplicate { /// The pattern in which the duplicate capture group name was found. pattern: PatternID, /// The duplicate name. 
name: String, }, } impl GroupInfoError { fn too_many_patterns(err: PatternIDError) -> GroupInfoError { GroupInfoError { kind: GroupInfoErrorKind::TooManyPatterns { err } } } fn too_many_groups(pattern: PatternID, minimum: usize) -> GroupInfoError { GroupInfoError { kind: GroupInfoErrorKind::TooManyGroups { pattern, minimum }, } } fn missing_groups(pattern: PatternID) -> GroupInfoError { GroupInfoError { kind: GroupInfoErrorKind::MissingGroups { pattern } } } fn first_must_be_unnamed(pattern: PatternID) -> GroupInfoError { GroupInfoError { kind: GroupInfoErrorKind::FirstMustBeUnnamed { pattern }, } } fn duplicate(pattern: PatternID, name: &str) -> GroupInfoError { GroupInfoError { kind: GroupInfoErrorKind::Duplicate { pattern, name: String::from(name), }, } } } #[cfg(feature = "std")] impl std::error::Error for GroupInfoError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match self.kind { GroupInfoErrorKind::TooManyPatterns { .. } | GroupInfoErrorKind::TooManyGroups { .. } | GroupInfoErrorKind::MissingGroups { .. } | GroupInfoErrorKind::FirstMustBeUnnamed { .. } | GroupInfoErrorKind::Duplicate { .. } => None, } } } impl core::fmt::Display for GroupInfoError { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { use self::GroupInfoErrorKind::*; match self.kind { TooManyPatterns { ref err } => { write!(f, "too many patterns to build capture info: {}", err) } TooManyGroups { pattern, minimum } => { write!( f, "too many capture groups (at least {}) were \ found for pattern {}", minimum, pattern.as_usize() ) } MissingGroups { pattern } => write!( f, "no capturing groups found for pattern {} \ (either all patterns have zero groups or all patterns have \ at least one group)", pattern.as_usize(), ), FirstMustBeUnnamed { pattern } => write!( f, "first capture group (at index 0) for pattern {} has a name \ (it must be unnamed)", pattern.as_usize(), ), Duplicate { pattern, ref name } => write!( f, "duplicate capture group name '{}' found for pattern {}", name, pattern.as_usize(), ), } } } /// An iterator over capturing groups and their names for a specific pattern. /// /// This iterator is created by [`GroupInfo::pattern_names`]. /// /// The lifetime parameter `'a` refers to the lifetime of the `GroupInfo` /// from which this iterator was created. #[derive(Clone, Debug)] pub struct GroupInfoPatternNames<'a> { it: core::slice::Iter<'a, Option<Arc<str>>>, } impl GroupInfoPatternNames<'static> { fn empty() -> GroupInfoPatternNames<'static> { GroupInfoPatternNames { it: [].iter() } } } impl<'a> Iterator for GroupInfoPatternNames<'a> { type Item = Option<&'a str>; fn next(&mut self) -> Option<Option<&'a str>> { self.it.next().map(|x| x.as_deref()) } fn size_hint(&self) -> (usize, Option<usize>) { self.it.size_hint() } fn count(self) -> usize { self.it.count() } } impl<'a> ExactSizeIterator for GroupInfoPatternNames<'a> {} impl<'a> core::iter::FusedIterator for GroupInfoPatternNames<'a> {} /// An iterator over capturing groups and their names for a `GroupInfo`. /// /// This iterator is created by [`GroupInfo::all_names`]. /// /// The lifetime parameter `'a` refers to the lifetime of the `GroupInfo` /// from which this iterator was created. 
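///
/// # Example
///
/// A minimal sketch of what this iterator yields: one
/// `(PatternID, usize, Option<&str>)` triple per capturing group, for every
/// pattern in order, using a hand-built `GroupInfo` rather than an NFA.
///
/// ```
/// use regex_automata::util::{captures::GroupInfo, primitives::PatternID};
///
/// let info = GroupInfo::new(vec![
///     vec![None, Some("foo")],
///     vec![None],
/// ])?;
/// let expected = vec![
///     (PatternID::must(0), 0, None),
///     (PatternID::must(0), 1, Some("foo")),
///     (PatternID::must(1), 0, None),
/// ];
/// let got: Vec<(PatternID, usize, Option<&str>)> =
///     info.all_names().collect();
/// assert_eq!(expected, got);
///
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```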
#[derive(Debug)]
pub struct GroupInfoAllNames<'a> {
    group_info: &'a GroupInfo,
    pids: PatternIDIter,
    current_pid: Option<PatternID>,
    names: Option<core::iter::Enumerate<GroupInfoPatternNames<'a>>>,
}

impl<'a> Iterator for GroupInfoAllNames<'a> {
    type Item = (PatternID, usize, Option<&'a str>);

    fn next(&mut self) -> Option<(PatternID, usize, Option<&'a str>)> {
        // If the group info has no captures, then we never have anything
        // to yield. We need to consider this case explicitly (at time of
        // writing) because 'pattern_capture_names' will panic if captures
        // aren't enabled.
        if self.group_info.0.index_to_name.is_empty() {
            return None;
        }
        if self.current_pid.is_none() {
            self.current_pid = Some(self.pids.next()?);
        }
        let pid = self.current_pid.unwrap();
        if self.names.is_none() {
            self.names = Some(self.group_info.pattern_names(pid).enumerate());
        }
        let (group_index, name) = match self.names.as_mut().unwrap().next() {
            Some((group_index, name)) => (group_index, name),
            None => {
                self.current_pid = None;
                self.names = None;
                return self.next();
            }
        };
        Some((pid, group_index, name))
    }
}
<file_sep>/regex-automata/src/nfa/thompson/backtrack.rs
/*!
An NFA backed bounded backtracker for executing regex searches with capturing
groups.

This module provides a [`BoundedBacktracker`] that works by simulating an NFA
using the classical backtracking algorithm with a twist: it avoids redoing
work that it has done before and thereby avoids worst case exponential time.
In exchange, it can only be used on "short" haystacks. Its advantage is that
it can be faster than the [`PikeVM`](thompson::pikevm::PikeVM) in many cases
because it does less book-keeping.
*/

use alloc::{vec, vec::Vec};

use crate::{
    nfa::thompson::{self, BuildError, State, NFA},
    util::{
        captures::Captures,
        empty, iter,
        prefilter::Prefilter,
        primitives::{NonMaxUsize, PatternID, SmallIndex, StateID},
        search::{Anchored, HalfMatch, Input, Match, MatchError, Span},
    },
};

/// Returns the minimum visited capacity for the given haystack.
///
/// This function can be used as the argument to [`Config::visited_capacity`]
/// in order to guarantee that a backtracking search for the given `input`
/// won't return an error when using a [`BoundedBacktracker`] built from the
/// given `NFA`.
///
/// This routine exists primarily as a way to test that the bounded backtracker
/// works correctly when its capacity is set to the smallest possible amount.
/// Still, it may be useful in cases where you know you want to use the bounded
/// backtracker for a specific input, and just need to know what visited
/// capacity to provide to make it work.
///
/// Be warned that this number could be quite large as it is multiplicative in
/// the size of the given NFA and haystack.
pub fn min_visited_capacity(nfa: &NFA, input: &Input<'_>) -> usize {
    div_ceil(nfa.states().len() * (input.get_span().len() + 1), 8)
}

/// The configuration used for building a bounded backtracker.
///
/// A bounded backtracker configuration is a simple data object that is
/// typically used with [`Builder::configure`].
#[derive(Clone, Debug, Default)]
pub struct Config {
    pre: Option<Option<Prefilter>>,
    visited_capacity: Option<usize>,
}

impl Config {
    /// Return a new default regex configuration.
    pub fn new() -> Config {
        Config::default()
    }

    /// Set a prefilter to be used whenever a start state is entered.
    ///
    /// A [`Prefilter`] in this context is meant to accelerate searches by
    /// looking for literal prefixes that every match for the corresponding
    /// pattern (or patterns) must start with.
    /// Once a prefilter produces a match, the underlying search routine
    /// continues on to try and confirm the match.
    ///
    /// Be warned that setting a prefilter does not guarantee that the search
    /// will be faster. While it's usually a good bet, if the prefilter
    /// produces a lot of false positive candidates (i.e., positions matched
    /// by the prefilter but not by the regex), then the overall result can
    /// be slower than if you had just executed the regex engine without any
    /// prefilters.
    ///
    /// By default no prefilter is set.
    ///
    /// # Example
    ///
    /// ```
    /// use regex_automata::{
    ///     nfa::thompson::backtrack::BoundedBacktracker,
    ///     util::prefilter::Prefilter,
    ///     Input, Match, MatchKind,
    /// };
    ///
    /// let pre = Prefilter::new(MatchKind::LeftmostFirst, &["foo", "bar"]);
    /// let re = BoundedBacktracker::builder()
    ///     .configure(BoundedBacktracker::config().prefilter(pre))
    ///     .build(r"(foo|bar)[a-z]+")?;
    /// let mut cache = re.create_cache();
    /// let input = Input::new("foo1 barfox bar");
    /// assert_eq!(
    ///     Some(Match::must(0, 5..11)),
    ///     re.try_find(&mut cache, input)?,
    /// );
    ///
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```
    ///
    /// Be warned though that an incorrect prefilter can lead to incorrect
    /// results!
    ///
    /// ```
    /// use regex_automata::{
    ///     nfa::thompson::backtrack::BoundedBacktracker,
    ///     util::prefilter::Prefilter,
    ///     Input, HalfMatch, MatchKind,
    /// };
    ///
    /// let pre = Prefilter::new(MatchKind::LeftmostFirst, &["foo", "car"]);
    /// let re = BoundedBacktracker::builder()
    ///     .configure(BoundedBacktracker::config().prefilter(pre))
    ///     .build(r"(foo|bar)[a-z]+")?;
    /// let mut cache = re.create_cache();
    /// let input = Input::new("foo1 barfox bar");
    /// // No match reported even though there clearly is one!
    /// assert_eq!(None, re.try_find(&mut cache, input)?);
    ///
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```
    pub fn prefilter(mut self, pre: Option<Prefilter>) -> Config {
        self.pre = Some(pre);
        self
    }

    /// Set the visited capacity used to bound backtracking.
    ///
    /// The visited capacity represents the amount of heap memory (in bytes) to
    /// allocate toward tracking which parts of the backtracking search have
    /// been done before. The heap memory needed for any particular search is
    /// proportional to `haystack.len() * nfa.states().len()`, which can be
    /// quite large. Therefore, the bounded backtracker is typically only able
    /// to run on shorter haystacks.
    ///
    /// For a given regex, increasing the visited capacity means that the
    /// maximum haystack length that can be searched is increased. The
    /// [`BoundedBacktracker::max_haystack_len`] method returns that maximum.
    ///
    /// The default capacity is a reasonable but empirically chosen size.
    ///
    /// # Example
    ///
    /// As with other regex engines, Unicode is what tends to make the bounded
    /// backtracker less useful by making the maximum haystack length quite
    /// small. If necessary, increasing the visited capacity using this routine
    /// will increase the maximum haystack length at the cost of using more
    /// memory.
    ///
    /// Note though that the specific maximum values here are not an API
    /// guarantee. The default visited capacity is subject to change and not
    /// covered by semver.
/// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::nfa::thompson::backtrack::BoundedBacktracker; /// /// // Unicode inflates the size of the underlying NFA quite a bit, and /// // thus means that the backtracker can only handle smaller haystacks, /// // assuming that the visited capacity remains unchanged. /// let re = BoundedBacktracker::new(r"\w+")?; /// assert!(re.max_haystack_len() <= 7_000); /// // But we can increase the visited capacity to handle bigger haystacks! /// let re = BoundedBacktracker::builder() /// .configure(BoundedBacktracker::config().visited_capacity(1<<20)) /// .build(r"\w+")?; /// assert!(re.max_haystack_len() >= 25_000); /// assert!(re.max_haystack_len() <= 28_000); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn visited_capacity(mut self, capacity: usize) -> Config { self.visited_capacity = Some(capacity); self } /// Returns the prefilter set in this configuration, if one at all. pub fn get_prefilter(&self) -> Option<&Prefilter> { self.pre.as_ref().unwrap_or(&None).as_ref() } /// Returns the configured visited capacity. /// /// Note that the actual capacity used may be slightly bigger than the /// configured capacity. pub fn get_visited_capacity(&self) -> usize { const DEFAULT: usize = 256 * (1 << 10); // 256 KB self.visited_capacity.unwrap_or(DEFAULT) } /// Overwrite the default configuration such that the options in `o` are /// always used. If an option in `o` is not set, then the corresponding /// option in `self` is used. If it's not set in `self` either, then it /// remains not set. pub(crate) fn overwrite(&self, o: Config) -> Config { Config { pre: o.pre.or_else(|| self.pre.clone()), visited_capacity: o.visited_capacity.or(self.visited_capacity), } } } /// A builder for a bounded backtracker. /// /// This builder permits configuring options for the syntax of a pattern, the /// NFA construction and the `BoundedBacktracker` construction. This builder /// is different from a general purpose regex builder in that it permits fine /// grain configuration of the construction process. The trade off for this is /// complexity, and the possibility of setting a configuration that might not /// make sense. For example, there are two different UTF-8 modes: /// /// * [`syntax::Config::utf8`](crate::util::syntax::Config::utf8) controls /// whether the pattern itself can contain sub-expressions that match invalid /// UTF-8. /// * [`thompson::Config::utf8`] controls how the regex iterators themselves /// advance the starting position of the next search when a match with zero /// length is found. /// /// Generally speaking, callers will want to either enable all of these or /// disable all of these. /// /// # Example /// /// This example shows how to disable UTF-8 mode in the syntax and the regex /// itself. This is generally what you want for matching on arbitrary bytes. /// /// ``` /// use regex_automata::{ /// nfa::thompson::{self, backtrack::BoundedBacktracker}, /// util::syntax, /// Match, /// }; /// /// let re = BoundedBacktracker::builder() /// .syntax(syntax::Config::new().utf8(false)) /// .thompson(thompson::Config::new().utf8(false)) /// .build(r"foo(?-u:[^b])ar.*")?; /// let mut cache = re.create_cache(); /// /// let haystack = b"\xFEfoo\xFFarzz\xE2\x98\xFF\n"; /// let expected = Some(Ok(Match::must(0, 1..9))); /// let got = re.try_find_iter(&mut cache, haystack).next(); /// assert_eq!(expected, got); /// // Notice that `(?-u:[^b])` matches invalid UTF-8, /// // but the subsequent `.*` does not! 
Disabling UTF-8 /// // on the syntax permits this. /// // /// // N.B. This example does not show the impact of /// // disabling UTF-8 mode on a BoundedBacktracker Config, since that /// // only impacts regexes that can produce matches of /// // length 0. /// assert_eq!(b"foo\xFFarzz", &haystack[got.unwrap()?.range()]); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[derive(Clone, Debug)] pub struct Builder { config: Config, #[cfg(feature = "syntax")] thompson: thompson::Compiler, } impl Builder { /// Create a new BoundedBacktracker builder with its default configuration. pub fn new() -> Builder { Builder { config: Config::default(), #[cfg(feature = "syntax")] thompson: thompson::Compiler::new(), } } /// Build a `BoundedBacktracker` from the given pattern. /// /// If there was a problem parsing or compiling the pattern, then an error /// is returned. #[cfg(feature = "syntax")] pub fn build( &self, pattern: &str, ) -> Result<BoundedBacktracker, BuildError> { self.build_many(&[pattern]) } /// Build a `BoundedBacktracker` from the given patterns. #[cfg(feature = "syntax")] pub fn build_many<P: AsRef<str>>( &self, patterns: &[P], ) -> Result<BoundedBacktracker, BuildError> { let nfa = self.thompson.build_many(patterns)?; self.build_from_nfa(nfa) } /// Build a `BoundedBacktracker` directly from its NFA. /// /// Note that when using this method, any configuration that applies to the /// construction of the NFA itself will of course be ignored, since the NFA /// given here is already built. pub fn build_from_nfa( &self, nfa: NFA, ) -> Result<BoundedBacktracker, BuildError> { nfa.look_set_any().available().map_err(BuildError::word)?; Ok(BoundedBacktracker { config: self.config.clone(), nfa }) } /// Apply the given `BoundedBacktracker` configuration options to this /// builder. pub fn configure(&mut self, config: Config) -> &mut Builder { self.config = self.config.overwrite(config); self } /// Set the syntax configuration for this builder using /// [`syntax::Config`](crate::util::syntax::Config). /// /// This permits setting things like case insensitivity, Unicode and multi /// line mode. /// /// These settings only apply when constructing a `BoundedBacktracker` /// directly from a pattern. #[cfg(feature = "syntax")] pub fn syntax( &mut self, config: crate::util::syntax::Config, ) -> &mut Builder { self.thompson.syntax(config); self } /// Set the Thompson NFA configuration for this builder using /// [`nfa::thompson::Config`](crate::nfa::thompson::Config). /// /// This permits setting things like if additional time should be spent /// shrinking the size of the NFA. /// /// These settings only apply when constructing a `BoundedBacktracker` /// directly from a pattern. #[cfg(feature = "syntax")] pub fn thompson(&mut self, config: thompson::Config) -> &mut Builder { self.thompson.configure(config); self } } /// A backtracking regex engine that bounds its execution to avoid exponential /// blow-up. /// /// This regex engine only implements leftmost-first match semantics and /// only supports leftmost searches. It effectively does the same thing as a /// [`PikeVM`](thompson::pikevm::PikeVM), but typically does it faster because /// it doesn't have to worry about copying capturing group spans for most NFA /// states. Instead, the backtracker can maintain one set of captures (provided /// by the caller) and never needs to copy them. In exchange, the backtracker /// bounds itself to ensure it doesn't exhibit worst case exponential time. 
/// This results in the backtracker only being able to handle short haystacks /// given reasonable memory usage. /// /// # Searches may return an error! /// /// By design, this backtracking regex engine is bounded. This bound is /// implemented by not visiting any combination of NFA state ID and position /// in a haystack more than once. Thus, the total memory required to bound /// backtracking is proportional to `haystack.len() * nfa.states().len()`. /// This can obviously get quite large, since large haystacks aren't terribly /// uncommon. To avoid using exorbitant memory, the capacity is bounded by /// a fixed limit set via [`Config::visited_capacity`]. Thus, if the total /// capacity required for a particular regex and a haystack exceeds this /// capacity, then the search routine will return an error. /// /// Unlike other regex engines that may return an error at search time (like /// the DFA or the hybrid NFA/DFA), there is no way to guarantee that a bounded /// backtracker will work for every haystack. Therefore, this regex engine /// _only_ exposes fallible search routines to avoid the footgun of panicking /// when running a search on a haystack that is too big. /// /// If one wants to use the fallible search APIs without handling the /// error, the only way to guarantee an error won't occur from the /// haystack length is to ensure the haystack length does not exceed /// [`BoundedBacktracker::max_haystack_len`]. /// /// # Example: Unicode word boundaries /// /// This example shows that the bounded backtracker implements Unicode word /// boundaries correctly by default. /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{nfa::thompson::backtrack::BoundedBacktracker, Match}; /// /// let re = BoundedBacktracker::new(r"\b\w+\b")?; /// let mut cache = re.create_cache(); /// /// let mut it = re.try_find_iter(&mut cache, "Шерлок Холмс"); /// assert_eq!(Some(Ok(Match::must(0, 0..12))), it.next()); /// assert_eq!(Some(Ok(Match::must(0, 13..23))), it.next()); /// assert_eq!(None, it.next()); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// # Example: multiple regex patterns /// /// The bounded backtracker supports searching for multiple patterns /// simultaneously, just like other regex engines. Note though that because it /// uses a backtracking strategy, this regex engine is unlikely to scale well /// as more patterns are added. But then again, as more patterns are added, the /// maximum haystack length allowed will also shorten (assuming the visited /// capacity remains invariant). /// /// ``` /// use regex_automata::{nfa::thompson::backtrack::BoundedBacktracker, Match}; /// /// let re = BoundedBacktracker::new_many(&["[a-z]+", "[0-9]+"])?; /// let mut cache = re.create_cache(); /// /// let mut it = re.try_find_iter(&mut cache, "abc 1 foo 4567 0 quux"); /// assert_eq!(Some(Ok(Match::must(0, 0..3))), it.next()); /// assert_eq!(Some(Ok(Match::must(1, 4..5))), it.next()); /// assert_eq!(Some(Ok(Match::must(0, 6..9))), it.next()); /// assert_eq!(Some(Ok(Match::must(1, 10..14))), it.next()); /// assert_eq!(Some(Ok(Match::must(1, 15..16))), it.next()); /// assert_eq!(Some(Ok(Match::must(0, 17..21))), it.next()); /// assert_eq!(None, it.next()); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[derive(Clone, Debug)] pub struct BoundedBacktracker { config: Config, nfa: NFA, } impl BoundedBacktracker { /// Parse the given regular expression using the default configuration and /// return the corresponding `BoundedBacktracker`. 
/// /// If you want a non-default configuration, then use the [`Builder`] to /// set your own configuration. /// /// # Example /// /// ``` /// use regex_automata::{ /// nfa::thompson::backtrack::BoundedBacktracker, /// Match, /// }; /// /// let re = BoundedBacktracker::new("foo[0-9]+bar")?; /// let mut cache = re.create_cache(); /// assert_eq!( /// Some(Ok(Match::must(0, 3..14))), /// re.try_find_iter(&mut cache, "zzzfoo12345barzzz").next(), /// ); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[cfg(feature = "syntax")] pub fn new(pattern: &str) -> Result<BoundedBacktracker, BuildError> { BoundedBacktracker::builder().build(pattern) } /// Like `new`, but parses multiple patterns into a single "multi regex." /// This similarly uses the default regex configuration. /// /// # Example /// /// ``` /// use regex_automata::{ /// nfa::thompson::backtrack::BoundedBacktracker, /// Match, /// }; /// /// let re = BoundedBacktracker::new_many(&["[a-z]+", "[0-9]+"])?; /// let mut cache = re.create_cache(); /// /// let mut it = re.try_find_iter(&mut cache, "abc 1 foo 4567 0 quux"); /// assert_eq!(Some(Ok(Match::must(0, 0..3))), it.next()); /// assert_eq!(Some(Ok(Match::must(1, 4..5))), it.next()); /// assert_eq!(Some(Ok(Match::must(0, 6..9))), it.next()); /// assert_eq!(Some(Ok(Match::must(1, 10..14))), it.next()); /// assert_eq!(Some(Ok(Match::must(1, 15..16))), it.next()); /// assert_eq!(Some(Ok(Match::must(0, 17..21))), it.next()); /// assert_eq!(None, it.next()); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[cfg(feature = "syntax")] pub fn new_many<P: AsRef<str>>( patterns: &[P], ) -> Result<BoundedBacktracker, BuildError> { BoundedBacktracker::builder().build_many(patterns) } /// # Example /// /// This shows how to hand assemble a regular expression via its HIR, /// compile an NFA from it and build a BoundedBacktracker from the NFA. /// /// ``` /// use regex_automata::{ /// nfa::thompson::{NFA, backtrack::BoundedBacktracker}, /// Match, /// }; /// use regex_syntax::hir::{Hir, Class, ClassBytes, ClassBytesRange}; /// /// let hir = Hir::class(Class::Bytes(ClassBytes::new(vec![ /// ClassBytesRange::new(b'0', b'9'), /// ClassBytesRange::new(b'A', b'Z'), /// ClassBytesRange::new(b'_', b'_'), /// ClassBytesRange::new(b'a', b'z'), /// ]))); /// /// let config = NFA::config().nfa_size_limit(Some(1_000)); /// let nfa = NFA::compiler().configure(config).build_from_hir(&hir)?; /// /// let re = BoundedBacktracker::new_from_nfa(nfa)?; /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); /// let expected = Some(Match::must(0, 3..4)); /// re.try_captures(&mut cache, "!@#A#@!", &mut caps)?; /// assert_eq!(expected, caps.get_match()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn new_from_nfa(nfa: NFA) -> Result<BoundedBacktracker, BuildError> { BoundedBacktracker::builder().build_from_nfa(nfa) } /// Create a new `BoundedBacktracker` that matches every input. 
/// /// # Example /// /// ``` /// use regex_automata::{ /// nfa::thompson::backtrack::BoundedBacktracker, /// Match, /// }; /// /// let re = BoundedBacktracker::always_match()?; /// let mut cache = re.create_cache(); /// /// let expected = Some(Ok(Match::must(0, 0..0))); /// assert_eq!(expected, re.try_find_iter(&mut cache, "").next()); /// assert_eq!(expected, re.try_find_iter(&mut cache, "foo").next()); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn always_match() -> Result<BoundedBacktracker, BuildError> { let nfa = thompson::NFA::always_match(); BoundedBacktracker::new_from_nfa(nfa) } /// Create a new `BoundedBacktracker` that never matches any input. /// /// # Example /// /// ``` /// use regex_automata::nfa::thompson::backtrack::BoundedBacktracker; /// /// let re = BoundedBacktracker::never_match()?; /// let mut cache = re.create_cache(); /// /// assert_eq!(None, re.try_find_iter(&mut cache, "").next()); /// assert_eq!(None, re.try_find_iter(&mut cache, "foo").next()); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn never_match() -> Result<BoundedBacktracker, BuildError> { let nfa = thompson::NFA::never_match(); BoundedBacktracker::new_from_nfa(nfa) } /// Return a default configuration for a `BoundedBacktracker`. /// /// This is a convenience routine to avoid needing to import the `Config` /// type when customizing the construction of a `BoundedBacktracker`. /// /// # Example /// /// This example shows how to disable UTF-8 mode. When UTF-8 mode is /// disabled, zero-width matches that split a codepoint are allowed. /// Otherwise they are never reported. /// /// In the code below, notice that `""` is permitted to match positions /// that split the encoding of a codepoint. /// /// ``` /// use regex_automata::{ /// nfa::thompson::{self, backtrack::BoundedBacktracker}, /// Match, /// }; /// /// let re = BoundedBacktracker::builder() /// .thompson(thompson::Config::new().utf8(false)) /// .build(r"")?; /// let mut cache = re.create_cache(); /// /// let haystack = "a☃z"; /// let mut it = re.try_find_iter(&mut cache, haystack); /// assert_eq!(Some(Ok(Match::must(0, 0..0))), it.next()); /// assert_eq!(Some(Ok(Match::must(0, 1..1))), it.next()); /// assert_eq!(Some(Ok(Match::must(0, 2..2))), it.next()); /// assert_eq!(Some(Ok(Match::must(0, 3..3))), it.next()); /// assert_eq!(Some(Ok(Match::must(0, 4..4))), it.next()); /// assert_eq!(Some(Ok(Match::must(0, 5..5))), it.next()); /// assert_eq!(None, it.next()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn config() -> Config { Config::new() } /// Return a builder for configuring the construction of a /// `BoundedBacktracker`. /// /// This is a convenience routine to avoid needing to import the /// [`Builder`] type in common cases. /// /// # Example /// /// This example shows how to use the builder to disable UTF-8 mode /// everywhere. 
/// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{ /// nfa::thompson::{self, backtrack::BoundedBacktracker}, /// util::syntax, /// Match, /// }; /// /// let re = BoundedBacktracker::builder() /// .syntax(syntax::Config::new().utf8(false)) /// .thompson(thompson::Config::new().utf8(false)) /// .build(r"foo(?-u:[^b])ar.*")?; /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); /// /// let haystack = b"\xFEfoo\xFFarzz\xE2\x98\xFF\n"; /// let expected = Some(Match::must(0, 1..9)); /// re.try_captures(&mut cache, haystack, &mut caps)?; /// assert_eq!(expected, caps.get_match()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn builder() -> Builder { Builder::new() } /// Create a new cache for this regex. /// /// The cache returned should only be used for searches for this /// regex. If you want to reuse the cache for another regex, then you /// must call [`Cache::reset`] with that regex (or, equivalently, /// [`BoundedBacktracker::reset_cache`]). pub fn create_cache(&self) -> Cache { Cache::new(self) } /// Create a new empty set of capturing groups that is guaranteed to be /// valid for the search APIs on this `BoundedBacktracker`. /// /// A `Captures` value created for a specific `BoundedBacktracker` cannot /// be used with any other `BoundedBacktracker`. /// /// This is a convenience function for [`Captures::all`]. See the /// [`Captures`] documentation for an explanation of its alternative /// constructors that permit the `BoundedBacktracker` to do less work /// during a search, and thus might make it faster. pub fn create_captures(&self) -> Captures { Captures::all(self.get_nfa().group_info().clone()) } /// Reset the given cache such that it can be used for searching with the /// this `BoundedBacktracker` (and only this `BoundedBacktracker`). /// /// A cache reset permits reusing memory already allocated in this cache /// with a different `BoundedBacktracker`. /// /// # Example /// /// This shows how to re-purpose a cache for use with a different /// `BoundedBacktracker`. /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{ /// nfa::thompson::backtrack::BoundedBacktracker, /// Match, /// }; /// /// let re1 = BoundedBacktracker::new(r"\w")?; /// let re2 = BoundedBacktracker::new(r"\W")?; /// /// let mut cache = re1.create_cache(); /// assert_eq!( /// Some(Ok(Match::must(0, 0..2))), /// re1.try_find_iter(&mut cache, "Δ").next(), /// ); /// /// // Using 'cache' with re2 is not allowed. It may result in panics or /// // incorrect results. In order to re-purpose the cache, we must reset /// // it with the BoundedBacktracker we'd like to use it with. /// // /// // Similarly, after this reset, using the cache with 're1' is also not /// // allowed. /// cache.reset(&re2); /// assert_eq!( /// Some(Ok(Match::must(0, 0..3))), /// re2.try_find_iter(&mut cache, "☃").next(), /// ); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn reset_cache(&self, cache: &mut Cache) { cache.reset(self); } /// Returns the total number of patterns compiled into this /// `BoundedBacktracker`. /// /// In the case of a `BoundedBacktracker` that contains no patterns, this /// returns `0`. 
/// /// # Example /// /// This example shows the pattern length for a `BoundedBacktracker` that /// never matches: /// /// ``` /// use regex_automata::nfa::thompson::backtrack::BoundedBacktracker; /// /// let re = BoundedBacktracker::never_match()?; /// assert_eq!(re.pattern_len(), 0); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// And another example for a `BoundedBacktracker` that matches at every /// position: /// /// ``` /// use regex_automata::nfa::thompson::backtrack::BoundedBacktracker; /// /// let re = BoundedBacktracker::always_match()?; /// assert_eq!(re.pattern_len(), 1); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// And finally, a `BoundedBacktracker` that was constructed from multiple /// patterns: /// /// ``` /// use regex_automata::nfa::thompson::backtrack::BoundedBacktracker; /// /// let re = BoundedBacktracker::new_many(&["[0-9]+", "[a-z]+", "[A-Z]+"])?; /// assert_eq!(re.pattern_len(), 3); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn pattern_len(&self) -> usize { self.nfa.pattern_len() } /// Return the config for this `BoundedBacktracker`. #[inline] pub fn get_config(&self) -> &Config { &self.config } /// Returns a reference to the underlying NFA. #[inline] pub fn get_nfa(&self) -> &NFA { &self.nfa } /// Returns the maximum haystack length supported by this backtracker. /// /// This routine is a function of both [`Config::visited_capacity`] and the /// internal size of the backtracker's NFA. /// /// # Example /// /// This example shows how the maximum haystack length can vary depending /// on the size of the regex itself. Note though that the specific maximum /// values here are not an API guarantee. The default visited capacity is /// subject to change and not covered by semver. /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{ /// nfa::thompson::backtrack::BoundedBacktracker, /// Match, MatchError, /// }; /// /// // If you're only using ASCII, you get a big budget. /// let re = BoundedBacktracker::new(r"(?-u)\w+")?; /// let mut cache = re.create_cache(); /// assert_eq!(re.max_haystack_len(), 299_592); /// // Things work up to the max. /// let mut haystack = "a".repeat(299_592); /// let expected = Some(Ok(Match::must(0, 0..299_592))); /// assert_eq!(expected, re.try_find_iter(&mut cache, &haystack).next()); /// // But you'll get an error if you provide a haystack that's too big. /// // Notice that we use the 'try_find_iter' routine instead, which /// // yields Result<Match, MatchError> instead of Match. /// haystack.push('a'); /// let expected = Some(Err(MatchError::haystack_too_long(299_593))); /// assert_eq!(expected, re.try_find_iter(&mut cache, &haystack).next()); /// /// // Unicode inflates the size of the underlying NFA quite a bit, and /// // thus means that the backtracker can only handle smaller haystacks, /// // assuming that the visited capacity remains unchanged. /// let re = BoundedBacktracker::new(r"\w+")?; /// assert!(re.max_haystack_len() <= 7_000); /// // But we can increase the visited capacity to handle bigger haystacks! 
/// let re = BoundedBacktracker::builder() /// .configure(BoundedBacktracker::config().visited_capacity(1<<20)) /// .build(r"\w+")?; /// assert!(re.max_haystack_len() >= 25_000); /// assert!(re.max_haystack_len() <= 28_000); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn max_haystack_len(&self) -> usize { // The capacity given in the config is "bytes of heap memory," but the // capacity we use here is "number of bits." So convert the capacity in // bytes to the capacity in bits. let capacity = 8 * self.get_config().get_visited_capacity(); let blocks = div_ceil(capacity, Visited::BLOCK_SIZE); let real_capacity = blocks * Visited::BLOCK_SIZE; (real_capacity / self.nfa.states().len()) - 1 } } impl BoundedBacktracker { /// Returns true if and only if this regex matches the given haystack. /// /// In the case of a backtracking regex engine, and unlike most other /// regex engines in this crate, short circuiting isn't practical. However, /// this routine may still be faster because it instructs backtracking to /// not keep track of any capturing groups. /// /// # Errors /// /// This routine only errors if the search could not complete. For this /// backtracking regex engine, this only occurs when the haystack length /// exceeds [`BoundedBacktracker::max_haystack_len`]. /// /// When a search cannot complete, callers cannot know whether a match /// exists or not. /// /// # Example /// /// ``` /// use regex_automata::nfa::thompson::backtrack::BoundedBacktracker; /// /// let re = BoundedBacktracker::new("foo[0-9]+bar")?; /// let mut cache = re.create_cache(); /// /// assert!(re.try_is_match(&mut cache, "foo12345bar")?); /// assert!(!re.try_is_match(&mut cache, "foobar")?); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// # Example: consistency with search APIs /// /// `is_match` is guaranteed to return `true` whenever `find` returns a /// match. This includes searches that are executed entirely within a /// codepoint: /// /// ``` /// use regex_automata::{ /// nfa::thompson::backtrack::BoundedBacktracker, /// Input, /// }; /// /// let re = BoundedBacktracker::new("a*")?; /// let mut cache = re.create_cache(); /// /// assert!(!re.try_is_match(&mut cache, Input::new("☃").span(1..2))?); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// Notice that when UTF-8 mode is disabled, then the above reports a /// match because the restriction against zero-width matches that split a /// codepoint has been lifted: /// /// ``` /// use regex_automata::{ /// nfa::thompson::{backtrack::BoundedBacktracker, NFA}, /// Input, /// }; /// /// let re = BoundedBacktracker::builder() /// .thompson(NFA::config().utf8(false)) /// .build("a*")?; /// let mut cache = re.create_cache(); /// /// assert!(re.try_is_match(&mut cache, Input::new("☃").span(1..2))?); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn try_is_match<'h, I: Into<Input<'h>>>( &self, cache: &mut Cache, input: I, ) -> Result<bool, MatchError> { let input = input.into().earliest(true); self.try_search_slots(cache, &input, &mut []).map(|pid| pid.is_some()) } /// Executes a leftmost forward search and returns a `Match` if one exists. /// /// This routine only includes the overall match span. To get /// access to the individual spans of each capturing group, use /// [`BoundedBacktracker::try_captures`]. /// /// # Errors /// /// This routine only errors if the search could not complete. 
For this /// backtracking regex engine, this only occurs when the haystack length /// exceeds [`BoundedBacktracker::max_haystack_len`]. /// /// When a search cannot complete, callers cannot know whether a match /// exists or not. /// /// # Example /// /// ``` /// use regex_automata::{ /// nfa::thompson::backtrack::BoundedBacktracker, /// Match, /// }; /// /// let re = BoundedBacktracker::new("foo[0-9]+")?; /// let mut cache = re.create_cache(); /// let expected = Match::must(0, 0..8); /// assert_eq!(Some(expected), re.try_find(&mut cache, "foo12345")?); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn try_find<'h, I: Into<Input<'h>>>( &self, cache: &mut Cache, input: I, ) -> Result<Option<Match>, MatchError> { let input = input.into(); if self.get_nfa().pattern_len() == 1 { let mut slots = [None, None]; let pid = match self.try_search_slots(cache, &input, &mut slots)? { None => return Ok(None), Some(pid) => pid, }; let start = match slots[0] { None => return Ok(None), Some(s) => s.get(), }; let end = match slots[1] { None => return Ok(None), Some(s) => s.get(), }; return Ok(Some(Match::new(pid, Span { start, end }))); } let ginfo = self.get_nfa().group_info(); let slots_len = ginfo.implicit_slot_len(); let mut slots = vec![None; slots_len]; let pid = match self.try_search_slots(cache, &input, &mut slots)? { None => return Ok(None), Some(pid) => pid, }; let start = match slots[pid.as_usize() * 2] { None => return Ok(None), Some(s) => s.get(), }; let end = match slots[pid.as_usize() * 2 + 1] { None => return Ok(None), Some(s) => s.get(), }; Ok(Some(Match::new(pid, Span { start, end }))) } /// Executes a leftmost forward search and writes the spans of capturing /// groups that participated in a match into the provided [`Captures`] /// value. If no match was found, then [`Captures::is_match`] is guaranteed /// to return `false`. /// /// # Errors /// /// This routine only errors if the search could not complete. For this /// backtracking regex engine, this only occurs when the haystack length /// exceeds [`BoundedBacktracker::max_haystack_len`]. /// /// When a search cannot complete, callers cannot know whether a match /// exists or not. /// /// # Example /// /// ``` /// use regex_automata::{ /// nfa::thompson::backtrack::BoundedBacktracker, /// Span, /// }; /// /// let re = BoundedBacktracker::new( /// r"^([0-9]{4})-([0-9]{2})-([0-9]{2})$", /// )?; /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); /// /// re.try_captures(&mut cache, "2010-03-14", &mut caps)?; /// assert!(caps.is_match()); /// assert_eq!(Some(Span::from(0..4)), caps.get_group(1)); /// assert_eq!(Some(Span::from(5..7)), caps.get_group(2)); /// assert_eq!(Some(Span::from(8..10)), caps.get_group(3)); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn try_captures<'h, I: Into<Input<'h>>>( &self, cache: &mut Cache, input: I, caps: &mut Captures, ) -> Result<(), MatchError> { self.try_search(cache, &input.into(), caps) } /// Returns an iterator over all non-overlapping leftmost matches in the /// given bytes. If no match exists, then the iterator yields no elements. /// /// If the regex engine returns an error at any point, then the iterator /// will yield that error. 
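    ///
    /// For this backtracking regex engine, such an error can only occur when
    /// the haystack length exceeds [`BoundedBacktracker::max_haystack_len`].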
/// /// # Example /// /// ``` /// use regex_automata::{ /// nfa::thompson::backtrack::BoundedBacktracker, /// Match, MatchError, /// }; /// /// let re = BoundedBacktracker::new("foo[0-9]+")?; /// let mut cache = re.create_cache(); /// /// let text = "foo1 foo12 foo123"; /// let result: Result<Vec<Match>, MatchError> = re /// .try_find_iter(&mut cache, text) /// .collect(); /// let matches = result?; /// assert_eq!(matches, vec![ /// Match::must(0, 0..4), /// Match::must(0, 5..10), /// Match::must(0, 11..17), /// ]); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn try_find_iter<'r, 'c, 'h, I: Into<Input<'h>>>( &'r self, cache: &'c mut Cache, input: I, ) -> TryFindMatches<'r, 'c, 'h> { let caps = Captures::matches(self.get_nfa().group_info().clone()); let it = iter::Searcher::new(input.into()); TryFindMatches { re: self, cache, caps, it } } /// Returns an iterator over all non-overlapping `Captures` values. If no /// match exists, then the iterator yields no elements. /// /// This yields the same matches as [`BoundedBacktracker::try_find_iter`], /// but it includes the spans of all capturing groups that participate in /// each match. /// /// If the regex engine returns an error at any point, then the iterator /// will yield that error. /// /// **Tip:** See [`util::iter::Searcher`](crate::util::iter::Searcher) for /// how to correctly iterate over all matches in a haystack while avoiding /// the creation of a new `Captures` value for every match. (Which you are /// forced to do with an `Iterator`.) /// /// # Example /// /// ``` /// use regex_automata::{ /// nfa::thompson::backtrack::BoundedBacktracker, /// Span, /// }; /// /// let re = BoundedBacktracker::new("foo(?P<numbers>[0-9]+)")?; /// let mut cache = re.create_cache(); /// /// let text = "foo1 foo12 foo123"; /// let mut spans = vec![]; /// for result in re.try_captures_iter(&mut cache, text) { /// let caps = result?; /// // The unwrap is OK since 'numbers' matches if the pattern matches. /// spans.push(caps.get_group_by_name("numbers").unwrap()); /// } /// assert_eq!(spans, vec![ /// Span::from(3..4), /// Span::from(8..10), /// Span::from(14..17), /// ]); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn try_captures_iter<'r, 'c, 'h, I: Into<Input<'h>>>( &'r self, cache: &'c mut Cache, input: I, ) -> TryCapturesMatches<'r, 'c, 'h> { let caps = self.create_captures(); let it = iter::Searcher::new(input.into()); TryCapturesMatches { re: self, cache, caps, it } } } impl BoundedBacktracker { /// Executes a leftmost forward search and writes the spans of capturing /// groups that participated in a match into the provided [`Captures`] /// value. If no match was found, then [`Captures::is_match`] is guaranteed /// to return `false`. /// /// This is like [`BoundedBacktracker::try_captures`], but it accepts a /// concrete `&Input` instead of an `Into<Input>`. /// /// # Errors /// /// This routine only errors if the search could not complete. For this /// backtracking regex engine, this only occurs when the haystack length /// exceeds [`BoundedBacktracker::max_haystack_len`]. /// /// When a search cannot complete, callers cannot know whether a match /// exists or not. /// /// # Example: specific pattern search /// /// This example shows how to build a multi bounded backtracker that /// permits searching for specific patterns. 
/// /// ``` /// use regex_automata::{ /// nfa::thompson::backtrack::BoundedBacktracker, /// Anchored, Input, Match, PatternID, /// }; /// /// let re = BoundedBacktracker::new_many(&[ /// "[a-z0-9]{6}", /// "[a-z][a-z0-9]{5}", /// ])?; /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); /// let haystack = "foo123"; /// /// // Since we are using the default leftmost-first match and both /// // patterns match at the same starting position, only the first pattern /// // will be returned in this case when doing a search for any of the /// // patterns. /// let expected = Some(Match::must(0, 0..6)); /// re.try_search(&mut cache, &Input::new(haystack), &mut caps)?; /// assert_eq!(expected, caps.get_match()); /// /// // But if we want to check whether some other pattern matches, then we /// // can provide its pattern ID. /// let expected = Some(Match::must(1, 0..6)); /// let input = Input::new(haystack) /// .anchored(Anchored::Pattern(PatternID::must(1))); /// re.try_search(&mut cache, &input, &mut caps)?; /// assert_eq!(expected, caps.get_match()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// # Example: specifying the bounds of a search /// /// This example shows how providing the bounds of a search can produce /// different results than simply sub-slicing the haystack. /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{ /// nfa::thompson::backtrack::BoundedBacktracker, /// Match, Input, /// }; /// /// let re = BoundedBacktracker::new(r"\b[0-9]{3}\b")?; /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); /// let haystack = "foo123bar"; /// /// // Since we sub-slice the haystack, the search doesn't know about /// // the larger context and assumes that `123` is surrounded by word /// // boundaries. And of course, the match position is reported relative /// // to the sub-slice as well, which means we get `0..3` instead of /// // `3..6`. /// let expected = Some(Match::must(0, 0..3)); /// re.try_search(&mut cache, &Input::new(&haystack[3..6]), &mut caps)?; /// assert_eq!(expected, caps.get_match()); /// /// // But if we provide the bounds of the search within the context of the /// // entire haystack, then the search can take the surrounding context /// // into account. (And if we did find a match, it would be reported /// // as a valid offset into `haystack` instead of its sub-slice.) /// let expected = None; /// re.try_search( /// &mut cache, &Input::new(haystack).range(3..6), &mut caps, /// )?; /// assert_eq!(expected, caps.get_match()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn try_search( &self, cache: &mut Cache, input: &Input<'_>, caps: &mut Captures, ) -> Result<(), MatchError> { caps.set_pattern(None); let pid = self.try_search_slots(cache, input, caps.slots_mut())?; caps.set_pattern(pid); Ok(()) } /// Executes a leftmost forward search and writes the spans of capturing /// groups that participated in a match into the provided `slots`, and /// returns the matching pattern ID. The contents of the slots for patterns /// other than the matching pattern are unspecified. If no match was found, /// then `None` is returned and the contents of all `slots` is unspecified. /// /// This is like [`BoundedBacktracker::try_search`], but it accepts a raw /// slots slice instead of a `Captures` value. This is useful in contexts /// where you don't want or need to allocate a `Captures`. /// /// It is legal to pass _any_ number of slots to this routine. 
If the regex /// engine would otherwise write a slot offset that doesn't fit in the /// provided slice, then it is simply skipped. In general though, there are /// usually three slice lengths you might want to use: /// /// * An empty slice, if you only care about which pattern matched. /// * A slice with /// [`pattern_len() * 2`](crate::nfa::thompson::NFA::pattern_len) /// slots, if you only care about the overall match spans for each matching /// pattern. /// * A slice with /// [`slot_len()`](crate::util::captures::GroupInfo::slot_len) slots, which /// permits recording match offsets for every capturing group in every /// pattern. /// /// # Errors /// /// This routine only errors if the search could not complete. For this /// backtracking regex engine, this only occurs when the haystack length /// exceeds [`BoundedBacktracker::max_haystack_len`]. /// /// When a search cannot complete, callers cannot know whether a match /// exists or not. /// /// # Example /// /// This example shows how to find the overall match offsets in a /// multi-pattern search without allocating a `Captures` value. Indeed, we /// can put our slots right on the stack. /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{ /// nfa::thompson::backtrack::BoundedBacktracker, /// PatternID, Input, /// }; /// /// let re = BoundedBacktracker::new_many(&[ /// r"\pL+", /// r"\d+", /// ])?; /// let mut cache = re.create_cache(); /// let input = Input::new("!@#123"); /// /// // We only care about the overall match offsets here, so we just /// // allocate two slots for each pattern. Each slot records the start /// // and end of the match. /// let mut slots = [None; 4]; /// let pid = re.try_search_slots(&mut cache, &input, &mut slots)?; /// assert_eq!(Some(PatternID::must(1)), pid); /// /// // The overall match offsets are always at 'pid * 2' and 'pid * 2 + 1'. /// // See 'GroupInfo' for more details on the mapping between groups and /// // slot indices. /// let slot_start = pid.unwrap().as_usize() * 2; /// let slot_end = slot_start + 1; /// assert_eq!(Some(3), slots[slot_start].map(|s| s.get())); /// assert_eq!(Some(6), slots[slot_end].map(|s| s.get())); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn try_search_slots( &self, cache: &mut Cache, input: &Input<'_>, slots: &mut [Option<NonMaxUsize>], ) -> Result<Option<PatternID>, MatchError> { let utf8empty = self.get_nfa().has_empty() && self.get_nfa().is_utf8(); if !utf8empty { let maybe_hm = self.try_search_slots_imp(cache, input, slots)?; return Ok(maybe_hm.map(|hm| hm.pattern())); } // See PikeVM::try_search_slots for why we do this. let min = self.get_nfa().group_info().implicit_slot_len(); if slots.len() >= min { let maybe_hm = self.try_search_slots_imp(cache, input, slots)?; return Ok(maybe_hm.map(|hm| hm.pattern())); } if self.get_nfa().pattern_len() == 1 { let mut enough = [None, None]; let got = self.try_search_slots_imp(cache, input, &mut enough)?; // This is OK because we know `enough_slots` is strictly bigger // than `slots`, otherwise this special case isn't reached. slots.copy_from_slice(&enough[..slots.len()]); return Ok(got.map(|hm| hm.pattern())); } let mut enough = vec![None; min]; let got = self.try_search_slots_imp(cache, input, &mut enough)?; // This is OK because we know `enough_slots` is strictly bigger than // `slots`, otherwise this special case isn't reached. 
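        // Only the prefix of scratch offsets that fits in the caller's
        // 'slots' is copied back; the rest of 'enough' is discarded.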
slots.copy_from_slice(&enough[..slots.len()]); Ok(got.map(|hm| hm.pattern())) } /// This is the actual implementation of `try_search_slots_imp` that /// doesn't account for the special case when 1) the NFA has UTF-8 mode /// enabled, 2) the NFA can match the empty string and 3) the caller has /// provided an insufficient number of slots to record match offsets. #[inline(never)] fn try_search_slots_imp( &self, cache: &mut Cache, input: &Input<'_>, slots: &mut [Option<NonMaxUsize>], ) -> Result<Option<HalfMatch>, MatchError> { let utf8empty = self.get_nfa().has_empty() && self.get_nfa().is_utf8(); let hm = match self.search_imp(cache, input, slots)? { None => return Ok(None), Some(hm) if !utf8empty => return Ok(Some(hm)), Some(hm) => hm, }; empty::skip_splits_fwd(input, hm, hm.offset(), |input| { Ok(self .search_imp(cache, input, slots)? .map(|hm| (hm, hm.offset()))) }) } /// The implementation of standard leftmost backtracking search. /// /// Capturing group spans are written to 'caps', but only if requested. /// 'caps' can be one of three things: 1) totally empty, in which case, we /// only report the pattern that matched or 2) only has slots for recording /// the overall match offsets for any pattern or 3) has all slots available /// for recording the spans of any groups participating in a match. fn search_imp( &self, cache: &mut Cache, input: &Input<'_>, slots: &mut [Option<NonMaxUsize>], ) -> Result<Option<HalfMatch>, MatchError> { // Unlike in the PikeVM, we write our capturing group spans directly // into the caller's captures groups. So we have to make sure we're // starting with a blank slate first. In the PikeVM, we avoid this // by construction: the spans that are copied to every slot in the // 'Captures' value already account for presence/absence. In this // backtracker, we write directly into the caller provided slots, where // as in the PikeVM, we write into scratch space first and only copy // them to the caller provided slots when a match is found. for slot in slots.iter_mut() { *slot = None; } cache.setup_search(&self, input)?; if input.is_done() { return Ok(None); } let (anchored, start_id) = match input.get_anchored() { // Only way we're unanchored is if both the caller asked for an // unanchored search *and* the pattern is itself not anchored. Anchored::No => ( self.nfa.is_always_start_anchored(), // We always use the anchored starting state here, even if // doing an unanchored search. The "unanchored" part of it is // implemented in the loop below, by simply trying the next // byte offset if the previous backtracking exploration failed. self.nfa.start_anchored(), ), Anchored::Yes => (true, self.nfa.start_anchored()), Anchored::Pattern(pid) => match self.nfa.start_pattern(pid) { None => return Ok(None), Some(sid) => (true, sid), }, }; if anchored { let at = input.start(); return Ok(self.backtrack(cache, input, at, start_id, slots)); } let pre = self.get_config().get_prefilter(); let mut at = input.start(); while at <= input.end() { if let Some(ref pre) = pre { let span = Span::from(at..input.end()); match pre.find(input.haystack(), span) { None => break, Some(ref span) => at = span.start, } } if let Some(hm) = self.backtrack(cache, input, at, start_id, slots) { return Ok(Some(hm)); } at += 1; } Ok(None) } /// Look for a match starting at `at` in `input` and write the matching /// pattern ID and group spans to `caps`. The search uses `start_id` as its /// starting state in the underlying NFA. 
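    ///
    /// The exploration is driven by an explicit stack of 'Frame' values kept
    /// on the heap (in the given cache) rather than by recursion, which
    /// avoids overflowing the call stack for larger regexes.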
    ///
    /// If no match was found, then the caller should increment `at` and try
    /// at the next position.
    #[cfg_attr(feature = "perf-inline", inline(always))]
    fn backtrack(
        &self,
        cache: &mut Cache,
        input: &Input<'_>,
        at: usize,
        start_id: StateID,
        slots: &mut [Option<NonMaxUsize>],
    ) -> Option<HalfMatch> {
        cache.stack.push(Frame::Step { sid: start_id, at });
        while let Some(frame) = cache.stack.pop() {
            match frame {
                Frame::Step { sid, at } => {
                    if let Some(hm) = self.step(cache, input, sid, at, slots) {
                        return Some(hm);
                    }
                }
                Frame::RestoreCapture { slot, offset } => {
                    slots[slot] = offset;
                }
            }
        }
        None
    }

    // LAMENTATION: The actual backtracking search is implemented in about
    // 75 lines below. Yet this file is over 2,000 lines long. What have I
    // done?

    /// Execute a "step" in the backtracking algorithm.
    ///
    /// A "step" is somewhat of a misnomer, because this routine keeps going
    /// until it either runs out of things to try or finds a match. In the
    /// former case, it may have pushed some things on to the backtracking
    /// stack, in which case, those will be tried next as part of the
    /// 'backtrack' routine above.
    #[cfg_attr(feature = "perf-inline", inline(always))]
    fn step(
        &self,
        cache: &mut Cache,
        input: &Input<'_>,
        mut sid: StateID,
        mut at: usize,
        slots: &mut [Option<NonMaxUsize>],
    ) -> Option<HalfMatch> {
        loop {
            if !cache.visited.insert(sid, at - input.start()) {
                return None;
            }
            match *self.nfa.state(sid) {
                State::ByteRange { ref trans } => {
                    // Why do we need this? Unlike other regex engines in this
                    // crate, the backtracker can steam roll ahead in the
                    // haystack outside of the main loop over the bytes in the
                    // haystack. While 'trans.matches()' below handles the case
                    // of 'at' being out of bounds of 'input.haystack()', we
                    // also need to handle the case of 'at' going out of bounds
                    // of the span the caller asked to search.
                    //
                    // We should perhaps make the 'trans.matches()' API accept
                    // an '&Input' instead of a '&[u8]'. Or at least, add a new
                    // API that does it.
                    if at >= input.end() {
                        return None;
                    }
                    if !trans.matches(input.haystack(), at) {
                        return None;
                    }
                    sid = trans.next;
                    at += 1;
                }
                State::Sparse(ref sparse) => {
                    if at >= input.end() {
                        return None;
                    }
                    sid = sparse.matches(input.haystack(), at)?;
                    at += 1;
                }
                State::Dense(ref dense) => {
                    if at >= input.end() {
                        return None;
                    }
                    sid = dense.matches(input.haystack(), at)?;
                    at += 1;
                }
                State::Look { look, next } => {
                    // OK because we don't permit building a searcher with a
                    // Unicode word boundary if the requisite Unicode data is
                    // unavailable.
                    if !self.nfa.look_matcher().matches_inline(
                        look,
                        input.haystack(),
                        at,
                    ) {
                        return None;
                    }
                    sid = next;
                }
                State::Union { ref alternates } => {
                    sid = match alternates.get(0) {
                        None => return None,
                        Some(&sid) => sid,
                    };
                    cache.stack.extend(
                        alternates[1..]
                            .iter()
                            .copied()
                            .rev()
                            .map(|sid| Frame::Step { sid, at }),
                    );
                }
                State::BinaryUnion { alt1, alt2 } => {
                    sid = alt1;
                    cache.stack.push(Frame::Step { sid: alt2, at });
                }
                State::Capture { next, slot, .. } => {
                    if slot.as_usize() < slots.len() {
                        cache.stack.push(Frame::RestoreCapture {
                            slot,
                            offset: slots[slot],
                        });
                        slots[slot] = NonMaxUsize::new(at);
                    }
                    sid = next;
                }
                State::Fail => return None,
                State::Match { pattern_id } => {
                    return Some(HalfMatch::new(pattern_id, at));
                }
            }
        }
    }
}

/// An iterator over all non-overlapping matches for a fallible search.
///
/// The iterator yields a `Result<Match, MatchError>` value until no more
/// matches could be found.
/// /// The lifetime parameters are as follows: /// /// * `'r` represents the lifetime of the BoundedBacktracker. /// * `'c` represents the lifetime of the BoundedBacktracker's cache. /// * `'h` represents the lifetime of the haystack being searched. /// /// This iterator can be created with the [`BoundedBacktracker::try_find_iter`] /// method. #[derive(Debug)] pub struct TryFindMatches<'r, 'c, 'h> { re: &'r BoundedBacktracker, cache: &'c mut Cache, caps: Captures, it: iter::Searcher<'h>, } impl<'r, 'c, 'h> Iterator for TryFindMatches<'r, 'c, 'h> { type Item = Result<Match, MatchError>; #[inline] fn next(&mut self) -> Option<Result<Match, MatchError>> { // Splitting 'self' apart seems necessary to appease borrowck. let TryFindMatches { re, ref mut cache, ref mut caps, ref mut it } = *self; it.try_advance(|input| { re.try_search(cache, input, caps)?; Ok(caps.get_match()) }) .transpose() } } /// An iterator over all non-overlapping leftmost matches, with their capturing /// groups, for a fallible search. /// /// The iterator yields a `Result<Captures, MatchError>` value until no more /// matches could be found. /// /// The lifetime parameters are as follows: /// /// * `'r` represents the lifetime of the BoundedBacktracker. /// * `'c` represents the lifetime of the BoundedBacktracker's cache. /// * `'h` represents the lifetime of the haystack being searched. /// /// This iterator can be created with the /// [`BoundedBacktracker::try_captures_iter`] method. #[derive(Debug)] pub struct TryCapturesMatches<'r, 'c, 'h> { re: &'r BoundedBacktracker, cache: &'c mut Cache, caps: Captures, it: iter::Searcher<'h>, } impl<'r, 'c, 'h> Iterator for TryCapturesMatches<'r, 'c, 'h> { type Item = Result<Captures, MatchError>; #[inline] fn next(&mut self) -> Option<Result<Captures, MatchError>> { // Splitting 'self' apart seems necessary to appease borrowck. let TryCapturesMatches { re, ref mut cache, ref mut caps, ref mut it } = *self; let _ = it .try_advance(|input| { re.try_search(cache, input, caps)?; Ok(caps.get_match()) }) .transpose()?; if caps.is_match() { Some(Ok(caps.clone())) } else { None } } } /// A cache represents mutable state that a [`BoundedBacktracker`] requires /// during a search. /// /// For a given [`BoundedBacktracker`], its corresponding cache may be created /// either via [`BoundedBacktracker::create_cache`], or via [`Cache::new`]. /// They are equivalent in every way, except the former does not require /// explicitly importing `Cache`. /// /// A particular `Cache` is coupled with the [`BoundedBacktracker`] from which /// it was created. It may only be used with that `BoundedBacktracker`. A cache /// and its allocations may be re-purposed via [`Cache::reset`], in which case, /// it can only be used with the new `BoundedBacktracker` (and not the old /// one). #[derive(Clone, Debug)] pub struct Cache { /// Stack used on the heap for doing backtracking instead of the /// traditional recursive approach. We don't want recursion because then /// we're likely to hit a stack overflow for bigger regexes. stack: Vec<Frame>, /// The set of (StateID, HaystackOffset) pairs that have been visited /// by the backtracker within a single search. If such a pair has been /// visited, then we avoid doing the work for that pair again. This is /// what "bounds" the backtracking and prevents it from having worst case /// exponential time. visited: Visited, } impl Cache { /// Create a new [`BoundedBacktracker`] cache. 
/// /// A potentially more convenient routine to create a cache is /// [`BoundedBacktracker::create_cache`], as it does not require also /// importing the `Cache` type. /// /// If you want to reuse the returned `Cache` with some other /// `BoundedBacktracker`, then you must call [`Cache::reset`] with the /// desired `BoundedBacktracker`. pub fn new(re: &BoundedBacktracker) -> Cache { Cache { stack: vec![], visited: Visited::new(re) } } /// Reset this cache such that it can be used for searching with different /// [`BoundedBacktracker`]. /// /// A cache reset permits reusing memory already allocated in this cache /// with a different `BoundedBacktracker`. /// /// # Example /// /// This shows how to re-purpose a cache for use with a different /// `BoundedBacktracker`. /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{ /// nfa::thompson::backtrack::BoundedBacktracker, /// Match, /// }; /// /// let re1 = BoundedBacktracker::new(r"\w")?; /// let re2 = BoundedBacktracker::new(r"\W")?; /// /// let mut cache = re1.create_cache(); /// assert_eq!( /// Some(Ok(Match::must(0, 0..2))), /// re1.try_find_iter(&mut cache, "Δ").next(), /// ); /// /// // Using 'cache' with re2 is not allowed. It may result in panics or /// // incorrect results. In order to re-purpose the cache, we must reset /// // it with the BoundedBacktracker we'd like to use it with. /// // /// // Similarly, after this reset, using the cache with 're1' is also not /// // allowed. /// cache.reset(&re2); /// assert_eq!( /// Some(Ok(Match::must(0, 0..3))), /// re2.try_find_iter(&mut cache, "☃").next(), /// ); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn reset(&mut self, re: &BoundedBacktracker) { self.visited.reset(re); } /// Returns the heap memory usage, in bytes, of this cache. /// /// This does **not** include the stack size used up by this cache. To /// compute that, use `std::mem::size_of::<Cache>()`. pub fn memory_usage(&self) -> usize { self.stack.len() * core::mem::size_of::<Frame>() + self.visited.memory_usage() } /// Clears this cache. This should be called at the start of every search /// to ensure we start with a clean slate. /// /// This also sets the length of the capturing groups used in the current /// search. This permits an optimization where by 'SlotTable::for_state' /// only returns the number of slots equivalent to the number of slots /// given in the 'Captures' value. This may be less than the total number /// of possible slots, e.g., when one only wants to track overall match /// offsets. This in turn permits less copying of capturing group spans /// in the BoundedBacktracker. fn setup_search( &mut self, re: &BoundedBacktracker, input: &Input<'_>, ) -> Result<(), MatchError> { self.stack.clear(); self.visited.setup_search(re, input)?; Ok(()) } } /// Represents a stack frame on the heap while doing backtracking. /// /// Instead of using explicit recursion for backtracking, we use a stack on /// the heap to keep track of things that we want to explore if the current /// backtracking branch turns out to not lead to a match. #[derive(Clone, Debug)] enum Frame { /// Look for a match starting at `sid` and the given position in the /// haystack. Step { sid: StateID, at: usize }, /// Reset the given `slot` to the given `offset` (which might be `None`). /// This effectively gives a "scope" to capturing groups, such that an /// offset for a particular group only gets returned if the match goes /// through that capturing group. 
If backtracking ends up going down a
    /// different branch that results in a different offset (or perhaps none at
    /// all), then this "restore capture" frame will cause the offset to get
    /// reset.
    RestoreCapture { slot: SmallIndex, offset: Option<NonMaxUsize> },
}

/// A bitset that keeps track of whether a particular (StateID, offset) has
/// been considered during backtracking. If it has already been visited, then
/// backtracking skips it. This is what gives backtracking its "bound."
#[derive(Clone, Debug)]
struct Visited {
    /// The actual underlying bitset. Each element in the bitset corresponds
    /// to a particular (StateID, offset) pair. States correspond to the rows
    /// and the offsets correspond to the columns.
    ///
    /// If our underlying NFA has N states and the haystack we're searching
    /// has M bytes, then we have N*(M+1) entries in our bitset table. The
    /// M+1 occurs because our matches are delayed by one byte (to support
    /// look-around), and so we need to handle the end position itself rather
    /// than stopping just before the end. (If there is no end position, then
    /// it's treated as "end-of-input," which is matched by things like '$'.)
    ///
    /// Given BITS=N*(M+1), we wind up with div_ceil(BITS, 8 * sizeof(usize))
    /// blocks.
    ///
    /// We use 'usize' to represent our blocks because it makes some of the
    /// arithmetic in 'insert' a bit nicer. For example, if we used 'u32' for
    /// our block, we'd either need to cast u32s to usizes or usizes to u32s.
    bitset: Vec<usize>,
    /// The stride represents one plus the length of the haystack we're
    /// searching (as described above). The stride must be initialized for
    /// each search.
    stride: usize,
}

impl Visited {
    /// The size of each block, in bits.
    const BLOCK_SIZE: usize = 8 * core::mem::size_of::<usize>();

    /// Create a new visited set for the given backtracker.
    ///
    /// The set is ready to use, but must be set up at the beginning of each
    /// search by calling `setup_search`.
    fn new(re: &BoundedBacktracker) -> Visited {
        let mut visited = Visited { bitset: vec![], stride: 0 };
        visited.reset(re);
        visited
    }

    /// Insert the given (StateID, offset) pair into this set. If it already
    /// exists, then this is a no-op and it returns false. Otherwise this
    /// returns true.
    fn insert(&mut self, sid: StateID, at: usize) -> bool {
        let table_index = sid.as_usize() * self.stride + at;
        let block_index = table_index / Visited::BLOCK_SIZE;
        let bit = table_index % Visited::BLOCK_SIZE;
        let block_with_bit = 1 << bit;
        if self.bitset[block_index] & block_with_bit != 0 {
            return false;
        }
        self.bitset[block_index] |= block_with_bit;
        true
    }

    /// Reset this visited set to work with the given bounded backtracker.
    fn reset(&mut self, _: &BoundedBacktracker) {
        self.bitset.truncate(0);
    }

    /// Set up this visited set to work for a search using the given NFA
    /// and input configuration. The NFA must be the same NFA used by the
    /// BoundedBacktracker given to Visited::reset. Failing to call this might
    /// result in panics or silently incorrect search behavior.
    fn setup_search(
        &mut self,
        re: &BoundedBacktracker,
        input: &Input<'_>,
    ) -> Result<(), MatchError> {
        // Our haystack length is only the length of the span of the entire
        // haystack that we'll be searching.
        let haylen = input.get_span().len();
        let err = || MatchError::haystack_too_long(haylen);
        // Our stride is one more than the length of the input because our main
        // search loop includes the position at input.end(). (And it does this
        // because matches are delayed by one byte to account for look-around.)
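        // For example, a 4-byte span over a 10-state NFA needs
        // 10 * (4 + 1) = 50 bits of visited capacity.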
self.stride = haylen + 1; let needed_capacity = match re.get_nfa().states().len().checked_mul(self.stride) { None => return Err(err()), Some(capacity) => capacity, }; let max_capacity = 8 * re.get_config().get_visited_capacity(); if needed_capacity > max_capacity { return Err(err()); } let needed_blocks = div_ceil(needed_capacity, Visited::BLOCK_SIZE); self.bitset.truncate(needed_blocks); for block in self.bitset.iter_mut() { *block = 0; } if needed_blocks > self.bitset.len() { self.bitset.resize(needed_blocks, 0); } Ok(()) } /// Return the heap memory usage, in bytes, of this visited set. fn memory_usage(&self) -> usize { self.bitset.len() * core::mem::size_of::<usize>() } } /// Integer division, but rounds up instead of down. fn div_ceil(lhs: usize, rhs: usize) -> usize { if lhs % rhs == 0 { lhs / rhs } else { (lhs / rhs) + 1 } } <file_sep>/regex-cli/args/overlapping.rs use lexopt::{Arg, Parser}; use crate::args::{Configurable, Usage}; /// This defines a configuration for overlapping searches. /// /// Currently, this just controls whether an overlapping search is enabled or /// not. By default, it's disabled. #[derive(Debug, Default)] pub struct Config { pub enabled: bool, } impl Configurable for Config { fn configure( &mut self, _: &mut Parser, arg: &mut Arg, ) -> anyhow::Result<bool> { match *arg { Arg::Long("overlapping") => { self.enabled = true; } _ => return Ok(false), } Ok(true) } fn usage(&self) -> &[Usage] { const USAGES: &'static [Usage] = &[Usage::new( "--overlapping", "Enable overlapping search.", r#" Enable overlapping search. When this is enabled, the regex matcher will attempt to report all possible matches. Generally speaking, when one enables overlapping search, you also want to ensure that '--match-kind all' is given as well. Otherwise the overlapping search is unlikely to work as one would expect since any match semantics other than 'all' exclude some subset of matches from the underlying automaton. Note that overlapping search is not supported in every regex matcher. "#, )]; USAGES } } <file_sep>/regex-automata/src/meta/stopat.rs /*! This module defines two bespoke forward DFA search routines. One for the lazy DFA and one for the fully compiled DFA. These routines differ from the normal ones by reporting the position at which the search terminates when a match *isn't* found. This position at which a search terminates is useful in contexts where the meta regex engine runs optimizations that could go quadratic if we aren't careful. Namely, a regex search *could* scan to the end of the haystack only to report a non-match. If the caller doesn't know that the search scanned to the end of the haystack, it might restart the search at the next literal candidate it finds and repeat the process. Providing the caller with the position at which the search stopped provides a way for the caller to determine the point at which subsequent scans should not pass. This is principally used in the "reverse inner" optimization, which works like this: 1. Look for a match of an inner literal. Say, 'Z' in '\w+Z\d+'. 2. At the spot where 'Z' matches, do a reverse anchored search from there for '\w+'. 3. If the reverse search matches, it corresponds to the start position of a (possible) match. At this point, do a forward anchored search to find the end position. If an end position is found, then we have a match and we know its bounds. 
If the forward anchored search in (3) searches the entire rest of the haystack but reports a non-match, then a naive implementation of the above will continue back at step 1 looking for more candidates. There might still be a match to be found! It's possible. But we already scanned the whole haystack. So if we keep repeating the process, then we might wind up taking quadratic time in the size of the haystack, which is not great. So if the forward anchored search in (3) reports the position at which it stops, then we can detect whether quadratic behavior might be occurring in steps (1) and (2). For (1), it occurs if the literal candidate found occurs *before* the end of the previous search in (3), since that means we're now going to look for another match in a place where the forward search has already scanned. It is *correct* to do so, but our technique has become inefficient. For (2), quadratic behavior occurs similarly when its reverse search extends past the point where the previous forward search in (3) terminated. Indeed, to implement (2), we use the sibling 'limited' module for ensuring our reverse scan doesn't go further than we want. See the 'opt/reverse-inner' benchmarks in rebar for a real demonstration of how quadratic behavior is mitigated. */ use crate::{meta::error::RetryFailError, HalfMatch, Input, MatchError}; #[cfg(feature = "dfa-build")] pub(crate) fn dfa_try_search_half_fwd( dfa: &crate::dfa::dense::DFA<alloc::vec::Vec<u32>>, input: &Input<'_>, ) -> Result<Result<HalfMatch, usize>, RetryFailError> { use crate::dfa::{accel, Automaton}; let mut mat = None; let mut sid = dfa.start_state_forward(input)?; let mut at = input.start(); while at < input.end() { sid = dfa.next_state(sid, input.haystack()[at]); if dfa.is_special_state(sid) { if dfa.is_match_state(sid) { let pattern = dfa.match_pattern(sid, 0); mat = Some(HalfMatch::new(pattern, at)); if input.get_earliest() { return Ok(mat.ok_or(at)); } if dfa.is_accel_state(sid) { let needs = dfa.accelerator(sid); at = accel::find_fwd(needs, input.haystack(), at) .unwrap_or(input.end()); continue; } } else if dfa.is_accel_state(sid) { let needs = dfa.accelerator(sid); at = accel::find_fwd(needs, input.haystack(), at) .unwrap_or(input.end()); continue; } else if dfa.is_dead_state(sid) { return Ok(mat.ok_or(at)); } else if dfa.is_quit_state(sid) { if mat.is_some() { return Ok(mat.ok_or(at)); } return Err(MatchError::quit(input.haystack()[at], at).into()); } else { // Ideally we wouldn't use a DFA that specialized start states // and thus 'is_start_state()' could never be true here, but in // practice we reuse the DFA created for the full regex which // will specialize start states whenever there is a prefilter. 
debug_assert!(dfa.is_start_state(sid)); } } at += 1; } dfa_eoi_fwd(dfa, input, &mut sid, &mut mat)?; Ok(mat.ok_or(at)) } #[cfg(feature = "hybrid")] pub(crate) fn hybrid_try_search_half_fwd( dfa: &crate::hybrid::dfa::DFA, cache: &mut crate::hybrid::dfa::Cache, input: &Input<'_>, ) -> Result<Result<HalfMatch, usize>, RetryFailError> { let mut mat = None; let mut sid = dfa.start_state_forward(cache, input)?; let mut at = input.start(); while at < input.end() { sid = dfa .next_state(cache, sid, input.haystack()[at]) .map_err(|_| MatchError::gave_up(at))?; if sid.is_tagged() { if sid.is_match() { let pattern = dfa.match_pattern(cache, sid, 0); mat = Some(HalfMatch::new(pattern, at)); if input.get_earliest() { return Ok(mat.ok_or(at)); } } else if sid.is_dead() { return Ok(mat.ok_or(at)); } else if sid.is_quit() { if mat.is_some() { return Ok(mat.ok_or(at)); } return Err(MatchError::quit(input.haystack()[at], at).into()); } else { // We should NEVER get an unknown state ID back from // dfa.next_state(). debug_assert!(!sid.is_unknown()); // Ideally we wouldn't use a lazy DFA that specialized start // states and thus 'sid.is_start()' could never be true here, // but in practice we reuse the lazy DFA created for the full // regex which will specialize start states whenever there is // a prefilter. debug_assert!(sid.is_start()); } } at += 1; } hybrid_eoi_fwd(dfa, cache, input, &mut sid, &mut mat)?; Ok(mat.ok_or(at)) } #[cfg(feature = "dfa-build")] #[cfg_attr(feature = "perf-inline", inline(always))] fn dfa_eoi_fwd( dfa: &crate::dfa::dense::DFA<alloc::vec::Vec<u32>>, input: &Input<'_>, sid: &mut crate::util::primitives::StateID, mat: &mut Option<HalfMatch>, ) -> Result<(), MatchError> { use crate::dfa::Automaton; let sp = input.get_span(); match input.haystack().get(sp.end) { Some(&b) => { *sid = dfa.next_state(*sid, b); if dfa.is_match_state(*sid) { let pattern = dfa.match_pattern(*sid, 0); *mat = Some(HalfMatch::new(pattern, sp.end)); } else if dfa.is_quit_state(*sid) { if mat.is_some() { return Ok(()); } return Err(MatchError::quit(b, sp.end)); } } None => { *sid = dfa.next_eoi_state(*sid); if dfa.is_match_state(*sid) { let pattern = dfa.match_pattern(*sid, 0); *mat = Some(HalfMatch::new(pattern, input.haystack().len())); } // N.B. We don't have to check 'is_quit' here because the EOI // transition can never lead to a quit state. debug_assert!(!dfa.is_quit_state(*sid)); } } Ok(()) } #[cfg(feature = "hybrid")] #[cfg_attr(feature = "perf-inline", inline(always))] fn hybrid_eoi_fwd( dfa: &crate::hybrid::dfa::DFA, cache: &mut crate::hybrid::dfa::Cache, input: &Input<'_>, sid: &mut crate::hybrid::LazyStateID, mat: &mut Option<HalfMatch>, ) -> Result<(), MatchError> { let sp = input.get_span(); match input.haystack().get(sp.end) { Some(&b) => { *sid = dfa .next_state(cache, *sid, b) .map_err(|_| MatchError::gave_up(sp.end))?; if sid.is_match() { let pattern = dfa.match_pattern(cache, *sid, 0); *mat = Some(HalfMatch::new(pattern, sp.end)); } else if sid.is_quit() { if mat.is_some() { return Ok(()); } return Err(MatchError::quit(b, sp.end)); } } None => { *sid = dfa .next_eoi_state(cache, *sid) .map_err(|_| MatchError::gave_up(input.haystack().len()))?; if sid.is_match() { let pattern = dfa.match_pattern(cache, *sid, 0); *mat = Some(HalfMatch::new(pattern, input.haystack().len())); } // N.B. We don't have to check 'is_quit' here because the EOI // transition can never lead to a quit state. 
debug_assert!(!sid.is_quit()); } } Ok(()) } <file_sep>/regex-cli/args/thompson.rs use std::borrow::Borrow; use { anyhow::Context, lexopt::{Arg, Parser}, regex_automata::{nfa::thompson, util::look::LookMatcher}, regex_syntax::hir::Hir, }; use crate::args::{self, flags, Configurable, Usage}; /// This exposes all of the configuration knobs on a regex_automata::Input via /// CLI flags. The only aspect of regex_automata::Input that this does not /// cover is the haystack, which should be provided by other means (usually /// with `Haystack`). #[derive(Debug, Default)] pub struct Config { thompson: thompson::Config, } impl Config { /// Return a `thompson::Config` object from this configuration. pub fn thompson(&self) -> anyhow::Result<thompson::Config> { Ok(self.thompson.clone()) } /// Returns a new configuration that compiles a reverse NFA. pub fn reversed(&self) -> Config { // Reverse DFAs require that captures are disabled. In practice, there // is no current use case for a reverse NFA with capture groups. let thompson = self .thompson .clone() .reverse(true) .which_captures(thompson::WhichCaptures::None); Config { thompson } } /// Compiles the given `Hir` expressions into an NFA. If compilation fails, /// then an error is returned. (And there is generally no way to know which /// pattern caused a failure.) pub fn from_hirs<H: Borrow<Hir>>( &self, hirs: &[H], ) -> anyhow::Result<thompson::NFA> { thompson::Compiler::new() .configure(self.thompson()?) .build_many_from_hir(hirs) .context("failed to compile Thompson NFA") } } impl Configurable for Config { fn configure( &mut self, p: &mut Parser, arg: &mut Arg, ) -> anyhow::Result<bool> { match *arg { Arg::Short('B') | Arg::Long("no-utf8-nfa") => { self.thompson = self.thompson.clone().utf8(false); } Arg::Short('r') | Arg::Long("reverse") => { self.thompson = self.thompson.clone().reverse(true); } Arg::Long("nfa-size-limit") => { let limit = args::parse_maybe(p, "--nfa-size-limit")?; self.thompson = self.thompson.clone().nfa_size_limit(limit); } Arg::Long("shrink") => { self.thompson = self.thompson.clone().shrink(true); } Arg::Long("no-captures") => { self.thompson = self .thompson .clone() .which_captures(thompson::WhichCaptures::None); } Arg::Long("line-terminator") => { let byte: flags::OneByte = args::parse(p, "--line-terminator")?; let mut lookm = LookMatcher::new(); lookm.set_line_terminator(byte.0); self.thompson = self.thompson.clone().look_matcher(lookm); } _ => return Ok(false), } Ok(true) } fn usage(&self) -> &[Usage] { const USAGES: &'static [Usage] = &[ Usage::new( "-B, --no-utf8-nfa", "Disable UTF-8 mode for empty matches.", r#" Disables UTF-8 mode for empty matches. When this flag is given, empty matches that split a codepoint are permitted. Otherwise, they are banned. "#, ), Usage::new( "-r, --reverse", "Build a reverse Thompson NFA.", r#" Build a reverse Thompson NFA. The reverse NFA matches the language described by the corresponding forward NFA, but in reverse. In general, this is done by reversing the concatenations of the regex and inverting look-around assertions that depend on the direction of matching. So for example, `^` becomes `$` and `$` becomes `^`. But `\b` and `\B` remain the same. Note that at time of writing, using this flag requires also using --no-captures. "#, ), Usage::new( "--nfa-size-limit", "Sets a limit on the memory used by an NFA.", r#" Sets a limit on the memory used by an NFA, in terms of bytes of heap usage. This limit is applied during NFA construction. 
If the limit is exceeded, then construction will fail. A special 'none' value disables the limit entirely. "#, ), Usage::new( "--shrink", "Enables NFA shrinking.", r#" This flag enables NFA shrinking. At time of writing, this is an expensive process that only applies to reverse NFA compilation. The process may get cheaper or more widely applicable in the future, but it generally results in making the state graph of large Unicode character classes much smaller. Moreover, if you're building a fully compiled reverse DFA, the extra time spent shrinking the NFA can lead to far larger savings in the subsequent DFA determinization. "#, ), Usage::new( "--no-captures", "Disable capture states.", r#" Disables capture states. By default, NFAs include special "capture" states that instruct some regex engines (like the PikeVM) to record offset positions in ancillary state. It can be useful to disable capture states in order to reduce "clutter" in the automaton when debugging it. Also, at time of writing, reverse NFAs require that capture groups are disabled. "#, ), Usage::new( "--line-terminator", "Set the line terminator used by line anchors.", r#" Set the line terminator used by line anchors. The line anchors are '(?m:^)' and '(?m:$)'. By default, they both use '\n' as line terminators for matching purposes. This option changes the line terminator to any arbitrary byte. Note that CRLF aware line anchors, that is, '(?Rm:^)' and '(?Rm:$)', are unaffected by this option. CRLF aware line anchors always use '\r' and '\n' as line terminators and do not match between a '\r' and '\n'. "#, ), ]; USAGES } } <file_sep>/regex-automata/src/util/syntax.rs /*! Utilities for dealing with the syntax of a regular expression. This module currently only exposes a [`Config`] type that itself represents a wrapper around the configuration for a [`regex-syntax::ParserBuilder`](regex_syntax::ParserBuilder). The purpose of this wrapper is to make configuring syntax options very similar to how other configuration is done throughout this crate. Namely, instead of duplicating syntax options across every builder (of which there are many), we instead create small config objects like this one that can be passed around and composed. */ use alloc::{vec, vec::Vec}; use regex_syntax::{ ast, hir::{self, Hir}, Error, ParserBuilder, }; /// A convenience routine for parsing a pattern into an HIR value with the /// default configuration. /// /// # Example /// /// This shows how to parse a pattern into an HIR value: /// /// ``` /// use regex_automata::util::syntax; /// /// let hir = syntax::parse(r"([a-z]+)|([0-9]+)")?; /// assert_eq!(Some(1), hir.properties().static_explicit_captures_len()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn parse(pattern: &str) -> Result<Hir, Error> { parse_with(pattern, &Config::default()) } /// A convenience routine for parsing many patterns into HIR value with the /// default configuration. 
/// /// # Example /// /// This shows how to parse many patterns into an corresponding HIR values: /// /// ``` /// use { /// regex_automata::util::syntax, /// regex_syntax::hir::Properties, /// }; /// /// let hirs = syntax::parse_many(&[ /// r"([a-z]+)|([0-9]+)", /// r"foo(A-Z]+)bar", /// ])?; /// let props = Properties::union(hirs.iter().map(|h| h.properties())); /// assert_eq!(Some(1), props.static_explicit_captures_len()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn parse_many<P: AsRef<str>>(patterns: &[P]) -> Result<Vec<Hir>, Error> { parse_many_with(patterns, &Config::default()) } /// A convenience routine for parsing a pattern into an HIR value using a /// `Config`. /// /// # Example /// /// This shows how to parse a pattern into an HIR value with a non-default /// configuration: /// /// ``` /// use regex_automata::util::syntax; /// /// let hir = syntax::parse_with( /// r"^[a-z]+$", /// &syntax::Config::new().multi_line(true).crlf(true), /// )?; /// assert!(hir.properties().look_set().contains_anchor_crlf()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn parse_with(pattern: &str, config: &Config) -> Result<Hir, Error> { let mut builder = ParserBuilder::new(); config.apply(&mut builder); builder.build().parse(pattern) } /// A convenience routine for parsing many patterns into HIR values using a /// `Config`. /// /// # Example /// /// This shows how to parse many patterns into an corresponding HIR values /// with a non-default configuration: /// /// ``` /// use { /// regex_automata::util::syntax, /// regex_syntax::hir::Properties, /// }; /// /// let patterns = &[ /// r"([a-z]+)|([0-9]+)", /// r"\W", /// r"foo(A-Z]+)bar", /// ]; /// let config = syntax::Config::new().unicode(false).utf8(false); /// let hirs = syntax::parse_many_with(patterns, &config)?; /// let props = Properties::union(hirs.iter().map(|h| h.properties())); /// assert!(!props.is_utf8()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn parse_many_with<P: AsRef<str>>( patterns: &[P], config: &Config, ) -> Result<Vec<Hir>, Error> { let mut builder = ParserBuilder::new(); config.apply(&mut builder); let mut hirs = vec![]; for p in patterns.iter() { hirs.push(builder.build().parse(p.as_ref())?); } Ok(hirs) } /// A common set of configuration options that apply to the syntax of a regex. /// /// This represents a group of configuration options that specifically apply /// to how the concrete syntax of a regular expression is interpreted. In /// particular, they are generally forwarded to the /// [`ParserBuilder`](https://docs.rs/regex-syntax/*/regex_syntax/struct.ParserBuilder.html) /// in the /// [`regex-syntax`](https://docs.rs/regex-syntax) /// crate when building a regex from its concrete syntax directly. /// /// These options are defined as a group since they apply to every regex engine /// in this crate. Instead of re-defining them on every engine's builder, they /// are instead provided here as one cohesive unit. #[derive(Clone, Copy, Debug)] pub struct Config { case_insensitive: bool, multi_line: bool, dot_matches_new_line: bool, crlf: bool, line_terminator: u8, swap_greed: bool, ignore_whitespace: bool, unicode: bool, utf8: bool, nest_limit: u32, octal: bool, } impl Config { /// Return a new default syntax configuration. pub fn new() -> Config { // These defaults match the ones used in regex-syntax. 
Config { case_insensitive: false, multi_line: false, dot_matches_new_line: false, crlf: false, line_terminator: b'\n', swap_greed: false, ignore_whitespace: false, unicode: true, utf8: true, nest_limit: 250, octal: false, } } /// Enable or disable the case insensitive flag by default. /// /// When Unicode mode is enabled, case insensitivity is Unicode-aware. /// Specifically, it will apply the "simple" case folding rules as /// specified by Unicode. /// /// By default this is disabled. It may alternatively be selectively /// enabled in the regular expression itself via the `i` flag. pub fn case_insensitive(mut self, yes: bool) -> Config { self.case_insensitive = yes; self } /// Enable or disable the multi-line matching flag by default. /// /// When this is enabled, the `^` and `$` look-around assertions will /// match immediately after and immediately before a new line character, /// respectively. Note that the `\A` and `\z` look-around assertions are /// unaffected by this setting and always correspond to matching at the /// beginning and end of the input. /// /// By default this is disabled. It may alternatively be selectively /// enabled in the regular expression itself via the `m` flag. pub fn multi_line(mut self, yes: bool) -> Config { self.multi_line = yes; self } /// Enable or disable the "dot matches any character" flag by default. /// /// When this is enabled, `.` will match any character. When it's disabled, /// then `.` will match any character except for a new line character. /// /// Note that `.` is impacted by whether the "unicode" setting is enabled /// or not. When Unicode is enabled (the default), `.` will match any UTF-8 /// encoding of any Unicode scalar value (sans a new line, depending on /// whether this "dot matches new line" option is enabled). When Unicode /// mode is disabled, `.` will match any byte instead. Because of this, /// when Unicode mode is disabled, `.` can only be used when the "allow /// invalid UTF-8" option is enabled, since `.` could otherwise match /// invalid UTF-8. /// /// By default this is disabled. It may alternatively be selectively /// enabled in the regular expression itself via the `s` flag. pub fn dot_matches_new_line(mut self, yes: bool) -> Config { self.dot_matches_new_line = yes; self } /// Enable or disable the "CRLF mode" flag by default. /// /// By default this is disabled. It may alternatively be selectively /// enabled in the regular expression itself via the `R` flag. /// /// When CRLF mode is enabled, the following happens: /// /// * Unless `dot_matches_new_line` is enabled, `.` will match any character /// except for `\r` and `\n`. /// * When `multi_line` mode is enabled, `^` and `$` will treat `\r\n`, /// `\r` and `\n` as line terminators. And in particular, neither will /// match between a `\r` and a `\n`. pub fn crlf(mut self, yes: bool) -> Config { self.crlf = yes; self } /// Sets the line terminator for use with `(?u-s:.)` and `(?-us:.)`. /// /// Namely, instead of `.` (by default) matching everything except for `\n`, /// this will cause `.` to match everything except for the byte given. /// /// If `.` is used in a context where Unicode mode is enabled and this byte /// isn't ASCII, then an error will be returned. When Unicode mode is /// disabled, then any byte is permitted, but will return an error if UTF-8 /// mode is enabled and it is a non-ASCII byte. /// /// In short, any ASCII value for a line terminator is always okay. 
But a /// non-ASCII byte might result in an error depending on whether Unicode /// mode or UTF-8 mode are enabled. /// /// Note that if `R` mode is enabled then it always takes precedence and /// the line terminator will be treated as `\r` and `\n` simultaneously. /// /// Note also that this *doesn't* impact the look-around assertions /// `(?m:^)` and `(?m:$)`. That's usually controlled by additional /// configuration in the regex engine itself. pub fn line_terminator(mut self, byte: u8) -> Config { self.line_terminator = byte; self } /// Enable or disable the "swap greed" flag by default. /// /// When this is enabled, `.*` (for example) will become ungreedy and `.*?` /// will become greedy. /// /// By default this is disabled. It may alternatively be selectively /// enabled in the regular expression itself via the `U` flag. pub fn swap_greed(mut self, yes: bool) -> Config { self.swap_greed = yes; self } /// Enable verbose mode in the regular expression. /// /// When enabled, verbose mode permits insigificant whitespace in many /// places in the regular expression, as well as comments. Comments are /// started using `#` and continue until the end of the line. /// /// By default, this is disabled. It may be selectively enabled in the /// regular expression by using the `x` flag regardless of this setting. pub fn ignore_whitespace(mut self, yes: bool) -> Config { self.ignore_whitespace = yes; self } /// Enable or disable the Unicode flag (`u`) by default. /// /// By default this is **enabled**. It may alternatively be selectively /// disabled in the regular expression itself via the `u` flag. /// /// Note that unless "allow invalid UTF-8" is enabled (it's disabled by /// default), a regular expression will fail to parse if Unicode mode is /// disabled and a sub-expression could possibly match invalid UTF-8. /// /// **WARNING**: Unicode mode can greatly increase the size of the compiled /// DFA, which can noticeably impact both memory usage and compilation /// time. This is especially noticeable if your regex contains character /// classes like `\w` that are impacted by whether Unicode is enabled or /// not. If Unicode is not necessary, you are encouraged to disable it. pub fn unicode(mut self, yes: bool) -> Config { self.unicode = yes; self } /// When disabled, the builder will permit the construction of a regular /// expression that may match invalid UTF-8. /// /// For example, when [`Config::unicode`] is disabled, then /// expressions like `[^a]` may match invalid UTF-8 since they can match /// any single byte that is not `a`. By default, these sub-expressions /// are disallowed to avoid returning offsets that split a UTF-8 /// encoded codepoint. However, in cases where matching at arbitrary /// locations is desired, this option can be disabled to permit all such /// sub-expressions. /// /// When enabled (the default), the builder is guaranteed to produce a /// regex that will only ever match valid UTF-8 (otherwise, the builder /// will return an error). pub fn utf8(mut self, yes: bool) -> Config { self.utf8 = yes; self } /// Set the nesting limit used for the regular expression parser. /// /// The nesting limit controls how deep the abstract syntax tree is allowed /// to be. If the AST exceeds the given limit (e.g., with too many nested /// groups), then an error is returned by the parser. /// /// The purpose of this limit is to act as a heuristic to prevent stack /// overflow when building a finite automaton from a regular expression's /// abstract syntax tree. 
In particular, construction currently uses /// recursion. In the future, the implementation may stop using recursion /// and this option will no longer be necessary. /// /// This limit is not checked until the entire AST is parsed. Therefore, /// if callers want to put a limit on the amount of heap space used, then /// they should impose a limit on the length, in bytes, of the concrete /// pattern string. In particular, this is viable since the parser will /// limit itself to heap space proportional to the length of the pattern /// string. /// /// Note that a nest limit of `0` will return a nest limit error for most /// patterns but not all. For example, a nest limit of `0` permits `a` but /// not `ab`, since `ab` requires a concatenation AST item, which results /// in a nest depth of `1`. In general, a nest limit is not something that /// manifests in an obvious way in the concrete syntax, therefore, it /// should not be used in a granular way. pub fn nest_limit(mut self, limit: u32) -> Config { self.nest_limit = limit; self } /// Whether to support octal syntax or not. /// /// Octal syntax is a little-known way of uttering Unicode codepoints in /// a regular expression. For example, `a`, `\x61`, `\u0061` and /// `\141` are all equivalent regular expressions, where the last example /// shows octal syntax. /// /// While supporting octal syntax isn't in and of itself a problem, it does /// make good error messages harder. That is, in PCRE based regex engines, /// syntax like `\1` invokes a backreference, which is explicitly /// unsupported in Rust's regex engine. However, many users expect it to /// be supported. Therefore, when octal support is disabled, the error /// message will explicitly mention that backreferences aren't supported. /// /// Octal syntax is disabled by default. pub fn octal(mut self, yes: bool) -> Config { self.octal = yes; self } /// Returns whether "unicode" mode is enabled. pub fn get_unicode(&self) -> bool { self.unicode } /// Returns whether "case insensitive" mode is enabled. pub fn get_case_insensitive(&self) -> bool { self.case_insensitive } /// Returns whether "multi line" mode is enabled. pub fn get_multi_line(&self) -> bool { self.multi_line } /// Returns whether "dot matches new line" mode is enabled. pub fn get_dot_matches_new_line(&self) -> bool { self.dot_matches_new_line } /// Returns whether "CRLF" mode is enabled. pub fn get_crlf(&self) -> bool { self.crlf } /// Returns the line terminator in this syntax configuration. pub fn get_line_terminator(&self) -> u8 { self.line_terminator } /// Returns whether "swap greed" mode is enabled. pub fn get_swap_greed(&self) -> bool { self.swap_greed } /// Returns whether "ignore whitespace" mode is enabled. pub fn get_ignore_whitespace(&self) -> bool { self.ignore_whitespace } /// Returns whether UTF-8 mode is enabled. pub fn get_utf8(&self) -> bool { self.utf8 } /// Returns the "nest limit" setting. pub fn get_nest_limit(&self) -> u32 { self.nest_limit } /// Returns whether "octal" mode is enabled. pub fn get_octal(&self) -> bool { self.octal } /// Applies this configuration to the given parser. 
pub(crate) fn apply(&self, builder: &mut ParserBuilder) { builder .unicode(self.unicode) .case_insensitive(self.case_insensitive) .multi_line(self.multi_line) .dot_matches_new_line(self.dot_matches_new_line) .crlf(self.crlf) .line_terminator(self.line_terminator) .swap_greed(self.swap_greed) .ignore_whitespace(self.ignore_whitespace) .utf8(self.utf8) .nest_limit(self.nest_limit) .octal(self.octal); } /// Applies this configuration to the given AST parser. pub(crate) fn apply_ast(&self, builder: &mut ast::parse::ParserBuilder) { builder .ignore_whitespace(self.ignore_whitespace) .nest_limit(self.nest_limit) .octal(self.octal); } /// Applies this configuration to the given AST-to-HIR translator. pub(crate) fn apply_hir( &self, builder: &mut hir::translate::TranslatorBuilder, ) { builder .unicode(self.unicode) .case_insensitive(self.case_insensitive) .multi_line(self.multi_line) .crlf(self.crlf) .dot_matches_new_line(self.dot_matches_new_line) .line_terminator(self.line_terminator) .swap_greed(self.swap_greed) .utf8(self.utf8); } } impl Default for Config { fn default() -> Config { Config::new() } } <file_sep>/regex-cli/cmd/find/which/nfa.rs use regex_automata::{Input, PatternSet}; use crate::{ args, util::{self, Table}, }; pub fn run_pikevm(p: &mut lexopt::Parser) -> anyhow::Result<()> { const USAGE: &'static str = "\ Executes a search for full matches using the PikeVM regex engine. USAGE: regex-cli find match pikevm [-p <pattern> ...] <haystack-path> regex-cli find match pikevm [-p <pattern> ...] -y <haystack> TIP: use -h for short docs and --help for long docs OPTIONS: %options% "; let mut common = args::common::Config::default(); let mut input = args::input::Config::default(); let mut patterns = args::patterns::Config::only_flags(); let mut haystack = args::haystack::Config::default(); let mut syntax = args::syntax::Config::default(); let mut thompson = args::thompson::Config::default(); let mut pikevm = args::pikevm::Config::default(); let mut find = super::super::Config::default(); args::configure( p, USAGE, &mut [ &mut common, &mut input, &mut patterns, &mut haystack, &mut syntax, &mut thompson, &mut pikevm, &mut find, ], )?; anyhow::ensure!( !find.count, "'which' command does not support reporting counts", ); let pats = patterns.get()?; let mut table = Table::empty(); let (asts, time) = util::timeitr(|| syntax.asts(&pats))?; table.add("parse time", time); let (hirs, time) = util::timeitr(|| syntax.hirs(&pats, &asts))?; table.add("translate time", time); let (nfa, time) = util::timeitr(|| thompson.from_hirs(&hirs))?; table.add("compile nfa time", time); let (re, time) = util::timeitr(|| pikevm.from_nfa(&nfa))?; table.add("build pikevm time", time); let (mut cache, time) = util::timeit(|| re.create_cache()); table.add("cache creation time", time); let search = |input: &Input<'_>, patset: &mut PatternSet| { re.which_overlapping_matches(&mut cache, input, patset); Ok(()) }; super::run_search( &mut table, &common, &find, &input, &haystack, re.pattern_len(), search, )?; Ok(()) } <file_sep>/regex-cli/args/onepass.rs use { anyhow::Context, lexopt::{Arg, Parser}, regex_automata::{dfa::onepass, nfa::thompson::NFA}, }; use crate::args::{self, flags, Configurable, Usage}; /// Exposes the configuration of a one-pass DFA. #[derive(Debug, Default)] pub struct Config { onepass: onepass::Config, } impl Config { /// Return a `dfa::onepass::Config` object from this configuration. 
pub fn onepass(&self) -> anyhow::Result<onepass::Config> { Ok(self.onepass.clone()) } /// Attempts to convert the given NFA into a one-pass DFA. If the NFA isn't /// one-pass or if one of a few different limits is hit, then an error /// is returned. pub fn from_nfa(&self, nfa: &NFA) -> anyhow::Result<onepass::DFA> { onepass::Builder::new() .configure(self.onepass()?) .build_from_nfa(nfa.clone()) .context("failed to compile onepass DFA") } } impl Configurable for Config { fn configure( &mut self, p: &mut Parser, arg: &mut Arg, ) -> anyhow::Result<bool> { match *arg { Arg::Short('k') | Arg::Long("match-kind") => { let kind: flags::MatchKind = args::parse(p, "-k/--match-kind")?; self.onepass = self.onepass.clone().match_kind(kind.kind); } Arg::Long("starts-for-each-pattern") => { self.onepass = self.onepass.clone().starts_for_each_pattern(true); } Arg::Short('C') | Arg::Long("no-byte-classes") => { self.onepass = self.onepass.clone().byte_classes(false); } Arg::Long("onepass-size-limit") => { let limit = args::parse_maybe(p, "--onepass-size-limit")?; self.onepass = self.onepass.clone().size_limit(limit); } _ => return Ok(false), } Ok(true) } fn usage(&self) -> &[Usage] { const USAGES: &'static [Usage] = &[ flags::MatchKind::USAGE, Usage::new( "--starts-for-each-pattern", "Add anchored start states for each pattern.", r#" Add anchored start states for each pattern. This permits running an anchored search for a specific pattern using the --pattern-id flag. (Assuming this is a search command.) "#, ), Usage::new( "-C, --no-byte-classes", "Disable byte classes.", r#" This causes all bytes to be an equivalence class unto themselves. By default, bytes are grouped into equivalence classes to reduce the size of the alphabet for a DFA, and therefore decreases overall space usage. It can be quite convenient to disable byte classes when looking at the debug representation of a DFA. Otherwise, the transitions are much harder for a human to read. "#, ), Usage::new( "--onepass-size-limit", "Set a limit on heap used by a one-pass DFA in bytes.", r#" This sets a limit on the number of heap memory a one-pass DFA can use. The limit is enforced at one-pass DFA construction time. If the limit is exceeded, then construction will fail. A special value of 'none' may be given, which disables the limit. "#, ), ]; USAGES } } <file_sep>/.github/ISSUE_TEMPLATE/feature_request.md --- name: Feature request about: Suggest a new feature title: '' labels: '' assignees: '' --- #### Describe your feature request Please describe the behavior you want and the motivation. Please also provide examples of how the new feature would be used if your feature request were added. The best kind of feature request is one that proposes a concrete change to the API along with a sketch of the changes to the public API documentation. If you're not sure what to write here, then try imagining what the ideal documentation of your new feature would look like. Then try to write it. <file_sep>/regex-automata/tests/lib.rs // We have a similar config in the regex-automata crate root. Basically, it is // just too annoying to deal with dead code when a subset of features is // enabled. #![cfg_attr( not(all( feature = "std", feature = "nfa", feature = "dfa", feature = "hybrid", feature = "perf-literal-substring", feature = "perf-literal-multisubstring", )), allow(dead_code, unused_imports, unused_variables) )] // Similar deal with Miri. Just let dead code warnings be. 
#![cfg_attr(miri, allow(dead_code, unused_imports, unused_variables))] #[cfg(any(feature = "dfa-search", feature = "dfa-onepass"))] mod dfa; #[cfg(feature = "dfa-search")] mod fuzz; #[cfg(feature = "dfa-search")] mod gen; #[cfg(feature = "hybrid")] mod hybrid; #[cfg(feature = "meta")] mod meta; #[cfg(any(feature = "nfa-backtrack", feature = "nfa-pikevm"))] mod nfa; fn suite() -> anyhow::Result<regex_test::RegexTests> { let _ = env_logger::try_init(); let mut tests = regex_test::RegexTests::new(); macro_rules! load { ($name:expr) => {{ const DATA: &[u8] = include_bytes!(concat!("../../testdata/", $name, ".toml")); tests.load_slice($name, DATA)?; }}; } load!("anchored"); load!("bytes"); load!("crazy"); load!("crlf"); load!("earliest"); load!("empty"); load!("expensive"); load!("flags"); load!("iter"); load!("leftmost-all"); load!("line-terminator"); load!("misc"); load!("multiline"); load!("no-unicode"); load!("overlapping"); load!("regression"); load!("set"); load!("substring"); load!("unicode"); load!("utf8"); load!("word-boundary"); load!("fowler/basic"); load!("fowler/nullsubexpr"); load!("fowler/repetition"); Ok(tests) } /// Configure a regex_automata::Input with the given test configuration. fn create_input<'h>( test: &'h regex_test::RegexTest, ) -> regex_automata::Input<'h> { use regex_automata::Anchored; let bounds = test.bounds(); let anchored = if test.anchored() { Anchored::Yes } else { Anchored::No }; regex_automata::Input::new(test.haystack()) .range(bounds.start..bounds.end) .anchored(anchored) } /// Convert capture matches into the test suite's capture values. /// /// The given captures must represent a valid match, where the first capturing /// group has a non-None span. Otherwise this panics. fn testify_captures( caps: &regex_automata::util::captures::Captures, ) -> regex_test::Captures { assert!(caps.is_match(), "expected captures to represent a match"); let spans = caps.iter().map(|group| { group.map(|m| regex_test::Span { start: m.start, end: m.end }) }); // These unwraps are OK because we assume our 'caps' represents a match, // and a match always gives a non-zero number of groups with the first // group being non-None. regex_test::Captures::new(caps.pattern().unwrap().as_usize(), spans) .unwrap() } /// Convert a test harness match kind to a regex-automata match kind. If /// regex-automata doesn't support the harness kind, then `None` is returned. fn untestify_kind( kind: regex_test::MatchKind, ) -> Option<regex_automata::MatchKind> { match kind { regex_test::MatchKind::All => Some(regex_automata::MatchKind::All), regex_test::MatchKind::LeftmostFirst => { Some(regex_automata::MatchKind::LeftmostFirst) } regex_test::MatchKind::LeftmostLongest => None, } } <file_sep>/regex-automata/src/dfa/mod.rs /*! A module for building and searching with deterministic finite automata (DFAs). Like other modules in this crate, DFAs support a rich regex syntax with Unicode features. DFAs also have extensive options for configuring the best space vs time trade off for your use case and provides support for cheap deserialization of automata for use in `no_std` environments. If you're looking for lazy DFAs that build themselves incrementally during search, then please see the top-level [`hybrid` module](crate::hybrid). # Overview This section gives a brief overview of the primary types in this module: * A [`regex::Regex`] provides a way to search for matches of a regular expression using DFAs. This includes iterating over matches with both the start and end positions of each match. 
* A [`dense::DFA`] provides low level access to a DFA that uses a dense representation (uses lots of space, but fast searching). * A [`sparse::DFA`] provides the same API as a `dense::DFA`, but uses a sparse representation (uses less space, but slower searching). * An [`Automaton`] trait that defines an interface that both dense and sparse DFAs implement. (A `regex::Regex` is generic over this trait.) * Both dense DFAs and sparse DFAs support serialization to raw bytes (e.g., [`dense::DFA::to_bytes_little_endian`]) and cheap deserialization (e.g., [`dense::DFA::from_bytes`]). There is also a [`onepass`] module that provides a [one-pass DFA](onepass::DFA). The unique advantage of this DFA is that, for the class of regexes it can be built with, it supports reporting the spans of matching capturing groups. It is the only DFA in this crate capable of such a thing. # Example: basic regex searching This example shows how to compile a regex using the default configuration and then use it to find matches in a byte string: ``` use regex_automata::{Match, dfa::regex::Regex}; let re = Regex::new(r"[0-9]{4}-[0-9]{2}-[0-9]{2}")?; let text = b"2018-12-24 2016-10-08"; let matches: Vec<Match> = re.find_iter(text).collect(); assert_eq!(matches, vec![ Match::must(0, 0..10), Match::must(0, 11..21), ]); # Ok::<(), Box<dyn std::error::Error>>(()) ``` # Example: searching with regex sets The DFAs in this module all fully support searching with multiple regexes simultaneously. You can use this support with standard leftmost-first style searching to find non-overlapping matches: ``` # if cfg!(miri) { return Ok(()); } // miri takes too long use regex_automata::{Match, dfa::regex::Regex}; let re = Regex::new_many(&[r"\w+", r"\S+"])?; let text = b"@foo bar"; let matches: Vec<Match> = re.find_iter(text).collect(); assert_eq!(matches, vec![ Match::must(1, 0..4), Match::must(0, 5..8), ]); # Ok::<(), Box<dyn std::error::Error>>(()) ``` # Example: use sparse DFAs By default, compiling a regex will use dense DFAs internally. This uses more memory, but executes searches more quickly. If you can abide slower searches (somewhere around 3-5x), then sparse DFAs might make more sense since they can use significantly less space. Using sparse DFAs is as easy as using `Regex::new_sparse` instead of `Regex::new`: ``` use regex_automata::{Match, dfa::regex::Regex}; let re = Regex::new_sparse(r"[0-9]{4}-[0-9]{2}-[0-9]{2}").unwrap(); let text = b"2018-12-24 2016-10-08"; let matches: Vec<Match> = re.find_iter(text).collect(); assert_eq!(matches, vec![ Match::must(0, 0..10), Match::must(0, 11..21), ]); # Ok::<(), Box<dyn std::error::Error>>(()) ``` If you already have dense DFAs for some reason, they can be converted to sparse DFAs and used to build a new `Regex`. For example: ``` use regex_automata::{Match, dfa::regex::Regex}; let dense_re = Regex::new(r"[0-9]{4}-[0-9]{2}-[0-9]{2}").unwrap(); let sparse_re = Regex::builder().build_from_dfas( dense_re.forward().to_sparse()?, dense_re.reverse().to_sparse()?, ); let text = b"2018-12-24 2016-10-08"; let matches: Vec<Match> = sparse_re.find_iter(text).collect(); assert_eq!(matches, vec![ Match::must(0, 0..10), Match::must(0, 11..21), ]); # Ok::<(), Box<dyn std::error::Error>>(()) ``` # Example: deserialize a DFA This shows how to first serialize a DFA into raw bytes, and then deserialize those raw bytes back into a DFA. While this particular example is a bit contrived, this same technique can be used in your program to deserialize a DFA at start up time or by memory mapping a file. 
```
use regex_automata::{Match, dfa::{dense, regex::Regex}};

let re1 = Regex::new(r"[0-9]{4}-[0-9]{2}-[0-9]{2}").unwrap();
// serialize both the forward and reverse DFAs, see note below
let (fwd_bytes, fwd_pad) = re1.forward().to_bytes_native_endian();
let (rev_bytes, rev_pad) = re1.reverse().to_bytes_native_endian();
// now deserialize both---we need to specify the correct type!
let fwd: dense::DFA<&[u32]> = dense::DFA::from_bytes(&fwd_bytes[fwd_pad..])?.0;
let rev: dense::DFA<&[u32]> = dense::DFA::from_bytes(&rev_bytes[rev_pad..])?.0;
// finally, reconstruct our regex
let re2 = Regex::builder().build_from_dfas(fwd, rev);

// we can use it like normal
let text = b"2018-12-24 2016-10-08";
let matches: Vec<Match> = re2.find_iter(text).collect();
assert_eq!(matches, vec![
    Match::must(0, 0..10),
    Match::must(0, 11..21),
]);
# Ok::<(), Box<dyn std::error::Error>>(())
```

There are a few points worth noting here:

* We need to extract the raw DFAs used by the regex and serialize those. You
can build the DFAs manually yourself using [`dense::Builder`], but using the
DFAs from a `Regex` guarantees that the DFAs are built correctly. (In
particular, a `Regex` constructs a reverse DFA for finding the starting
location of matches.)
* To convert the DFA to raw bytes, we use the `to_bytes_native_endian` method.
In practice, you'll want to use either [`dense::DFA::to_bytes_little_endian`]
or [`dense::DFA::to_bytes_big_endian`], depending on which platform you're
deserializing your DFA from. If you intend to deserialize on either platform,
then you'll need to serialize both and deserialize the right one depending on
your target's endianness.
* Safely deserializing a DFA requires verifying the raw bytes, particularly if
they are untrusted, since an invalid DFA could cause logical errors, panics or
even undefined behavior. This verification step requires visiting all of the
transitions in the DFA, which can be costly. If cheaper verification is
desired, then [`dense::DFA::from_bytes_unchecked`] is available that only does
verification that can be performed in constant time. However, one can only use
this routine if the caller can guarantee that the bytes provided encode a
valid DFA.

The same process can be achieved with sparse DFAs as well:

```
use regex_automata::{Match, dfa::{sparse, regex::Regex}};

let re1 = Regex::new(r"[0-9]{4}-[0-9]{2}-[0-9]{2}").unwrap();

// serialize both
let fwd_bytes = re1.forward().to_sparse()?.to_bytes_native_endian();
let rev_bytes = re1.reverse().to_sparse()?.to_bytes_native_endian();
// now deserialize both---we need to specify the correct type!
let fwd: sparse::DFA<&[u8]> = sparse::DFA::from_bytes(&fwd_bytes)?.0;
let rev: sparse::DFA<&[u8]> = sparse::DFA::from_bytes(&rev_bytes)?.0;
// finally, reconstruct our regex
let re2 = Regex::builder().build_from_dfas(fwd, rev);

// we can use it like normal
let text = b"2018-12-24 2016-10-08";
let matches: Vec<Match> = re2.find_iter(text).collect();
assert_eq!(matches, vec![
    Match::must(0, 0..10),
    Match::must(0, 11..21),
]);
# Ok::<(), Box<dyn std::error::Error>>(())
```

Note that unlike dense DFAs, sparse DFAs have no alignment requirements.
Conversely, dense DFAs must be aligned to the same alignment as a
[`StateID`](crate::util::primitives::StateID).

# Support for `no_std` and `alloc`-only

This crate comes with `alloc` and `std` features that are enabled by default.
When the `alloc` or `std` features are enabled, the API of this module will include the facilities necessary for compiling, serializing, deserializing and searching with DFAs. When only the `alloc` feature is enabled, then implementations of the `std::error::Error` trait are dropped, but everything else generally remains the same. When both the `alloc` and `std` features are disabled, the API of this module will shrink such that it only includes the facilities necessary for deserializing and searching with DFAs. The intended workflow for `no_std` environments is thus as follows: * Write a program with the `alloc` or `std` features that compiles and serializes a regular expression. You may need to serialize both little and big endian versions of each DFA. (So that's 4 DFAs in total for each regex.) * In your `no_std` environment, follow the examples above for deserializing your previously serialized DFAs into regexes. You can then search with them as you would any regex. Deserialization can happen anywhere. For example, with bytes embedded into a binary or with a file memory mapped at runtime. The `regex-cli` command (found in the same repository as this crate) can be used to serialize DFAs to files and generate Rust code to read them. # Syntax This module supports the same syntax as the `regex` crate, since they share the same parser. You can find an exhaustive list of supported syntax in the [documentation for the `regex` crate](https://docs.rs/regex/1/regex/#syntax). There are two things that are not supported by the DFAs in this module: * Capturing groups. The DFAs (and [`Regex`](regex::Regex)es built on top of them) can only find the offsets of an entire match, but cannot resolve the offsets of each capturing group. This is because DFAs do not have the expressive power necessary. * Unicode word boundaries. These present particularly difficult challenges for DFA construction and would result in an explosion in the number of states. One can enable [`dense::Config::unicode_word_boundary`] though, which provides heuristic support for Unicode word boundaries that only works on ASCII text. Otherwise, one can use `(?-u:\b)` for an ASCII word boundary, which will work on any input. There are no plans to lift either of these limitations. Note that these restrictions are identical to the restrictions on lazy DFAs. # Differences with general purpose regexes The main goal of the [`regex`](https://docs.rs/regex) crate is to serve as a general purpose regular expression engine. It aims to automatically balance low compile times, fast search times and low memory usage, while also providing a convenient API for users. In contrast, this module provides a lower level regular expression interface based exclusively on DFAs that is a bit less convenient while providing more explicit control over memory usage and search times. Here are some specific negative differences: * **Compilation can take an exponential amount of time and space** in the size of the regex pattern. While most patterns do not exhibit worst case exponential time, such patterns do exist. For example, `[01]*1[01]{N}` will build a DFA with approximately `2^(N+2)` states. For this reason, untrusted patterns should not be compiled with this module. (In the future, the API may expose an option to return an error if the DFA gets too big.) * This module does not support sub-match extraction via capturing groups, which can be achieved with the regex crate's "captures" API. 
* While the regex crate doesn't necessarily sport fast compilation times,
the regexes in this module are almost universally slow to compile, especially
when they contain large Unicode character classes. For example, on my system,
compiling `\w{50}` takes about 1 second and almost 15MB of memory! (Compiling
a sparse regex takes about the same time but only uses about 1.2MB of
memory.) Conversely, compiling the same regex without Unicode support, e.g.,
`(?-u)\w{50}`, takes under 1 millisecond and about 15KB of memory. For this
reason, you should only use Unicode character classes if you absolutely need
them! (They are enabled by default though.)
* This module does not support Unicode word boundaries. ASCII word boundaries
may be used, though, by disabling Unicode or selectively doing so in the
syntax, e.g., `(?-u:\b)`. There is also an option to
[heuristically enable Unicode word boundaries](crate::dfa::dense::Config::unicode_word_boundary),
where the corresponding DFA will give up if any non-ASCII byte is seen.
* As a lower level API, this module does not do literal optimizations
automatically, although it does provide hooks in its API to make use of the
[`Prefilter`](crate::util::prefilter::Prefilter) trait. Missing literal
optimizations means that searches may run much slower than what you're
accustomed to, although it does provide more predictable and consistent
performance.
* There is no `&str` API like in the regex crate. In this module, all APIs
operate on `&[u8]`. By default, match indices are guaranteed to fall on UTF-8
boundaries, unless either of
[`syntax::Config::utf8`](crate::util::syntax::Config::utf8) or
[`thompson::Config::utf8`](crate::nfa::thompson::Config::utf8) is disabled.

With some of the downsides out of the way, here are some positive differences:

* Both dense and sparse DFAs can be serialized to raw bytes, and then cheaply
deserialized. Deserialization can be done in constant time with the unchecked
APIs, since searching can be performed directly on the raw serialized bytes of
a DFA.
* This module was specifically designed so that the searching phase of a DFA
has minimal runtime requirements, and can therefore be used in `no_std`
environments. While `no_std` environments cannot compile regexes, they can
deserialize pre-compiled regexes.
* Since this module builds DFAs ahead of time, it will generally out-perform
the `regex` crate on equivalent tasks. The performance difference is likely
not large. However, because of a complex set of optimizations in the regex
crate (like literal optimizations), an accurate performance comparison may be
difficult to do.
* Sparse DFAs provide a way to build a DFA ahead of time that sacrifices
search performance a small amount, but uses much less storage space.
Potentially even less than what the regex crate uses.
* This module exposes DFAs directly, such as [`dense::DFA`] and
[`sparse::DFA`], which enables one to do less work in some cases. For example,
if you only need the end of a match and not the start of a match, then you can
use a DFA directly without building a `Regex`, which always requires a second
DFA to find the start of a match.
* This module provides more control over memory usage. Aside from choosing
between dense and sparse DFAs, one can also choose a smaller state identifier
representation to use less space. Also, one can enable DFA minimization via
[`dense::Config::minimize`], but it can increase compilation times
dramatically.
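
# Example: getting only the end of a match from a DFA directly

As one of the points above mentions, a DFA can be used on its own when only
the end offset of a match is needed. The following is a brief sketch of that
idea using `dense::DFA` and the `Automaton::try_search_fwd` routine (the
pattern and haystack are arbitrary choices for illustration):

```
use regex_automata::{dfa::{dense, Automaton}, HalfMatch, Input};

let dfa = dense::DFA::new(r"[0-9]{4}-[0-9]{2}-[0-9]{2}")?;
let input = Input::new("launched on 2018-12-24!");
// A forward search reports only the end offset of the leftmost match.
let got = dfa.try_search_fwd(&input)?;
assert_eq!(Some(HalfMatch::must(0, 22)), got);
# Ok::<(), Box<dyn std::error::Error>>(())
```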
*/ #[cfg(feature = "dfa-search")] pub use crate::dfa::{ automaton::{Automaton, OverlappingState}, start::StartKind, }; /// This is an alias for a state ID of zero. It has special significance /// because it always corresponds to the first state in a DFA, and the first /// state in a DFA is always "dead." That is, the dead state always has all /// of its transitions set to itself. Moreover, the dead state is used as a /// sentinel for various things. e.g., In search, reaching a dead state means /// that the search must stop. const DEAD: crate::util::primitives::StateID = crate::util::primitives::StateID::ZERO; #[cfg(feature = "dfa-search")] pub mod dense; #[cfg(feature = "dfa-onepass")] pub mod onepass; #[cfg(feature = "dfa-search")] pub mod regex; #[cfg(feature = "dfa-search")] pub mod sparse; #[cfg(feature = "dfa-search")] pub(crate) mod accel; #[cfg(feature = "dfa-search")] mod automaton; #[cfg(feature = "dfa-build")] mod determinize; #[cfg(feature = "dfa-build")] mod minimize; #[cfg(any(feature = "dfa-build", feature = "dfa-onepass"))] mod remapper; #[cfg(feature = "dfa-search")] mod search; #[cfg(feature = "dfa-search")] mod special; #[cfg(feature = "dfa-search")] mod start; <file_sep>/regex-cli/cmd/generate/unicode.rs use std::{ fs::File, io::Write, path::{Path, PathBuf}, process::Command, }; use { anyhow::Context, bstr::BString, lexopt::{Arg, Parser}, }; use crate::{ args::{self, Usage}, util, }; pub fn run(p: &mut Parser) -> anyhow::Result<()> { const USAGE: &'static str = "\ Generates all Unicode tables for the regex project. Most Unicode tables are generated into the regex-syntax library. Note that this requires that the 'ucd-generate' tool be installed and in your PATH. The 'ucd-generate' tool is what is responsible for reading from the Unicode Character Database (UCD) and converting tables of codepoints into Rust code that is embedded into the regex library. ucd-generate can be found here https://github.com/BurntSushi/ucd-generate/ and can be installed with: cargo install ucd-generate And you can get a copy of, for example, UCD 15.0.0 like this: $ mkdir /tmp/ucd-15.0.0 $ cd /tmp/ucd-15.0.0 $ curl -LO https://www.unicode.org/Public/zipped/15.0.0/UCD.zip $ unzip UCD.zip USAGE: regex-cli generate unicode <outdir> <ucddir> outdir should be a directory path pointing to the root of the regex repository. ucddir should be a directory containing the UCD data downloaded from unicode.org, as described in ucd-generate's README: https://github.com/BurntSushi/ucd-generate/ "; let mut config = Config::default(); args::configure(p, USAGE, &mut [&mut config])?; let out = config.outdir()?; let ucd = config.ucddir()?; // Data tables for regex-syntax. 
let d = out.join("regex-syntax").join("src").join("unicode_tables"); gen(d.join("age.rs"), &["age", &ucd, "--chars"])?; gen( d.join("case_folding_simple.rs"), &["case-folding-simple", &ucd, "--chars", "--all-pairs"], )?; gen( d.join("general_category.rs"), &["general-category", &ucd, "--chars", "--exclude", "surrogate"], )?; gen( d.join("grapheme_cluster_break.rs"), &["grapheme-cluster-break", &ucd, "--chars"], )?; gen(d.join("property_bool.rs"), &["property-bool", &ucd, "--chars"])?; gen(d.join("property_names.rs"), &["property-names", &ucd])?; gen( d.join("property_values.rs"), &["property-values", &ucd, "--include", "gc,script,scx,age,gcb,wb,sb"], )?; gen(d.join("script.rs"), &["script", &ucd, "--chars"])?; gen( d.join("script_extension.rs"), &["script-extension", &ucd, "--chars"], )?; gen(d.join("sentence_break.rs"), &["sentence-break", &ucd, "--chars"])?; gen(d.join("word_break.rs"), &["word-break", &ucd, "--chars"])?; // Data tables for regex-automata. let d = out .join("regex-automata") .join("src") .join("util") .join("unicode_data"); // Note that this table is only used in cases where the regex-syntax // dependency is disabled for regex-automata, but where the // unicode-word-boundary feature is still enabled. That is, we do NOT // duplicate the perl_word table from regex-syntax. Only one copy will get // built. gen(d.join("perl_word.rs"), &["perl-word", &ucd, "--chars"])?; Ok(()) } #[derive(Debug, Default)] struct Config { outdir: Option<PathBuf>, ucddir: Option<PathBuf>, } impl Config { fn outdir(&self) -> anyhow::Result<&Path> { self.outdir .as_deref() .ok_or_else(|| anyhow::anyhow!("missing <outdir>")) } fn ucddir(&self) -> anyhow::Result<&str> { self.ucddir .as_deref() .ok_or_else(|| anyhow::anyhow!("missing <ucddir>"))? .to_str() .ok_or_else(|| anyhow::anyhow!("ucddir must be valid UTF-8")) } } impl args::Configurable for Config { fn configure( &mut self, _: &mut Parser, arg: &mut Arg, ) -> anyhow::Result<bool> { match *arg { Arg::Value(ref mut value) => { if self.outdir.is_none() { self.outdir = Some(PathBuf::from(std::mem::take(value))); } else if self.ucddir.is_none() { self.ucddir = Some(PathBuf::from(std::mem::take(value))); } else { return Ok(false); } } _ => return Ok(false), } Ok(true) } fn usage(&self) -> &[Usage] { const USAGES: &'static [Usage] = &[]; USAGES } } /// Run 'ucd-generate' with the args given, write its output to the file path /// given and apply 'rustfmt' to it. fn gen<P: AsRef<Path>>(dest: P, args: &[&str]) -> anyhow::Result<()> { let dest = dest.as_ref(); ucdgen_to(dest, args)?; util::rustfmt(dest)?; Ok(()) } /// Run 'ucd-generate' with the args given and write its output to the file /// path given. fn ucdgen_to<P: AsRef<Path>>(dest: P, args: &[&str]) -> anyhow::Result<()> { let dest = dest.as_ref(); let err = || format!("{}", dest.display()); // The "right" thing would be to connect this to the stdout of // ucd-generate, but meh, I got lazy. let mut fdest = File::create(dest).with_context(err)?; let data = ucdgen(args)?; fdest.write_all(&data).with_context(err)?; Ok(()) } /// Run 'ucd-generate' with the args given. Upon success, the contents of /// stdout are returned. Otherwise, the error will include the contents of /// stderr. 
fn ucdgen(args: &[&str]) -> anyhow::Result<Vec<u8>> { let out = Command::new("ucd-generate") .args(args) .output() .context("ucd-generate command failed")?; anyhow::ensure!( out.status.success(), "ucd-generate {}: {}", args.join(" "), BString::from(out.stderr), ); Ok(out.stdout) } <file_sep>/regex-automata/src/nfa/thompson/map.rs // This module contains a couple simple and purpose built hash maps. The key // trade off they make is that they serve as caches rather than true maps. That // is, inserting a new entry may cause eviction of another entry. This gives // us two things. First, there's less overhead associated with inserts and // lookups. Secondly, it lets us control our memory usage. // // These maps are used in some fairly hot code when generating NFA states for // large Unicode character classes. // // Instead of exposing a rich hashmap entry API, we just permit the caller to // produce a hash of the key directly. The hash can then be reused for both // lookups and insertions at the cost of leaking abstraction a bit. But these // are for internal use only, so it's fine. // // The Utf8BoundedMap is used for Daciuk's algorithm for constructing a // (almost) minimal DFA for large Unicode character classes in linear time. // (Daciuk's algorithm is always used when compiling forward NFAs. For reverse // NFAs, it's only used when the compiler is configured to 'shrink' the NFA, // since there's a bit more expense in the reverse direction.) // // The Utf8SuffixMap is used when compiling large Unicode character classes for // reverse NFAs when 'shrink' is disabled. Specifically, it augments the naive // construction of UTF-8 automata by caching common suffixes. This doesn't // get the same space savings as Daciuk's algorithm, but it's basically as // fast as the naive approach and typically winds up using less memory (since // it generates smaller NFAs) despite the presence of the cache. // // These maps effectively represent caching mechanisms for sparse and // byte-range NFA states, respectively. The former represents a single NFA // state with many transitions of equivalent priority while the latter // represents a single NFA state with a single transition. (Neither state ever // has or is an epsilon transition.) Thus, they have different key types. It's // likely we could make one generic map, but the machinery didn't seem worth // it. They are simple enough. use alloc::{vec, vec::Vec}; use crate::{ nfa::thompson::Transition, util::{ int::{Usize, U64}, primitives::StateID, }, }; // Basic FNV-1a hash constants as described in: // https://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function const PRIME: u64 = 1099511628211; const INIT: u64 = 14695981039346656037; /// A bounded hash map where the key is a sequence of NFA transitions and the /// value is a pre-existing NFA state ID. /// /// std's hashmap can be used for this, however, this map has two important /// advantages. Firstly, it has lower overhead. Secondly, it permits us to /// control our memory usage by limited the number of slots. In general, the /// cost here is that this map acts as a cache. That is, inserting a new entry /// may remove an old entry. We are okay with this, since it does not impact /// correctness in the cases where it is used. The only effect that dropping /// states from the cache has is that the resulting NFA generated may be bigger /// than it otherwise would be. 
/// /// This improves benchmarks that compile large Unicode character classes, /// since it makes the generation of (almost) minimal UTF-8 automaton faster. /// Specifically, one could observe the difference with std's hashmap via /// something like the following benchmark: /// /// hyperfine "regex-cli debug nfa thompson --quiet --reverse '\w{90} ecurB'" /// /// But to observe that difference, you'd have to modify the code to use /// std's hashmap. /// /// It is quite possible that there is a better way to approach this problem. /// For example, if there happens to be a very common state that collides with /// a lot of less frequent states, then we could wind up with very poor caching /// behavior. Alas, the effectiveness of this cache has not been measured. /// Instead, ad hoc experiments suggest that it is "good enough." Additional /// smarts (such as an LRU eviction policy) have to be weighed against the /// amount of extra time they cost. #[derive(Clone, Debug)] pub struct Utf8BoundedMap { /// The current version of this map. Only entries with matching versions /// are considered during lookups. If an entry is found with a mismatched /// version, then the map behaves as if the entry does not exist. /// /// This makes it possible to clear the map by simply incrementing the /// version number instead of actually deallocating any storage. version: u16, /// The total number of entries this map can store. capacity: usize, /// The actual entries, keyed by hash. Collisions between different states /// result in the old state being dropped. map: Vec<Utf8BoundedEntry>, } /// An entry in this map. #[derive(Clone, Debug, Default)] struct Utf8BoundedEntry { /// The version of the map used to produce this entry. If this entry's /// version does not match the current version of the map, then the map /// should behave as if this entry does not exist. version: u16, /// The key, which is a sorted sequence of non-overlapping NFA transitions. key: Vec<Transition>, /// The state ID corresponding to the state containing the transitions in /// this entry. val: StateID, } impl Utf8BoundedMap { /// Create a new bounded map with the given capacity. The map will never /// grow beyond the given size. /// /// Note that this does not allocate. Instead, callers must call `clear` /// before using this map. `clear` will allocate space if necessary. /// /// This avoids the need to pay for the allocation of this map when /// compiling regexes that lack large Unicode character classes. pub fn new(capacity: usize) -> Utf8BoundedMap { assert!(capacity > 0); Utf8BoundedMap { version: 0, capacity, map: vec![] } } /// Clear this map of all entries, but permit the reuse of allocation /// if possible. /// /// This must be called before the map can be used. pub fn clear(&mut self) { if self.map.is_empty() { self.map = vec![Utf8BoundedEntry::default(); self.capacity]; } else { self.version = self.version.wrapping_add(1); // If we loop back to version 0, then we forcefully clear the // entire map. Otherwise, it might be possible to incorrectly // match entries used to generate other NFAs. if self.version == 0 { self.map = vec![Utf8BoundedEntry::default(); self.capacity]; } } } /// Return a hash of the given transitions. 
pub fn hash(&self, key: &[Transition]) -> usize { let mut h = INIT; for t in key { h = (h ^ u64::from(t.start)).wrapping_mul(PRIME); h = (h ^ u64::from(t.end)).wrapping_mul(PRIME); h = (h ^ t.next.as_u64()).wrapping_mul(PRIME); } (h % self.map.len().as_u64()).as_usize() } /// Retrieve the cached state ID corresponding to the given key. The hash /// given must have been computed with `hash` using the same key value. /// /// If there is no cached state with the given transitions, then None is /// returned. pub fn get(&mut self, key: &[Transition], hash: usize) -> Option<StateID> { let entry = &self.map[hash]; if entry.version != self.version { return None; } // There may be a hash collision, so we need to confirm real equality. if entry.key != key { return None; } Some(entry.val) } /// Add a cached state to this map with the given key. Callers should /// ensure that `state_id` points to a state that contains precisely the /// NFA transitions given. /// /// `hash` must have been computed using the `hash` method with the same /// key. pub fn set( &mut self, key: Vec<Transition>, hash: usize, state_id: StateID, ) { self.map[hash] = Utf8BoundedEntry { version: self.version, key, val: state_id }; } } /// A cache of suffixes used to modestly compress UTF-8 automata for large /// Unicode character classes. #[derive(Clone, Debug)] pub struct Utf8SuffixMap { /// The current version of this map. Only entries with matching versions /// are considered during lookups. If an entry is found with a mismatched /// version, then the map behaves as if the entry does not exist. version: u16, /// The total number of entries this map can store. capacity: usize, /// The actual entries, keyed by hash. Collisions between different states /// result in the old state being dropped. map: Vec<Utf8SuffixEntry>, } /// A key that uniquely identifies an NFA state. It is a triple that represents /// a transition from one state for a particular byte range. #[derive(Clone, Debug, Default, Eq, PartialEq)] pub struct Utf8SuffixKey { pub from: StateID, pub start: u8, pub end: u8, } /// An entry in this map. #[derive(Clone, Debug, Default)] struct Utf8SuffixEntry { /// The version of the map used to produce this entry. If this entry's /// version does not match the current version of the map, then the map /// should behave as if this entry does not exist. version: u16, /// The key, which consists of a transition in a particular state. key: Utf8SuffixKey, /// The identifier that the transition in the key maps to. val: StateID, } impl Utf8SuffixMap { /// Create a new bounded map with the given capacity. The map will never /// grow beyond the given size. /// /// Note that this does not allocate. Instead, callers must call `clear` /// before using this map. `clear` will allocate space if necessary. /// /// This avoids the need to pay for the allocation of this map when /// compiling regexes that lack large Unicode character classes. pub fn new(capacity: usize) -> Utf8SuffixMap { assert!(capacity > 0); Utf8SuffixMap { version: 0, capacity, map: vec![] } } /// Clear this map of all entries, but permit the reuse of allocation /// if possible. /// /// This must be called before the map can be used. pub fn clear(&mut self) { if self.map.is_empty() { self.map = vec![Utf8SuffixEntry::default(); self.capacity]; } else { self.version = self.version.wrapping_add(1); if self.version == 0 { self.map = vec![Utf8SuffixEntry::default(); self.capacity]; } } } /// Return a hash of the given transition. 
pub fn hash(&self, key: &Utf8SuffixKey) -> usize { // Basic FNV-1a hash as described: // https://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function const PRIME: u64 = 1099511628211; const INIT: u64 = 14695981039346656037; let mut h = INIT; h = (h ^ key.from.as_u64()).wrapping_mul(PRIME); h = (h ^ u64::from(key.start)).wrapping_mul(PRIME); h = (h ^ u64::from(key.end)).wrapping_mul(PRIME); (h % self.map.len().as_u64()).as_usize() } /// Retrieve the cached state ID corresponding to the given key. The hash /// given must have been computed with `hash` using the same key value. /// /// If there is no cached state with the given key, then None is returned. pub fn get( &mut self, key: &Utf8SuffixKey, hash: usize, ) -> Option<StateID> { let entry = &self.map[hash]; if entry.version != self.version { return None; } if key != &entry.key { return None; } Some(entry.val) } /// Add a cached state to this map with the given key. Callers should /// ensure that `state_id` points to a state that contains precisely the /// NFA transition given. /// /// `hash` must have been computed using the `hash` method with the same /// key. pub fn set(&mut self, key: Utf8SuffixKey, hash: usize, state_id: StateID) { self.map[hash] = Utf8SuffixEntry { version: self.version, key, val: state_id }; } } <file_sep>/regex-automata/src/util/determinize/state.rs /*! This module defines a DFA state representation and builders for constructing DFA states. This representation is specifically for use in implementations of NFA-to-DFA conversion via powerset construction. (Also called "determinization" in this crate.) The term "DFA state" is somewhat overloaded in this crate. In some cases, it refers to the set of transitions over an alphabet for a particular state. In other cases, it refers to a set of NFA states. The former is really about the final representation of a state in a DFA's transition table, where as the latter---what this module is focused on---is closer to an intermediate form that is used to help eventually build the transition table. This module exports four types. All four types represent the same idea: an ordered set of NFA states. This ordered set represents the epsilon closure of a particular NFA state, where the "epsilon closure" is the set of NFA states that can be transitioned to without consuming any input. i.e., Follow all of the NFA state's epsilon transitions. In addition, this implementation of DFA states cares about two other things: the ordered set of pattern IDs corresponding to the patterns that match if the state is a match state, and the set of look-behind assertions that were true when the state was created. The first, `State`, is a frozen representation of a state that cannot be modified. It may be cheaply cloned without copying the state itself and can be accessed safely from multiple threads simultaneously. This type is useful for when one knows that the DFA state being constructed is distinct from any other previously constructed states. Namely, powerset construction, in practice, requires one to keep a cache of previously created DFA states. Otherwise, the number of DFA states created in memory balloons to an impractically large number. For this reason, equivalent states should endeavor to have an equivalent byte-level representation. (In general, "equivalency" here means, "equivalent assertions, pattern IDs and NFA state IDs." We do not require that full DFA minimization be implemented here. 
This form of equivalency is only surface deep and is more-or-less a practical necessity.) The other three types represent different phases in the construction of a DFA state. Internally, these three types (and `State`) all use the same byte-oriented representation. That means one can use any of the builder types to check whether the state it represents already exists or not. If it does, then there is no need to freeze it into a `State` (which requires an alloc and a copy). Here are the three types described succinctly: * `StateBuilderEmpty` represents a state with no pattern IDs, no assertions and no NFA states. Creating a `StateBuilderEmpty` performs no allocs. A `StateBuilderEmpty` can only be used to query its underlying memory capacity, or to convert into a builder for recording pattern IDs and/or assertions. * `StateBuilderMatches` represents a state with zero or more pattern IDs, zero or more satisfied assertions and zero NFA state IDs. A `StateBuilderMatches` can only be used for adding pattern IDs and recording assertions. * `StateBuilderNFA` represents a state with zero or more pattern IDs, zero or more satisfied assertions and zero or more NFA state IDs. A `StateBuilderNFA` can only be used for adding NFA state IDs and recording some assertions. The expected flow here is to use the above builders to construct a candidate DFA state to check if it already exists. If it does, then there's no need to freeze it into a `State`. If it doesn't exist, then `StateBuilderNFA::to_state` can be called to freeze the builder into an immutable `State`. In either case, `clear` should be called on the builder to turn it back into a `StateBuilderEmpty` that reuses the underlying memory. The main purpose for splitting the builder into these distinct types is to make it impossible to do things like adding a pattern ID after adding an NFA state ID. Namely, this makes it simpler to use a space-and-time efficient binary representation for the state. (The format is documented on the `Repr` type below.) If we just used one type for everything, it would be possible for callers to use an incorrect interleaving of calls and thus result in a corrupt representation. I chose to use more type machinery to make this impossible to do because 1) determinization is itself pretty complex and it wouldn't be too hard to foul this up and 2) there isn't too much machinery involved and it's well contained. As an optimization, sometimes states won't have certain things set. For example, if the underlying NFA has no word boundary assertions, then there is no reason to set a state's look-behind assertion as to whether it was generated from a word byte or not. Similarly, if a state has no NFA states corresponding to look-around assertions, then there is no reason to set `look_have` to a non-empty set. Finally, callers usually omit unconditional epsilon transitions when adding NFA state IDs since they aren't discriminatory. Finally, the binary representation used by these states is, thankfully, not serialized anywhere. So any kind of change can be made with reckless abandon, as long as everything in this module agrees. */ use core::{convert::TryFrom, mem}; use alloc::{sync::Arc, vec::Vec}; use crate::util::{ int::{I32, U32}, look::LookSet, primitives::{PatternID, StateID}, wire::{self, Endian}, }; /// A DFA state that, at its core, is represented by an ordered set of NFA /// states. /// /// This type is intended to be used only in NFA-to-DFA conversion via powerset /// construction.
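///
/// A compressed sketch of how a `State` typically comes into existence
/// during determinization (the `cache` map and the `*_id` variables are
/// hypothetical stand-ins for whatever bookkeeping the caller uses):
///
/// ```ignore
/// let mut b = StateBuilderEmpty::new().into_matches();
/// b.add_match_pattern_id(PatternID::ZERO); // only for match states
/// let mut b = b.into_nfa();
/// b.add_nfa_state_id(some_nfa_state_id);
/// // The `Borrow<[u8]>` impl below lets the builder's bytes be used to
/// // look up an equivalent pre-existing state without freezing it first.
/// if !cache.contains_key(b.as_bytes()) {
///     cache.insert(b.to_state(), new_dfa_state_id);
/// }
/// let empty = b.clear(); // reuse the allocation for the next state
/// ```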
/// /// It may be cheaply cloned and accessed safely from multiple threads /// simultaneously. #[derive(Clone, Eq, Hash, PartialEq, PartialOrd, Ord)] pub(crate) struct State(Arc<[u8]>); /// This Borrow impl permits us to lookup any state in a map by its byte /// representation. This is particularly convenient when one has a StateBuilder /// and we want to see if a correspondingly equivalent state already exists. If /// one does exist, then we can reuse the allocation required by StateBuilder /// without having to convert it into a State first. impl core::borrow::Borrow<[u8]> for State { fn borrow(&self) -> &[u8] { &*self.0 } } impl core::fmt::Debug for State { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { f.debug_tuple("State").field(&self.repr()).finish() } } /// For docs on these routines, see the internal Repr and ReprVec types below. impl State { pub(crate) fn dead() -> State { StateBuilderEmpty::new().into_matches().into_nfa().to_state() } pub(crate) fn is_match(&self) -> bool { self.repr().is_match() } pub(crate) fn is_from_word(&self) -> bool { self.repr().is_from_word() } pub(crate) fn is_half_crlf(&self) -> bool { self.repr().is_half_crlf() } pub(crate) fn look_have(&self) -> LookSet { self.repr().look_have() } pub(crate) fn look_need(&self) -> LookSet { self.repr().look_need() } pub(crate) fn match_len(&self) -> usize { self.repr().match_len() } pub(crate) fn match_pattern(&self, index: usize) -> PatternID { self.repr().match_pattern(index) } pub(crate) fn match_pattern_ids(&self) -> Option<Vec<PatternID>> { self.repr().match_pattern_ids() } #[cfg(all(test, not(miri)))] pub(crate) fn iter_match_pattern_ids<F: FnMut(PatternID)>(&self, f: F) { self.repr().iter_match_pattern_ids(f) } pub(crate) fn iter_nfa_state_ids<F: FnMut(StateID)>(&self, f: F) { self.repr().iter_nfa_state_ids(f) } pub(crate) fn memory_usage(&self) -> usize { self.0.len() } fn repr(&self) -> Repr<'_> { Repr(&*self.0) } } /// A state builder that represents an empty state. /// /// This is a useful "initial condition" for state construction. It has no /// NFA state IDs, no assertions set and no pattern IDs. No allocations are /// made when new() is called. Its main use is for being converted into a /// builder that can capture assertions and pattern IDs. #[derive(Clone, Debug)] pub(crate) struct StateBuilderEmpty(Vec<u8>); /// For docs on these routines, see the internal Repr and ReprVec types below. impl StateBuilderEmpty { pub(crate) fn new() -> StateBuilderEmpty { StateBuilderEmpty(alloc::vec![]) } pub(crate) fn into_matches(mut self) -> StateBuilderMatches { self.0.extend_from_slice(&[0, 0, 0, 0, 0]); StateBuilderMatches(self.0) } fn clear(&mut self) { self.0.clear(); } pub(crate) fn capacity(&self) -> usize { self.0.capacity() } } /// A state builder that collects assertions and pattern IDs. /// /// When collecting pattern IDs is finished, this can be converted into a /// builder that collects NFA state IDs. #[derive(Clone)] pub(crate) struct StateBuilderMatches(Vec<u8>); impl core::fmt::Debug for StateBuilderMatches { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { f.debug_tuple("StateBuilderMatches").field(&self.repr()).finish() } } /// For docs on these routines, see the internal Repr and ReprVec types below. 
impl StateBuilderMatches { pub(crate) fn into_nfa(mut self) -> StateBuilderNFA { self.repr_vec().close_match_pattern_ids(); StateBuilderNFA { repr: self.0, prev_nfa_state_id: StateID::ZERO } } pub(crate) fn set_is_from_word(&mut self) { self.repr_vec().set_is_from_word() } pub(crate) fn set_is_half_crlf(&mut self) { self.repr_vec().set_is_half_crlf() } pub(crate) fn look_have(&self) -> LookSet { LookSet::read_repr(&self.0[1..]) } pub(crate) fn set_look_have( &mut self, set: impl FnMut(LookSet) -> LookSet, ) { self.repr_vec().set_look_have(set) } pub(crate) fn add_match_pattern_id(&mut self, pid: PatternID) { self.repr_vec().add_match_pattern_id(pid) } fn repr(&self) -> Repr<'_> { Repr(&self.0) } fn repr_vec(&mut self) -> ReprVec<'_> { ReprVec(&mut self.0) } } /// A state builder that collects some assertions and NFA state IDs. /// /// When collecting NFA state IDs is finished, this can be used to build a /// `State` if necessary. /// /// When done building a state (regardless of whether it got kept or not), /// it's usually a good idea to call `clear` to get an empty builder back so /// that it can be reused to build the next state. #[derive(Clone)] pub(crate) struct StateBuilderNFA { repr: Vec<u8>, prev_nfa_state_id: StateID, } impl core::fmt::Debug for StateBuilderNFA { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { f.debug_tuple("StateBuilderNFA").field(&self.repr()).finish() } } /// For docs on these routines, see the internal Repr and ReprVec types below. impl StateBuilderNFA { pub(crate) fn to_state(&self) -> State { State(Arc::from(&*self.repr)) } pub(crate) fn clear(self) -> StateBuilderEmpty { let mut builder = StateBuilderEmpty(self.repr); builder.clear(); builder } pub(crate) fn look_need(&self) -> LookSet { self.repr().look_need() } pub(crate) fn set_look_have( &mut self, set: impl FnMut(LookSet) -> LookSet, ) { self.repr_vec().set_look_have(set) } pub(crate) fn set_look_need( &mut self, set: impl FnMut(LookSet) -> LookSet, ) { self.repr_vec().set_look_need(set) } pub(crate) fn add_nfa_state_id(&mut self, sid: StateID) { ReprVec(&mut self.repr) .add_nfa_state_id(&mut self.prev_nfa_state_id, sid) } pub(crate) fn as_bytes(&self) -> &[u8] { &self.repr } fn repr(&self) -> Repr<'_> { Repr(&self.repr) } fn repr_vec(&mut self) -> ReprVec<'_> { ReprVec(&mut self.repr) } } /// Repr is a read-only view into the representation of a DFA state. /// /// Primarily, a Repr is how we achieve DRY: we implement decoding the format /// in one place, and then use a Repr to implement the various methods on the /// public state types. /// /// The format is as follows: /// /// The first five bytes correspond to bitsets. /// /// Byte 0 is a bitset corresponding to miscellaneous flags associated with the /// state. Bit 0 is set to 1 if the state is a match state. Bit 1 is set to 1 /// if the state has pattern IDs explicitly written to it. (This is a flag that /// is not meant to be set by determinization, but rather, is used as part of /// an internal space-saving optimization.) Bit 2 is set to 1 if the state was /// generated by a transition over a "word" byte. (Callers may not always set /// this. For example, if the NFA has no word boundary assertion, then needing /// to track whether a state came from a word byte or not is superfluous and /// wasteful.) Bit 3 is set to 1 if the state was generated by a transition /// while inside of a CRLF terminator (i.e., after a `\r` in the forward /// direction or a `\n` in the reverse direction). /// /// Bytes 1..3 correspond to the look-behind assertions that were satisfied by /// the transition that created this state. This generally only includes the /// StartLF and Start assertions.
(Look-ahead assertions are not tracked as /// part of states. Instead, these are applied by re-computing the epsilon /// closure of a state when computing the transition function. See `next` in /// the parent module.) /// /// Bytes 3..5 correspond to the set of look-around assertions (including both /// look-behind and look-ahead) that appear somewhere in this state's set of /// NFA state IDs. This is used to determine whether this state's epsilon /// closure should be re-computed when computing the transition function. /// Namely, look-around assertions are "just" conditional epsilon transitions, /// so if there are new assertions available when computing the transition /// function, we should only re-compute the epsilon closure if those new /// assertions are relevant to this particular state. /// /// Bytes 5..9 correspond to a 32-bit native-endian encoded integer /// corresponding to the number of patterns encoded in this state. If the state /// is not a match state (byte 0 bit 0 is 0) or if its only pattern ID is /// PatternID::ZERO, then no integer is encoded at this position. Instead, byte /// offset 5 is the position at which the first NFA state ID is encoded. /// /// For a match state with at least one non-ZERO pattern ID, the next bytes /// correspond to a sequence of 32-bit native endian encoded integers that /// represent each pattern ID, in order, that this match state represents. /// /// After the pattern IDs (if any), NFA state IDs are delta encoded as /// varints.[1] The first NFA state ID is encoded as itself, and each /// subsequent NFA state ID is encoded as the difference between itself and the /// previous NFA state ID. /// /// [1] - https://developers.google.com/protocol-buffers/docs/encoding#varints struct Repr<'a>(&'a [u8]); impl<'a> Repr<'a> { /// Returns true if and only if this is a match state. /// /// If callers have added pattern IDs to this state, then callers MUST set /// this state as a match state explicitly. However, as a special case, a /// state that is marked as a match state but has no pattern IDs is treated /// as if it had a single pattern ID equivalent to PatternID::ZERO. fn is_match(&self) -> bool { self.0[0] & (1 << 0) > 0 } /// Returns true if and only if this state has had at least one pattern /// ID added to it. /// /// This is an internal-only flag that permits the representation to save /// space in the common case of an NFA with one pattern in it. In that /// case, a match state can only ever have exactly one pattern ID: /// PatternID::ZERO. So there's no need to represent it. fn has_pattern_ids(&self) -> bool { self.0[0] & (1 << 1) > 0 } /// Returns true if and only if this state is marked as having been created /// from a transition over a word byte. This is useful for checking whether /// a word boundary assertion is true or not, which requires look-behind /// (whether the current state came from a word byte or not) and look-ahead /// (whether the transition byte is a word byte or not). /// /// Since states with this set are distinct from states that don't have /// this set (even if they are otherwise equivalent), callers should not /// set this assertion unless the underlying NFA has at least one word /// boundary assertion somewhere. Otherwise, a superfluous number of states /// may be created. fn is_from_word(&self) -> bool { self.0[0] & (1 << 2) > 0 } /// Returns true if and only if this state is marked as being inside of a /// CRLF terminator.
In the forward direction, this means the state was /// created after seeing a `\r`. In the reverse direction, this means the /// state was created after seeing a `\n`. fn is_half_crlf(&self) -> bool { self.0[0] & (1 << 3) > 0 } /// The set of look-behind assertions that were true in the transition that /// created this state. /// /// Generally, this should be empty if 'look_need' is empty, since there is /// no reason to track which look-behind assertions are true if the state /// has no conditional epsilon transitions. /// /// Satisfied look-ahead assertions are not tracked in states. Instead, /// these are re-computed on demand via epsilon closure when computing the /// transition function. fn look_have(&self) -> LookSet { LookSet::read_repr(&self.0[1..]) } /// The set of look-around (both behind and ahead) assertions that appear /// at least once in this state's set of NFA states. /// /// This is used to determine whether the epsilon closure needs to be /// re-computed when computing the transition function. Namely, if the /// state has no conditional epsilon transitions, then there is no need /// to re-compute the epsilon closure. fn look_need(&self) -> LookSet { LookSet::read_repr(&self.0[3..]) } /// Returns the total number of match pattern IDs in this state. /// /// If this state is not a match state, then this always returns 0. fn match_len(&self) -> usize { if !self.is_match() { return 0; } else if !self.has_pattern_ids() { 1 } else { self.encoded_pattern_len() } } /// Returns the pattern ID for this match state at the given index. /// /// If the given index is greater than or equal to `match_len()` for this /// state, then this could panic or return incorrect results. fn match_pattern(&self, index: usize) -> PatternID { if !self.has_pattern_ids() { PatternID::ZERO } else { let offset = 9 + index * PatternID::SIZE; // This is OK since we only ever serialize valid PatternIDs to // states. wire::read_pattern_id_unchecked(&self.0[offset..]).0 } } /// Returns a copy of all match pattern IDs in this state. If this state /// is not a match state, then this returns None. fn match_pattern_ids(&self) -> Option<Vec<PatternID>> { if !self.is_match() { return None; } let mut pids = alloc::vec![]; self.iter_match_pattern_ids(|pid| pids.push(pid)); Some(pids) } /// Calls the given function on every pattern ID in this state. fn iter_match_pattern_ids<F: FnMut(PatternID)>(&self, mut f: F) { if !self.is_match() { return; } // As an optimization for a very common case, when this is a match // state for an NFA with only one pattern, we don't actually write the // pattern ID to the state representation. Instead, we know it must // be there since it is the only possible choice. if !self.has_pattern_ids() { f(PatternID::ZERO); return; } let mut pids = &self.0[9..self.pattern_offset_end()]; while !pids.is_empty() { let pid = wire::read_u32(pids); pids = &pids[PatternID::SIZE..]; // This is OK since we only ever serialize valid PatternIDs to // states. And since pattern IDs can never exceed a usize, the // unwrap is OK. f(PatternID::new_unchecked(usize::try_from(pid).unwrap())); } } /// Calls the given function on every NFA state ID in this state. fn iter_nfa_state_ids<F: FnMut(StateID)>(&self, mut f: F) { let mut sids = &self.0[self.pattern_offset_end()..]; let mut prev = 0i32; while !sids.is_empty() { let (delta, nr) = read_vari32(sids); sids = &sids[nr..]; let sid = prev + delta; prev = sid; // This is OK since we only ever serialize valid StateIDs to // states. 
And since state IDs can never exceed an isize, they must // always be able to fit into a usize, and thus cast is OK. f(StateID::new_unchecked(sid.as_usize())) } } /// Returns the offset into this state's representation where the pattern /// IDs end and the NFA state IDs begin. fn pattern_offset_end(&self) -> usize { let encoded = self.encoded_pattern_len(); if encoded == 0 { return 5; } // This arithmetic is OK since we were able to address this many bytes // when writing to the state, thus, it must fit into a usize. encoded.checked_mul(4).unwrap().checked_add(9).unwrap() } /// Returns the total number of *encoded* pattern IDs in this state. /// /// This may return 0 even when this is a match state, since the pattern /// ID `PatternID::ZERO` is not encoded when it's the only pattern ID in /// the match state (the overwhelming common case). fn encoded_pattern_len(&self) -> usize { if !self.has_pattern_ids() { return 0; } // This unwrap is OK since the total number of patterns is always // guaranteed to fit into a usize. usize::try_from(wire::read_u32(&self.0[5..9])).unwrap() } } impl<'a> core::fmt::Debug for Repr<'a> { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { let mut nfa_ids = alloc::vec![]; self.iter_nfa_state_ids(|sid| nfa_ids.push(sid)); f.debug_struct("Repr") .field("is_match", &self.is_match()) .field("is_from_word", &self.is_from_word()) .field("is_half_crlf", &self.is_half_crlf()) .field("look_have", &self.look_have()) .field("look_need", &self.look_need()) .field("match_pattern_ids", &self.match_pattern_ids()) .field("nfa_state_ids", &nfa_ids) .finish() } } /// ReprVec is a write-only view into the representation of a DFA state. /// /// See Repr for more details on the purpose of this type and also the format. /// /// Note that not all possible combinations of methods may be called. This is /// precisely what the various StateBuilder types encapsulate: they only /// permit valid combinations via Rust's linear typing. struct ReprVec<'a>(&'a mut Vec<u8>); impl<'a> ReprVec<'a> { /// Set this state as a match state. /// /// This should not be exposed explicitly outside of this module. It is /// set automatically when a pattern ID is added. fn set_is_match(&mut self) { self.0[0] |= 1 << 0; } /// Set that this state has pattern IDs explicitly written to it. /// /// This should not be exposed explicitly outside of this module. This is /// used internally as a space saving optimization. Namely, if the state /// is a match state but does not have any pattern IDs written to it, /// then it is automatically inferred to have a pattern ID of ZERO. fn set_has_pattern_ids(&mut self) { self.0[0] |= 1 << 1; } /// Set this state as being built from a transition over a word byte. /// /// Setting this is only necessary when one needs to deal with word /// boundary assertions. Therefore, if the underlying NFA has no word /// boundary assertions, callers should not set this. fn set_is_from_word(&mut self) { self.0[0] |= 1 << 2; } /// Set this state as having seen half of a CRLF terminator. /// /// In the forward direction, this should be set when a `\r` has been seen. /// In the reverse direction, this should be set when a `\n` has been seen. fn set_is_half_crlf(&mut self) { self.0[0] |= 1 << 3; } /// The set of look-behind assertions that were true in the transition that /// created this state. fn look_have(&self) -> LookSet { self.repr().look_have() } /// The set of look-around (both behind and ahead) assertions that appear /// at least once in this state's set of NFA states. 
fn look_need(&self) -> LookSet { self.repr().look_need() } /// Mutate the set of look-behind assertions that were true in the /// transition that created this state. fn set_look_have(&mut self, mut set: impl FnMut(LookSet) -> LookSet) { set(self.look_have()).write_repr(&mut self.0[1..]); } /// Mutate the set of look-around (both behind and ahead) assertions that /// appear at least once in this state's set of NFA states. fn set_look_need(&mut self, mut set: impl FnMut(LookSet) -> LookSet) { set(self.look_need()).write_repr(&mut self.0[3..]); } /// Add a pattern ID to this state. All match states must have at least /// one pattern ID associated with it. /// /// Callers must never add duplicative pattern IDs. /// /// The order in which patterns are added must correspond to the order /// in which patterns are reported as matches. fn add_match_pattern_id(&mut self, pid: PatternID) { // As a (somewhat small) space saving optimization, in the case where // a matching state has exactly one pattern ID, PatternID::ZERO, we do // not write either the pattern ID or the number of patterns encoded. // Instead, all we do is set the 'is_match' bit on this state. Overall, // this saves 8 bytes per match state for the overwhelming majority of // match states. // // In order to know whether pattern IDs need to be explicitly read or // not, we use another internal-only bit, 'has_pattern_ids', to // indicate whether they have been explicitly written or not. if !self.repr().has_pattern_ids() { if pid == PatternID::ZERO { self.set_is_match(); return; } // Make room for 'close_match_pattern_ids' to write the total // number of pattern IDs written. self.0.extend(core::iter::repeat(0).take(PatternID::SIZE)); self.set_has_pattern_ids(); // If this was already a match state, then the only way that's // possible when the state doesn't have pattern IDs is if // PatternID::ZERO was added by the caller previously. In this // case, we are now adding a non-ZERO pattern ID after it, in // which case, we want to make sure to represent ZERO explicitly // now. if self.repr().is_match() { write_u32(self.0, 0) } else { // Otherwise, just make sure the 'is_match' bit is set. self.set_is_match(); } } write_u32(self.0, pid.as_u32()); } /// Indicate that no more pattern IDs will be added to this state. /// /// Once this is called, callers must not call it or 'add_match_pattern_id' /// again. /// /// This should not be exposed explicitly outside of this module. It /// should be called only when converting a StateBuilderMatches into a /// StateBuilderNFA. fn close_match_pattern_ids(&mut self) { // If we never wrote any pattern IDs, then there's nothing to do here. if !self.repr().has_pattern_ids() { return; } let patsize = PatternID::SIZE; let pattern_bytes = self.0.len() - 9; // Every pattern ID uses 4 bytes, so number of bytes should be // divisible by 4. assert_eq!(pattern_bytes % patsize, 0); // This unwrap is OK since we are guaranteed that the maximum number // of possible patterns fits into a u32. let count32 = u32::try_from(pattern_bytes / patsize).unwrap(); wire::NE::write_u32(count32, &mut self.0[5..9]); } /// Add an NFA state ID to this state. The order in which NFA states are /// added matters. It is the caller's responsibility to ensure that /// duplicate NFA state IDs are not added. fn add_nfa_state_id(&mut self, prev: &mut StateID, sid: StateID) { let delta = sid.as_i32() - prev.as_i32(); write_vari32(self.0, delta); *prev = sid; } /// Return a read-only view of this state's representation. 
fn repr(&self) -> Repr<'_> { Repr(self.0.as_slice()) } } /// Write a signed 32-bit integer using zig-zag encoding. /// /// https://developers.google.com/protocol-buffers/docs/encoding#varints fn write_vari32(data: &mut Vec<u8>, n: i32) { let mut un = n.to_bits() << 1; if n < 0 { un = !un; } write_varu32(data, un) } /// Read a signed 32-bit integer using zig-zag encoding. Also, return the /// number of bytes read. /// /// https://developers.google.com/protocol-buffers/docs/encoding#varints fn read_vari32(data: &[u8]) -> (i32, usize) { let (un, i) = read_varu32(data); let mut n = i32::from_bits(un >> 1); if un & 1 != 0 { n = !n; } (n, i) } /// Write an unsigned 32-bit integer as a varint. In essence, `n` is written /// as a sequence of bytes where all bytes except for the last one have the /// most significant bit set. The least significant 7 bits correspond to the /// actual bits of `n`. So in the worst case, a varint uses 5 bytes, but in /// very common cases, it uses fewer than 4. /// /// https://developers.google.com/protocol-buffers/docs/encoding#varints fn write_varu32(data: &mut Vec<u8>, mut n: u32) { while n >= 0b1000_0000 { data.push(n.low_u8() | 0b1000_0000); n >>= 7; } data.push(n.low_u8()); } /// Read an unsigned 32-bit varint. Also, return the number of bytes read. /// /// https://developers.google.com/protocol-buffers/docs/encoding#varints fn read_varu32(data: &[u8]) -> (u32, usize) { // N.B. We can assume correctness here since we know that all varuints are // written with write_varu32. Hence, the 'as' uses and unchecked arithmetic // is all okay. let mut n: u32 = 0; let mut shift: u32 = 0; for (i, &b) in data.iter().enumerate() { if b < 0b1000_0000 { return (n | (u32::from(b) << shift), i + 1); } n |= (u32::from(b) & 0b0111_1111) << shift; shift += 7; } (0, 0) } /// Push a native-endian encoded `n` on to `dst`. fn write_u32(dst: &mut Vec<u8>, n: u32) { use crate::util::wire::NE; let start = dst.len(); dst.extend(core::iter::repeat(0).take(mem::size_of::<u32>())); NE::write_u32(n, &mut dst[start..]); } #[cfg(test)] mod tests { use alloc::vec; use quickcheck::quickcheck; use super::*; #[cfg(not(miri))] quickcheck! { fn prop_state_read_write_nfa_state_ids(sids: Vec<StateID>) -> bool { // Builders states do not permit duplicate IDs. let sids = dedup_state_ids(sids); let mut b = StateBuilderEmpty::new().into_matches().into_nfa(); for &sid in &sids { b.add_nfa_state_id(sid); } let s = b.to_state(); let mut got = vec![]; s.iter_nfa_state_ids(|sid| got.push(sid)); got == sids } fn prop_state_read_write_pattern_ids(pids: Vec<PatternID>) -> bool { // Builders states do not permit duplicate IDs. let pids = dedup_pattern_ids(pids); let mut b = StateBuilderEmpty::new().into_matches(); for &pid in &pids { b.add_match_pattern_id(pid); } let s = b.into_nfa().to_state(); let mut got = vec![]; s.iter_match_pattern_ids(|pid| got.push(pid)); got == pids } fn prop_state_read_write_nfa_state_and_pattern_ids( sids: Vec<StateID>, pids: Vec<PatternID> ) -> bool { // Builders states do not permit duplicate IDs. let sids = dedup_state_ids(sids); let pids = dedup_pattern_ids(pids); let mut b = StateBuilderEmpty::new().into_matches(); for &pid in &pids { b.add_match_pattern_id(pid); } let mut b = b.into_nfa(); for &sid in &sids { b.add_nfa_state_id(sid); } let s = b.to_state(); let mut got_pids = vec![]; s.iter_match_pattern_ids(|pid| got_pids.push(pid)); let mut got_sids = vec![]; s.iter_nfa_state_ids(|sid| got_sids.push(sid)); got_pids == pids && got_sids == sids } } quickcheck! 
{ fn prop_read_write_varu32(n: u32) -> bool { let mut buf = vec![]; write_varu32(&mut buf, n); let (got, nread) = read_varu32(&buf); nread == buf.len() && got == n } fn prop_read_write_vari32(n: i32) -> bool { let mut buf = vec![]; write_vari32(&mut buf, n); let (got, nread) = read_vari32(&buf); nread == buf.len() && got == n } } #[cfg(not(miri))] fn dedup_state_ids(sids: Vec<StateID>) -> Vec<StateID> { let mut set = alloc::collections::BTreeSet::new(); let mut deduped = vec![]; for sid in sids { if set.contains(&sid) { continue; } set.insert(sid); deduped.push(sid); } deduped } #[cfg(not(miri))] fn dedup_pattern_ids(pids: Vec<PatternID>) -> Vec<PatternID> { let mut set = alloc::collections::BTreeSet::new(); let mut deduped = vec![]; for pid in pids { if set.contains(&pid) { continue; } set.insert(pid); deduped.push(pid); } deduped } } <file_sep>/testdata/regression.toml # See: https://github.com/rust-lang/regex/issues/48 [[test]] name = "invalid-regex-no-crash-100" regex = '(*)' haystack = "" matches = [] compiles = false # See: https://github.com/rust-lang/regex/issues/48 [[test]] name = "invalid-regex-no-crash-200" regex = '(?:?)' haystack = "" matches = [] compiles = false # See: https://github.com/rust-lang/regex/issues/48 [[test]] name = "invalid-regex-no-crash-300" regex = '(?)' haystack = "" matches = [] compiles = false # See: https://github.com/rust-lang/regex/issues/48 [[test]] name = "invalid-regex-no-crash-400" regex = '*' haystack = "" matches = [] compiles = false # See: https://github.com/rust-lang/regex/issues/75 [[test]] name = "unsorted-binary-search-100" regex = '(?i-u)[a_]+' haystack = "A_" matches = [[0, 2]] # See: https://github.com/rust-lang/regex/issues/75 [[test]] name = "unsorted-binary-search-200" regex = '(?i-u)[A_]+' haystack = "a_" matches = [[0, 2]] # See: https://github.com/rust-lang/regex/issues/76 [[test]] name = "unicode-case-lower-nocase-flag" regex = '(?i)\p{Ll}+' haystack = "ΛΘΓΔα" matches = [[0, 10]] # See: https://github.com/rust-lang/regex/issues/99 [[test]] name = "negated-char-class-100" regex = '(?i)[^x]' haystack = "x" matches = [] # See: https://github.com/rust-lang/regex/issues/99 [[test]] name = "negated-char-class-200" regex = '(?i)[^x]' haystack = "X" matches = [] # See: https://github.com/rust-lang/regex/issues/101 [[test]] name = "ascii-word-underscore" regex = '[[:word:]]' haystack = "_" matches = [[0, 1]] # See: https://github.com/rust-lang/regex/issues/129 [[test]] name = "captures-repeat" regex = '([a-f]){2}(?P<foo>[x-z])' haystack = "abx" matches = [ [[0, 3], [1, 2], [2, 3]], ] # See: https://github.com/rust-lang/regex/issues/153 [[test]] name = "alt-in-alt-100" regex = 'ab?|$' haystack = "az" matches = [[0, 1], [2, 2]] # See: https://github.com/rust-lang/regex/issues/153 [[test]] name = "alt-in-alt-200" regex = '^(?:.*?)(?:\n|\r\n?|$)' haystack = "ab\rcd" matches = [[0, 3]] # See: https://github.com/rust-lang/regex/issues/169 [[test]] name = "leftmost-first-prefix" regex = 'z*azb' haystack = "azb" matches = [[0, 3]] # See: https://github.com/rust-lang/regex/issues/191 [[test]] name = "many-alternates" regex = '1|2|3|4|5|6|7|8|9|10|int' haystack = "int" matches = [[0, 3]] # See: https://github.com/rust-lang/regex/issues/204 [[test]] name = "word-boundary-alone-100" regex = '\b' haystack = "Should this (work?)" matches = [[0, 0], [6, 6], [7, 7], [11, 11], [13, 13], [17, 17]] # See: https://github.com/rust-lang/regex/issues/204 [[test]] name = "word-boundary-alone-200" regex = '\b' haystack = "a b c" matches = [[0, 0], [1, 1], [2, 2], 
[3, 3], [4, 4], [5, 5]] # See: https://github.com/rust-lang/regex/issues/264 [[test]] name = "word-boundary-ascii-no-capture" regex = '\B' haystack = "\U00028F3E" matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] unicode = false utf8 = false # See: https://github.com/rust-lang/regex/issues/264 [[test]] name = "word-boundary-ascii-capture" regex = '(?:\B)' haystack = "\U00028F3E" matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] unicode = false utf8 = false # See: https://github.com/rust-lang/regex/issues/268 [[test]] name = "partial-anchor" regex = '^a|b' haystack = "ba" matches = [[0, 1]] # See: https://github.com/rust-lang/regex/issues/271 [[test]] name = "endl-or-word-boundary" regex = '(?m:$)|(?-u:\b)' haystack = "\U0006084E" matches = [[4, 4]] # See: https://github.com/rust-lang/regex/issues/271 [[test]] name = "zero-or-end" regex = '(?i-u:\x00)|$' haystack = "\U000E682F" matches = [[4, 4]] # See: https://github.com/rust-lang/regex/issues/271 [[test]] name = "y-or-endl" regex = '(?i-u:y)|(?m:$)' haystack = "\U000B4331" matches = [[4, 4]] # See: https://github.com/rust-lang/regex/issues/271 [[test]] name = "word-boundary-start-x" regex = '(?u:\b)^(?-u:X)' haystack = "X" matches = [[0, 1]] # See: https://github.com/rust-lang/regex/issues/271 [[test]] name = "word-boundary-ascii-start-x" regex = '(?-u:\b)^(?-u:X)' haystack = "X" matches = [[0, 1]] # See: https://github.com/rust-lang/regex/issues/271 [[test]] name = "end-not-word-boundary" regex = '$\B' haystack = "\U0005C124\U000B576C" matches = [[8, 8]] unicode = false utf8 = false # See: https://github.com/rust-lang/regex/issues/280 [[test]] name = "partial-anchor-alternate-begin" regex = '^a|z' haystack = "yyyyya" matches = [] # See: https://github.com/rust-lang/regex/issues/280 [[test]] name = "partial-anchor-alternate-end" regex = 'a$|z' haystack = "ayyyyy" matches = [] # See: https://github.com/rust-lang/regex/issues/289 [[test]] name = "lits-unambiguous-100" regex = '(?:ABC|CDA|BC)X' haystack = "CDAX" matches = [[0, 4]] # See: https://github.com/rust-lang/regex/issues/291 [[test]] name = "lits-unambiguous-200" regex = '((IMG|CAM|MG|MB2)_|(DSCN|CIMG))(?P<n>[0-9]+)$' haystack = "CIMG2341" matches = [ [[0, 8], [0, 4], [], [0, 4], [4, 8]], ] # See: https://github.com/rust-lang/regex/issues/303 # # 2022-09-19: This has now been "properly" fixed in that empty character # classes are fully supported as something that can never match. This test # used to be marked as 'compiles = false', but now it works. [[test]] name = "negated-full-byte-range" regex = '[^\x00-\xFF]' haystack = "" matches = [] compiles = true unicode = false utf8 = false # See: https://github.com/rust-lang/regex/issues/321 [[test]] name = "strange-anchor-non-complete-prefix" regex = 'a^{2}' haystack = "" matches = [] # See: https://github.com/rust-lang/regex/issues/321 [[test]] name = "strange-anchor-non-complete-suffix" regex = '${2}a' haystack = "" matches = [] # See: https://github.com/rust-lang/regex/issues/334 # See: https://github.com/rust-lang/regex/issues/557 [[test]] name = "captures-after-dfa-premature-end-100" regex = 'a(b*(X|$))?' haystack = "abcbX" matches = [ [[0, 1], [], []], ] # See: https://github.com/rust-lang/regex/issues/334 # See: https://github.com/rust-lang/regex/issues/557 [[test]] name = "captures-after-dfa-premature-end-200" regex = 'a(bc*(X|$))?' 
haystack = "abcbX" matches = [ [[0, 1], [], []], ] # See: https://github.com/rust-lang/regex/issues/334 # See: https://github.com/rust-lang/regex/issues/557 [[test]] name = "captures-after-dfa-premature-end-300" regex = '(aa$)?' haystack = "aaz" matches = [ [[0, 0], []], [[1, 1], []], [[2, 2], []], [[3, 3], []], ] # Plucked from "Why aren’t regular expressions a lingua franca? an empirical # study on the re-use and portability of regular expressions", The ACM Joint # European Software Engineering Conference and Symposium on the Foundations of # Software Engineering (ESEC/FSE), 2019. # # Link: https://dl.acm.org/doi/pdf/10.1145/3338906.3338909 [[test]] name = "captures-after-dfa-premature-end-400" regex = '(a)\d*\.?\d+\b' haystack = "a0.0c" matches = [ [[0, 2], [0, 1]], ] # See: https://github.com/rust-lang/regex/issues/437 [[test]] name = "literal-panic" regex = 'typename type\-parameter\-[0-9]+\-[0-9]+::.+' haystack = "test" matches = [] # See: https://github.com/rust-lang/regex/issues/527 [[test]] name = "empty-flag-expr" regex = '(?:(?:(?x)))' haystack = "" matches = [[0, 0]] # See: https://github.com/rust-lang/regex/issues/533 #[[tests]] #name = "blank-matches-nothing-between-space-and-tab" #regex = '[[:blank:]]' #input = '\x0A\x0B\x0C\x0D\x0E\x0F\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1A\x1B\x1C\x1D\x1E\x1F' #match = false #unescape = true # See: https://github.com/rust-lang/regex/issues/533 #[[tests]] #name = "blank-matches-nothing-between-space-and-tab-inverted" #regex = '^[[:^blank:]]+$' #input = '\x0A\x0B\x0C\x0D\x0E\x0F\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1A\x1B\x1C\x1D\x1E\x1F' #match = true #unescape = true # See: https://github.com/rust-lang/regex/issues/555 [[test]] name = "invalid-repetition" regex = '(?m){1,1}' haystack = "" matches = [] compiles = false # See: https://github.com/rust-lang/regex/issues/640 [[test]] name = "flags-are-unset" regex = '(?:(?i)foo)|Bar' haystack = "foo Foo bar Bar" matches = [[0, 3], [4, 7], [12, 15]] # Note that 'Ј' is not 'j', but cyrillic Je # https://en.wikipedia.org/wiki/Je_(Cyrillic) # # See: https://github.com/rust-lang/regex/issues/659 [[test]] name = "empty-group-with-unicode" regex = '(?:)Ј01' haystack = 'zЈ01' matches = [[1, 5]] # See: https://github.com/rust-lang/regex/issues/579 [[test]] name = "word-boundary-weird" regex = '\b..\b' haystack = "I have 12, he has 2!" matches = [[0, 2], [7, 9], [9, 11], [11, 13], [17, 19]] # See: https://github.com/rust-lang/regex/issues/579 [[test]] name = "word-boundary-weird-ascii" regex = '\b..\b' haystack = "I have 12, he has 2!" matches = [[0, 2], [7, 9], [9, 11], [11, 13], [17, 19]] unicode = false utf8 = false # See: https://github.com/rust-lang/regex/issues/579 [[test]] name = "word-boundary-weird-minimal-ascii" regex = '\b..\b' haystack = "az,,b" matches = [[0, 2], [2, 4]] unicode = false utf8 = false # See: https://github.com/BurntSushi/ripgrep/issues/1203 [[test]] name = "reverse-suffix-100" regex = '[0-4][0-4][0-4]000' haystack = "153.230000" matches = [[4, 10]] # See: https://github.com/BurntSushi/ripgrep/issues/1203 [[test]] name = "reverse-suffix-200" regex = '[0-9][0-9][0-9]000' haystack = "153.230000\n" matches = [[4, 10]] # This is a tricky case for the reverse suffix optimization, because it # finds the 'foobar' match but the reverse scan must fail to find a match by # correctly dealing with the word boundary following the 'foobar' literal when # computing the start state. 
# # This test exists because I tried to break the following assumption that # is currently in the code: that if a suffix is found and the reverse scan # succeeds, then it's guaranteed that there is an overall match. Namely, the # 'is_match' routine does *not* do another forward scan in this case because of # this assumption. [[test]] name = "reverse-suffix-300" regex = '\w+foobar\b' haystack = "xyzfoobarZ" matches = [] unicode = false utf8 = false # See: https://github.com/BurntSushi/ripgrep/issues/1247 [[test]] name = "stops" regex = '\bs(?:[ab])' haystack = 's\xE4' matches = [] unescape = true utf8 = false # See: https://github.com/BurntSushi/ripgrep/issues/1247 [[test]] name = "stops-ascii" regex = '(?-u:\b)s(?:[ab])' haystack = 's\xE4' matches = [] unescape = true utf8 = false # See: https://github.com/rust-lang/regex/issues/850 [[test]] name = "adjacent-line-boundary-100" regex = '(?m)^(?:[^ ]+?)$' haystack = "line1\nline2" matches = [[0, 5], [6, 11]] # Continued. [[test]] name = "adjacent-line-boundary-200" regex = '(?m)^(?:[^ ]+?)$' haystack = "A\nB" matches = [[0, 1], [2, 3]] # There is no issue for this bug. [[test]] name = "anchored-prefix-100" regex = '^a[[:^space:]]' haystack = "a " matches = [] # There is no issue for this bug. [[test]] name = "anchored-prefix-200" regex = '^a[[:^space:]]' haystack = "foo boo a" matches = [] # There is no issue for this bug. [[test]] name = "anchored-prefix-300" regex = '^-[a-z]' haystack = "r-f" matches = [] # Tests that a possible Aho-Corasick optimization works correctly. It only # kicks in when we have a lot of literals. By "works correctly," we mean that # leftmost-first match semantics are properly respected. That is, samwise # should match, not sam. # # There is no issue for this bug. [[test]] name = "aho-corasick-100" regex = 'samwise|sam|a|b|c|d|e|f|g|h|i|j|k|l|m|n|o|p|q|r|s|t|u|v|w|x|y|z|A|B|C|D|E|F|G|H|I|J|K|L|M|N|O|P|Q|R|S|T|U|V|W|X|Y|Z' haystack = "samwise" matches = [[0, 7]] # See: https://github.com/rust-lang/regex/issues/921 [[test]] name = "interior-anchor-capture" regex = '(a$)b$' haystack = 'ab' matches = [] # I found this bug in the course of adding some of the regexes that Ruff uses # to rebar. It turns out that the lazy DFA was finding a match that was being # rejected by the one-pass DFA. Yikes. I then minimized the regex and haystack. 
# # Source: https://github.com/charliermarsh/ruff/blob/a919041ddaa64cdf6f216f90dd0480dab69fd3ba/crates/ruff/src/rules/pycodestyle/rules/whitespace_around_keywords.rs#L52 [[test]] name = "ruff-whitespace-around-keywords" regex = '^(a|ab)$' haystack = "ab" anchored = true unicode = false utf8 = true matches = [[[0, 2], [0, 2]]] # From: https://github.com/rust-lang/regex/issues/429 [[test]] name = "i429-0" regex = '(?:(?-u:\b)|(?u:h))+' haystack = "h" unicode = true utf8 = false matches = [[0, 0], [1, 1]] # From: https://github.com/rust-lang/regex/issues/429 [[test]] name = "i429-1" regex = '(?u:\B)' haystack = "鋸" unicode = true utf8 = false matches = [] # From: https://github.com/rust-lang/regex/issues/429 [[test]] name = "i429-2" regex = '(?:(?u:\b)|(?s-u:.))+' haystack = "oB" unicode = true utf8 = false matches = [[0, 0], [1, 2]] # From: https://github.com/rust-lang/regex/issues/429 [[test]] name = "i429-3" regex = '(?:(?-u:\B)|(?su:.))+' haystack = "\U000FEF80" unicode = true utf8 = false matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] # From: https://github.com/rust-lang/regex/issues/429 [[test]] name = "i429-3-utf8" regex = '(?:(?-u:\B)|(?su:.))+' haystack = "\U000FEF80" unicode = true utf8 = true matches = [[0, 0], [4, 4]] # From: https://github.com/rust-lang/regex/issues/429 [[test]] name = "i429-4" regex = '(?m:$)(?m:^)(?su:.)' haystack = "\n‣" unicode = true utf8 = false matches = [[0, 1]] # From: https://github.com/rust-lang/regex/issues/429 [[test]] name = "i429-5" regex = '(?m:$)^(?m:^)' haystack = "\n" unicode = true utf8 = false matches = [[0, 0]] # From: https://github.com/rust-lang/regex/issues/429 [[test]] name = "i429-6" regex = '(?P<kp>(?iu:do)(?m:$))*' haystack = "dodo" unicode = true utf8 = false matches = [ [[0, 0], []], [[1, 1], []], [[2, 4], [2, 4]], ] # From: https://github.com/rust-lang/regex/issues/429 [[test]] name = "i429-7" regex = '(?u:\B)' haystack = "䡁" unicode = true utf8 = false matches = [] # From: https://github.com/rust-lang/regex/issues/429 [[test]] name = "i429-8" regex = '(?:(?-u:\b)|(?u:[\u{0}-W]))+' haystack = "0" unicode = true utf8 = false matches = [[0, 0], [1, 1]] # From: https://github.com/rust-lang/regex/issues/429 [[test]] name = "i429-9" regex = '((?m:$)(?-u:\B)(?s-u:.)(?-u:\B)$)' haystack = "\n\n" unicode = true utf8 = false matches = [ [[1, 2], [1, 2]], ] # From: https://github.com/rust-lang/regex/issues/429 [[test]] name = "i429-10" regex = '(?m:$)(?m:$)^(?su:.)' haystack = "\n\u0081¨\u200a" unicode = true utf8 = false matches = [[0, 1]] # From: https://github.com/rust-lang/regex/issues/429 [[test]] name = "i429-11" regex = '(?-u:\B)(?m:^)' haystack = "0\n" unicode = true utf8 = false matches = [[2, 2]] # From: https://github.com/rust-lang/regex/issues/429 [[test]] name = "i429-12" regex = '(?:(?u:\b)|(?-u:.))+' haystack = "0" unicode = true utf8 = false matches = [[0, 0], [1, 1]] # From: https://github.com/rust-lang/regex/issues/969 [[test]] name = "i969" regex = 'c.*d\z' haystack = "ababcd" bounds = [4, 6] search-kind = "earliest" matches = [[4, 6]] # I found this during the regex-automata migration. This is the fowler basic # 154 test, but without anchored = true and without a match limit. # # This test caught a subtle bug in the hybrid reverse DFA search, where it # would skip over the termination condition if it entered a start state. This # was a double bug. 
Firstly, the reverse DFA shouldn't have had start states # specialized in the first place, and thus it shouldn't have possible to detect # that the DFA had entered a start state. The second bug was that the start # state handling was incorrect by jumping over the termination condition. [[test]] name = "fowler-basic154-unanchored" regex = '''a([bc]*)c*''' haystack = '''abc''' matches = [[[0, 3], [1, 3]]] # From: https://github.com/rust-lang/regex/issues/981 # # This was never really a problem in the new architecture because the # regex-automata engines are far more principled about how they deal with # look-around. (This was one of the many reasons I wanted to re-work the # original regex crate engines.) [[test]] name = "word-boundary-interact-poorly-with-literal-optimizations" regex = '(?i:(?:\b|_)win(?:32|64|dows)?(?:\b|_))' haystack = 'ubi-Darwin-x86_64.tar.gz' matches = [] # This was found during fuzz testing of regex. It provoked a panic in the meta # engine as a result of the reverse suffix optimization. Namely, it hit a case # where a suffix match was found, a corresponding reverse match was found, but # the forward search turned up no match. The forward search should always match # if the suffix and reverse search match. # # This in turn uncovered an inconsistency between the PikeVM and the DFA (lazy # and fully compiled) engines. It was caused by a mishandling of the collection # of NFA state IDs in the generic determinization code (which is why both types # of DFA were impacted). Namely, when a fail state was encountered (that's the # `[^\s\S]` in the pattern below), then it would just stop collecting states. # But that's not correct since a later state could lead to a match. [[test]] name = "impossible-branch" regex = '.*[^\s\S]A|B' haystack = "B" matches = [[0, 1]] # This was found during fuzz testing in regex-lite. The regex crate never # suffered from this bug, but it causes regex-lite to incorrectly compile # captures. [[test]] name = "captures-wrong-order" regex = '(a){0}(a)' haystack = 'a' matches = [[[0, 1], [], [0, 1]]] # This tests a bug in how quit states are handled in the DFA. At some point # during development, the DFAs were tweaked slightly such that if they hit # a quit state (which means, they hit a byte that the caller configured should # stop the search), then it might not return an error necessarily. Namely, if a # match had already been found, then it would be returned instead of an error. # # But this is actually wrong! Why? Because even though a match had been found, # it wouldn't be fully correct to return it once a quit state has been seen # because you can't determine whether the match offset returned is the correct # greedy/leftmost-first match. Since you can't complete the search as requested # by the caller, the DFA should just stop and return an error. # # Interestingly, this does seem to produce an unavoidable difference between # 'try_is_match().unwrap()' and 'try_find().unwrap().is_some()' for the DFAs. # The former will stop immediately once a match is known to occur and return # 'Ok(true)', where as the latter could find the match but quit with an # 'Err(..)' first. # # Thankfully, I believe this inconsistency between 'is_match()' and 'find()' # cannot be observed in the higher level meta regex API because it specifically # will try another engine that won't fail in the case of a DFA failing. # # This regression happened in the regex crate rewrite, but before anything got # released. 
[[test]] name = "negated-unicode-word-boundary-dfa-fail" regex = '\B.*' haystack = "!\u02D7" matches = [[0, 3]] # This failure was found in the *old* regex crate (prior to regex 1.9), but # I didn't investigate why. My best guess is that it's a literal optimization # bug. It didn't occur in the rewrite. [[test]] name = "missed-match" regex = 'e..+e.ee>' haystack = 'Zeee.eZZZZZZZZeee>eeeeeee>' matches = [[1, 26]] # This test came from the 'ignore' crate and tripped a bug in how accelerated # DFA states were handled in an overlapping search. [[test]] name = "regex-to-glob" regex = ['(?-u)^path1/[^/]*$'] haystack = "path1/foo" matches = [[0, 9]] utf8 = false match-kind = "all" search-kind = "overlapping" # See: https://github.com/rust-lang/regex/issues/1060 [[test]] name = "reverse-inner-plus-shorter-than-expected" regex = '(?:(\d+)[:.])?(\d{1,2})[:.](\d{2})' haystack = '102:12:39' matches = [[[0, 9], [0, 3], [4, 6], [7, 9]]] # Like reverse-inner-plus-shorter-than-expected, but using a far simpler regex # to demonstrate the extent of the rot. Sigh. # # See: https://github.com/rust-lang/regex/issues/1060 [[test]] name = "reverse-inner-short" regex = '(?:([0-9][0-9][0-9]):)?([0-9][0-9]):([0-9][0-9])' haystack = '102:12:39' matches = [[[0, 9], [0, 3], [4, 6], [7, 9]]] # This regression test was found via the RegexSet APIs. It triggered a # particular code path where a regex was compiled with 'All' match semantics # (to support overlapping search), but got funneled down into a standard # leftmost search when calling 'is_match'. This is fine on its own, but the # leftmost search will use a prefilter and that's where this went awry. # # Namely, since 'All' semantics were used, the aho-corasick prefilter was # incorrectly compiled with 'Standard' semantics. This was wrong because # 'Standard' immediately attempts to report a match at every position, even if # that would mean reporting a match past the leftmost match before reporting # the leftmost match. This breaks the prefilter contract of never having false # negatives and leads overall to the engine not finding a match. # # See: https://github.com/rust-lang/regex/issues/1070 [[test]] name = "prefilter-with-aho-corasick-standard-semantics" regex = '(?m)^ *v [0-9]' haystack = 'v 0' matches = [ { id = 0, spans = [[0, 3]] }, ] match-kind = "all" search-kind = "overlapping" unicode = true utf8 = true <file_sep>/regex-lite/src/pool.rs use core::panic::{RefUnwindSafe, UnwindSafe}; use alloc::{boxed::Box, vec, vec::Vec}; use crate::pikevm; // Literally the only reason that this crate requires 'std' currently. // // In regex-automata, we support the no-std use case by rolling our own // spin-lock based Mutex. That's questionable on its own, but it's not clear if // we should be doing that here. It will require introducing non-safe code in a // crate that is otherwise safe. But maybe it's worth doing? use std::sync::Mutex; /// A type alias for our pool of meta::Cache that fixes the type parameters to /// what we use for the meta regex below. pub(crate) type CachePool = Pool<pikevm::Cache, CachePoolFn>; /// Same as above, but for the guard returned by a pool. pub(crate) type CachePoolGuard<'a> = PoolGuard<'a, pikevm::Cache, CachePoolFn>; /// The type of the closure we use to create new caches. We need to spell out /// all of the marker traits or else we risk leaking !MARKER impls. pub(crate) type CachePoolFn = Box<dyn Fn() -> pikevm::Cache + Send + Sync + UnwindSafe + RefUnwindSafe>; /// A thread safe pool utilizing alloc-only features. 
/// /// Unlike the pool in regex-automata, this has no "fast path." We could add /// it, but it's more code and requires reasoning about safety. pub(crate) struct Pool<T, F> { /// A stack of T values to hand out. These are used when a Pool is /// accessed by a thread that didn't create it. stack: Mutex<Vec<Box<T>>>, /// A function to create more T values when stack is empty and a caller /// has requested a T. create: F, } // If T is UnwindSafe, then since we provide exclusive access to any // particular value in the pool, it should therefore also be considered // RefUnwindSafe. impl<T: UnwindSafe, F: UnwindSafe> RefUnwindSafe for Pool<T, F> {} impl<T, F> Pool<T, F> { /// Create a new pool. The given closure is used to create values in /// the pool when necessary. pub(crate) const fn new(create: F) -> Pool<T, F> { Pool { stack: Mutex::new(vec![]), create } } } impl<T: Send, F: Fn() -> T> Pool<T, F> { /// Get a value from the pool. This may block if another thread is also /// attempting to retrieve a value from the pool. pub(crate) fn get(&self) -> PoolGuard<'_, T, F> { let mut stack = self.stack.lock().unwrap(); let value = match stack.pop() { None => Box::new((self.create)()), Some(value) => value, }; PoolGuard { pool: self, value: Some(value) } } /// Puts a value back into the pool. Callers don't need to call this. /// Once the guard that's returned by 'get' is dropped, it is put back /// into the pool automatically. fn put_value(&self, value: Box<T>) { let mut stack = self.stack.lock().unwrap(); stack.push(value); } } impl<T: core::fmt::Debug, F> core::fmt::Debug for Pool<T, F> { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_struct("Pool").field("stack", &self.stack).finish() } } /// A guard that is returned when a caller requests a value from the pool. pub(crate) struct PoolGuard<'a, T: Send, F: Fn() -> T> { /// The pool that this guard is attached to. pool: &'a Pool<T, F>, /// This is None after the guard has been put back into the pool. value: Option<Box<T>>, } impl<'a, T: Send, F: Fn() -> T> Drop for PoolGuard<'a, T, F> { fn drop(&mut self) { if let Some(value) = self.value.take() { self.pool.put_value(value); } } } impl<'a, T: Send, F: Fn() -> T> core::ops::Deref for PoolGuard<'a, T, F> { type Target = T; fn deref(&self) -> &T { self.value.as_deref().unwrap() } } impl<'a, T: Send, F: Fn() -> T> core::ops::DerefMut for PoolGuard<'a, T, F> { fn deref_mut(&mut self) -> &mut T { self.value.as_deref_mut().unwrap() } } impl<'a, T: Send + core::fmt::Debug, F: Fn() -> T> core::fmt::Debug for PoolGuard<'a, T, F> { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { f.debug_struct("PoolGuard") .field("pool", &self.pool) .field("value", &self.value) .finish() } } <file_sep>/regex-cli/util.rs use std::{ io::{self, Write}, path::Path, process::Command, }; use {anyhow::Context, bstr::BString}; /// Time an arbitrary operation. pub fn timeit<T>(run: impl FnOnce() -> T) -> (T, std::time::Duration) { let start = std::time::Instant::now(); let t = run(); (t, start.elapsed()) } /// Convenient time an operation that returns a result by packing the duration /// into the `Ok` variant. pub fn timeitr<T, E>( run: impl FnOnce() -> Result<T, E>, ) -> Result<(T, std::time::Duration), E> { let (result, time) = timeit(run); let t = result?; Ok((t, time)) } /// Run rustfmt on the given file. 
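///
/// A minimal usage sketch (this assumes a `rustfmt` binary is available on
/// `PATH`; the path below is purely illustrative):
///
/// ```ignore
/// rustfmt("src/generated.rs")?;
/// ```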
pub fn rustfmt<P: AsRef<Path>>(path: P) -> anyhow::Result<()> { let path = path.as_ref(); let out = Command::new("rustfmt") .arg(path) .output() .context("rustfmt command failed")?; anyhow::ensure!( out.status.success(), "rustfmt {}: {}", path.display(), BString::from(out.stderr), ); Ok(()) } /// A somewhat silly little thing that prints an aligned table of key-value /// pairs. Keys can be any string and values can be anything that implements /// Debug. /// /// This table is used to print little bits of useful information about stuff. #[derive(Debug)] pub struct Table { pairs: Vec<(String, Box<dyn std::fmt::Debug>)>, } impl Table { pub fn empty() -> Table { Table { pairs: vec![] } } pub fn add<D: std::fmt::Debug + 'static>( &mut self, label: &str, value: D, ) { self.pairs.push((label.to_string(), Box::new(value))); } pub fn print<W: io::Write>(&self, wtr: W) -> io::Result<()> { let mut wtr = tabwriter::TabWriter::new(wtr) .alignment(tabwriter::Alignment::Right); for (label, value) in self.pairs.iter() { writeln!(wtr, "{}:\t{:?}", label, value)?; } wtr.flush() } } <file_sep>/regex-automata/src/util/alphabet.rs /*! This module provides APIs for dealing with the alphabets of finite state machines. There are two principal types in this module, [`ByteClasses`] and [`Unit`]. The former defines the alphabet of a finite state machine while the latter represents an element of that alphabet. To a first approximation, the alphabet of all automata in this crate is just a `u8`. Namely, every distinct byte value. All 256 of them. In practice, this can be quite wasteful when building a transition table for a DFA, since it requires storing a state identifier for each element in the alphabet. Instead, we collapse the alphabet of an automaton down into equivalence classes, where every byte in the same equivalence class never discriminates between a match or a non-match from any other byte in the same class. For example, in the regex `[a-z]+`, then you could consider it having an alphabet consisting of two equivalence classes: `a-z` and everything else. In terms of the transitions on an automaton, it doesn't actually require representing every distinct byte. Just the equivalence classes. The downside of equivalence classes is that, of course, searching a haystack deals with individual byte values. Those byte values need to be mapped to their corresponding equivalence class. This is what `ByteClasses` does. In practice, doing this for every state transition has negligible impact on modern CPUs. Moreover, it helps make more efficient use of the CPU cache by (possibly considerably) shrinking the size of the transition table. One last hiccup concerns `Unit`. Namely, because of look-around and how the DFAs in this crate work, we need to add a sentinel value to our alphabet of equivalence classes that represents the "end" of a search. We call that sentinel [`Unit::eoi`] or "end of input." Thus, a `Unit` is either an equivalence class corresponding to a set of bytes, or it is a special "end of input" sentinel. In general, you should not expect to need either of these types unless you're doing lower level shenanigans with DFAs, or even building your own DFAs. (Although, you don't have to use these types to build your own DFAs of course.) For example, if you're walking a DFA's state graph, it's probably useful to make use of [`ByteClasses`] to visit each element in the DFA's alphabet instead of just visiting every distinct `u8` value. The latter isn't necessarily wrong, but it could be potentially very wasteful. 
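
For example, one quick way to see how coarse the equivalence classes for a
particular regex are is to count them. (This is just a sketch; it reuses the
same `NFA::new` and `byte_classes` APIs shown in the type-level examples
below.)

```
use regex_automata::nfa::thompson::NFA;

let nfa = NFA::new("[a-z]+")?;
// One class for everything below 'a', one for 'a'-'z', one for everything
// above 'z', plus the special "end of input" sentinel class.
assert_eq!(4, nfa.byte_classes().alphabet_len());
# Ok::<(), Box<dyn std::error::Error>>(())
```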
*/

use crate::util::{
    escape::DebugByte,
    wire::{self, DeserializeError, SerializeError},
};

/// Unit represents a single unit of haystack for DFA-based regex engines.
///
/// It is not expected for consumers of this crate to need to use this type
/// unless they are implementing their own DFA. And even then, it's not
/// required: implementors may use other techniques to handle haystack units.
///
/// Typically, a single unit of haystack for a DFA would be a single byte.
/// However, for the DFAs in this crate, matches are delayed by a single byte
/// in order to handle look-ahead assertions (`\b`, `$` and `\z`). Thus, once
/// we have consumed the haystack, we must run the DFA through one additional
/// transition using a unit that indicates the haystack has ended.
///
/// There is no way to represent a sentinel with a `u8` since all possible
/// values *may* be valid haystack units to a DFA, therefore this type
/// explicitly adds room for a sentinel value.
///
/// The sentinel EOI value is always its own equivalence class and is
/// ultimately represented by adding 1 to the maximum equivalence class value.
/// So for example, the regex `^[a-z]+$` might be split into the following
/// equivalence classes:
///
/// ```text
/// 0 => [\x00-`]
/// 1 => [a-z]
/// 2 => [{-\xFF]
/// 3 => [EOI]
/// ```
///
/// Where EOI is the special sentinel value that is always in its own
/// singleton equivalence class.
#[derive(Clone, Copy, Eq, PartialEq, PartialOrd, Ord)]
pub struct Unit(UnitKind);

#[derive(Clone, Copy, Eq, PartialEq, PartialOrd, Ord)]
enum UnitKind {
    /// Represents a byte value, or more typically, an equivalence class
    /// represented as a byte value.
    U8(u8),
    /// Represents the "end of input" sentinel. We regrettably use a `u16`
    /// here since the maximum sentinel value is `256`. Thankfully, we don't
    /// actually store a `Unit` anywhere, so this extra space shouldn't be too
    /// bad.
    EOI(u16),
}

impl Unit {
    /// Create a new haystack unit from a byte value.
    ///
    /// All possible byte values are legal. However, when creating a haystack
    /// unit for a specific DFA, one should be careful to only construct units
    /// that are in that DFA's alphabet. Namely, one way to compact a DFA's
    /// in-memory representation is to collapse its transitions over the set
    /// of all possible byte values into a smaller set of equivalence classes.
    /// If a DFA uses equivalence classes instead of byte values, then the
    /// byte given here should be the equivalence class.
    pub fn u8(byte: u8) -> Unit {
        Unit(UnitKind::U8(byte))
    }

    /// Create a new "end of input" haystack unit.
    ///
    /// The value given is the sentinel value used by this unit to represent
    /// the "end of input." The value should be the total number of equivalence
    /// classes in the corresponding alphabet. Its maximum value is `256`,
    /// which occurs when every byte is its own equivalence class.
    ///
    /// # Panics
    ///
    /// This panics when `num_byte_equiv_classes` is greater than `256`.
    pub fn eoi(num_byte_equiv_classes: usize) -> Unit {
        assert!(
            num_byte_equiv_classes <= 256,
            "max number of byte-based equivalent classes is 256, but got {}",
            num_byte_equiv_classes,
        );
        Unit(UnitKind::EOI(u16::try_from(num_byte_equiv_classes).unwrap()))
    }

    /// If this unit is not an "end of input" sentinel, then returns its
    /// underlying byte value. Otherwise return `None`.
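    ///
    /// # Example
    ///
    /// A small sketch of both cases:
    ///
    /// ```
    /// use regex_automata::util::alphabet::Unit;
    ///
    /// assert_eq!(Some(b'a'), Unit::u8(b'a').as_u8());
    /// assert_eq!(None, Unit::eoi(5).as_u8());
    /// ```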
    pub fn as_u8(self) -> Option<u8> {
        match self.0 {
            UnitKind::U8(b) => Some(b),
            UnitKind::EOI(_) => None,
        }
    }

    /// If this unit is an "end of input" sentinel, then return the underlying
    /// sentinel value that was given to [`Unit::eoi`]. Otherwise return
    /// `None`.
    pub fn as_eoi(self) -> Option<u16> {
        match self.0 {
            UnitKind::U8(_) => None,
            UnitKind::EOI(sentinel) => Some(sentinel),
        }
    }

    /// Return this unit as a `usize`, regardless of whether it is a byte value
    /// or an "end of input" sentinel. In the latter case, the underlying
    /// sentinel value given to [`Unit::eoi`] is returned.
    pub fn as_usize(self) -> usize {
        match self.0 {
            UnitKind::U8(b) => usize::from(b),
            UnitKind::EOI(eoi) => usize::from(eoi),
        }
    }

    /// Returns true if and only if this unit is a byte value equivalent to the
    /// byte given. This always returns false when this is an "end of input"
    /// sentinel.
    pub fn is_byte(self, byte: u8) -> bool {
        self.as_u8().map_or(false, |b| b == byte)
    }

    /// Returns true when this unit represents an "end of input" sentinel.
    pub fn is_eoi(self) -> bool {
        self.as_eoi().is_some()
    }

    /// Returns true when this unit corresponds to an ASCII word byte.
    ///
    /// This always returns false when this unit represents an "end of input"
    /// sentinel.
    pub fn is_word_byte(self) -> bool {
        self.as_u8().map_or(false, crate::util::utf8::is_word_byte)
    }
}

impl core::fmt::Debug for Unit {
    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
        match self.0 {
            UnitKind::U8(b) => write!(f, "{:?}", DebugByte(b)),
            UnitKind::EOI(_) => write!(f, "EOI"),
        }
    }
}

/// A representation of byte-oriented equivalence classes.
///
/// This is used in a DFA to reduce the size of the transition table. This can
/// have a particularly large impact not only on the total size of a dense DFA,
/// but also on compile times.
///
/// The essential idea here is that the alphabet of a DFA is shrunk from the
/// usual 256 distinct byte values down to a set of equivalence classes. The
/// guarantee you get is that any byte belonging to the same equivalence class
/// can be treated as if it were any other byte in the same class, and the
/// result of a search wouldn't change.
///
/// # Example
///
/// This example shows how to get byte classes from an
/// [`NFA`](crate::nfa::thompson::NFA) and ask for the class of various bytes.
///
/// ```
/// use regex_automata::nfa::thompson::NFA;
///
/// let nfa = NFA::new("[a-z]+")?;
/// let classes = nfa.byte_classes();
/// // 'a' and 'z' are in the same class for this regex.
/// assert_eq!(classes.get(b'a'), classes.get(b'z'));
/// // But 'a' and 'A' are not.
/// assert_ne!(classes.get(b'a'), classes.get(b'A'));
///
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
#[derive(Clone, Copy)]
pub struct ByteClasses([u8; 256]);

impl ByteClasses {
    /// Creates a new set of equivalence classes where all bytes are mapped to
    /// the same class.
    #[inline]
    pub fn empty() -> ByteClasses {
        ByteClasses([0; 256])
    }

    /// Creates a new set of equivalence classes where each byte belongs to
    /// its own equivalence class.
    #[inline]
    pub fn singletons() -> ByteClasses {
        let mut classes = ByteClasses::empty();
        for b in 0..=255 {
            classes.set(b, b);
        }
        classes
    }

    /// Deserializes a byte class map from the given slice. If the slice is of
    /// insufficient length or otherwise contains an impossible mapping, then
    /// an error is returned. Upon success, the number of bytes read along with
    /// the map are returned. The number of bytes read is always a multiple of
    /// 8.
pub(crate) fn from_bytes( slice: &[u8], ) -> Result<(ByteClasses, usize), DeserializeError> { wire::check_slice_len(slice, 256, "byte class map")?; let mut classes = ByteClasses::empty(); for (b, &class) in slice[..256].iter().enumerate() { classes.set(u8::try_from(b).unwrap(), class); } // We specifically don't use 'classes.iter()' here because that // iterator depends on 'classes.alphabet_len()' being correct. But that // is precisely the thing we're trying to verify below! for &b in classes.0.iter() { if usize::from(b) >= classes.alphabet_len() { return Err(DeserializeError::generic( "found equivalence class greater than alphabet len", )); } } Ok((classes, 256)) } /// Writes this byte class map to the given byte buffer. if the given /// buffer is too small, then an error is returned. Upon success, the total /// number of bytes written is returned. The number of bytes written is /// guaranteed to be a multiple of 8. pub(crate) fn write_to( &self, mut dst: &mut [u8], ) -> Result<usize, SerializeError> { let nwrite = self.write_to_len(); if dst.len() < nwrite { return Err(SerializeError::buffer_too_small("byte class map")); } for b in 0..=255 { dst[0] = self.get(b); dst = &mut dst[1..]; } Ok(nwrite) } /// Returns the total number of bytes written by `write_to`. pub(crate) fn write_to_len(&self) -> usize { 256 } /// Set the equivalence class for the given byte. #[inline] pub fn set(&mut self, byte: u8, class: u8) { self.0[usize::from(byte)] = class; } /// Get the equivalence class for the given byte. #[inline] pub fn get(&self, byte: u8) -> u8 { self.0[usize::from(byte)] } /// Get the equivalence class for the given haystack unit and return the /// class as a `usize`. #[inline] pub fn get_by_unit(&self, unit: Unit) -> usize { match unit.0 { UnitKind::U8(b) => usize::from(self.get(b)), UnitKind::EOI(b) => usize::from(b), } } /// Create a unit that represents the "end of input" sentinel based on the /// number of equivalence classes. #[inline] pub fn eoi(&self) -> Unit { // The alphabet length already includes the EOI sentinel, hence why // we subtract 1. Unit::eoi(self.alphabet_len().checked_sub(1).unwrap()) } /// Return the total number of elements in the alphabet represented by /// these equivalence classes. Equivalently, this returns the total number /// of equivalence classes. #[inline] pub fn alphabet_len(&self) -> usize { // Add one since the number of equivalence classes is one bigger than // the last one. But add another to account for the final EOI class // that isn't explicitly represented. usize::from(self.0[255]) + 1 + 1 } /// Returns the stride, as a base-2 exponent, required for these /// equivalence classes. /// /// The stride is always the smallest power of 2 that is greater than or /// equal to the alphabet length, and the `stride2` returned here is the /// exponent applied to `2` to get the smallest power. This is done so that /// converting between premultiplied state IDs and indices can be done with /// shifts alone, which is much faster than integer division. #[inline] pub fn stride2(&self) -> usize { let zeros = self.alphabet_len().next_power_of_two().trailing_zeros(); usize::try_from(zeros).unwrap() } /// Returns true if and only if every byte in this class maps to its own /// equivalence class. Equivalently, there are 257 equivalence classes /// and each class contains either exactly one byte or corresponds to the /// singleton class containing the "end of input" sentinel. 
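    ///
    /// # Example
    ///
    /// A minimal sketch of the distinction:
    ///
    /// ```
    /// use regex_automata::util::alphabet::ByteClasses;
    ///
    /// assert!(ByteClasses::singletons().is_singleton());
    /// assert!(!ByteClasses::empty().is_singleton());
    /// ```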
#[inline] pub fn is_singleton(&self) -> bool { self.alphabet_len() == 257 } /// Returns an iterator over all equivalence classes in this set. #[inline] pub fn iter(&self) -> ByteClassIter<'_> { ByteClassIter { classes: self, i: 0 } } /// Returns an iterator over a sequence of representative bytes from each /// equivalence class within the range of bytes given. /// /// When the given range is unbounded on both sides, the iterator yields /// exactly N items, where N is equivalent to the number of equivalence /// classes. Each item is an arbitrary byte drawn from each equivalence /// class. /// /// This is useful when one is determinizing an NFA and the NFA's alphabet /// hasn't been converted to equivalence classes. Picking an arbitrary byte /// from each equivalence class then permits a full exploration of the NFA /// instead of using every possible byte value and thus potentially saves /// quite a lot of redundant work. /// /// # Example /// /// This shows an example of what a complete sequence of representatives /// might look like from a real example. /// /// ``` /// use regex_automata::{nfa::thompson::NFA, util::alphabet::Unit}; /// /// let nfa = NFA::new("[a-z]+")?; /// let classes = nfa.byte_classes(); /// let reps: Vec<Unit> = classes.representatives(..).collect(); /// // Note that the specific byte values yielded are not guaranteed! /// let expected = vec![ /// Unit::u8(b'\x00'), /// Unit::u8(b'a'), /// Unit::u8(b'{'), /// Unit::eoi(3), /// ]; /// assert_eq!(expected, reps); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// Note though, that you can ask for an arbitrary range of bytes, and only /// representatives for that range will be returned: /// /// ``` /// use regex_automata::{nfa::thompson::NFA, util::alphabet::Unit}; /// /// let nfa = NFA::new("[a-z]+")?; /// let classes = nfa.byte_classes(); /// let reps: Vec<Unit> = classes.representatives(b'A'..=b'z').collect(); /// // Note that the specific byte values yielded are not guaranteed! /// let expected = vec![ /// Unit::u8(b'A'), /// Unit::u8(b'a'), /// ]; /// assert_eq!(expected, reps); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn representatives<R: core::ops::RangeBounds<u8>>( &self, range: R, ) -> ByteClassRepresentatives<'_> { use core::ops::Bound; let cur_byte = match range.start_bound() { Bound::Included(&i) => usize::from(i), Bound::Excluded(&i) => usize::from(i).checked_add(1).unwrap(), Bound::Unbounded => 0, }; let end_byte = match range.end_bound() { Bound::Included(&i) => { Some(usize::from(i).checked_add(1).unwrap()) } Bound::Excluded(&i) => Some(usize::from(i)), Bound::Unbounded => None, }; assert_ne!( cur_byte, usize::MAX, "start range must be less than usize::MAX", ); ByteClassRepresentatives { classes: self, cur_byte, end_byte, last_class: None, } } /// Returns an iterator of the bytes in the given equivalence class. /// /// This is useful when one needs to know the actual bytes that belong to /// an equivalence class. For example, conceptually speaking, accelerating /// a DFA state occurs when a state only has a few outgoing transitions. /// But in reality, what is required is that there are only a small /// number of distinct bytes that can lead to an outgoing transition. The /// difference is that any one transition can correspond to an equivalence /// class which may contains many bytes. Therefore, DFA state acceleration /// considers the actual elements in each equivalence class of each /// outgoing transition. 
/// /// # Example /// /// This shows an example of how to get all of the elements in an /// equivalence class. /// /// ``` /// use regex_automata::{nfa::thompson::NFA, util::alphabet::Unit}; /// /// let nfa = NFA::new("[a-z]+")?; /// let classes = nfa.byte_classes(); /// let elements: Vec<Unit> = classes.elements(Unit::u8(1)).collect(); /// let expected: Vec<Unit> = (b'a'..=b'z').map(Unit::u8).collect(); /// assert_eq!(expected, elements); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn elements(&self, class: Unit) -> ByteClassElements { ByteClassElements { classes: self, class, byte: 0 } } /// Returns an iterator of byte ranges in the given equivalence class. /// /// That is, a sequence of contiguous ranges are returned. Typically, every /// class maps to a single contiguous range. fn element_ranges(&self, class: Unit) -> ByteClassElementRanges { ByteClassElementRanges { elements: self.elements(class), range: None } } } impl Default for ByteClasses { fn default() -> ByteClasses { ByteClasses::singletons() } } impl core::fmt::Debug for ByteClasses { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { if self.is_singleton() { write!(f, "ByteClasses({{singletons}})") } else { write!(f, "ByteClasses(")?; for (i, class) in self.iter().enumerate() { if i > 0 { write!(f, ", ")?; } write!(f, "{:?} => [", class.as_usize())?; for (start, end) in self.element_ranges(class) { if start == end { write!(f, "{:?}", start)?; } else { write!(f, "{:?}-{:?}", start, end)?; } } write!(f, "]")?; } write!(f, ")") } } } /// An iterator over each equivalence class. /// /// The last element in this iterator always corresponds to [`Unit::eoi`]. /// /// This is created by the [`ByteClasses::iter`] method. /// /// The lifetime `'a` refers to the lifetime of the byte classes that this /// iterator was created from. #[derive(Debug)] pub struct ByteClassIter<'a> { classes: &'a ByteClasses, i: usize, } impl<'a> Iterator for ByteClassIter<'a> { type Item = Unit; fn next(&mut self) -> Option<Unit> { if self.i + 1 == self.classes.alphabet_len() { self.i += 1; Some(self.classes.eoi()) } else if self.i < self.classes.alphabet_len() { let class = u8::try_from(self.i).unwrap(); self.i += 1; Some(Unit::u8(class)) } else { None } } } /// An iterator over representative bytes from each equivalence class. /// /// This is created by the [`ByteClasses::representatives`] method. /// /// The lifetime `'a` refers to the lifetime of the byte classes that this /// iterator was created from. #[derive(Debug)] pub struct ByteClassRepresentatives<'a> { classes: &'a ByteClasses, cur_byte: usize, end_byte: Option<usize>, last_class: Option<u8>, } impl<'a> Iterator for ByteClassRepresentatives<'a> { type Item = Unit; fn next(&mut self) -> Option<Unit> { while self.cur_byte < self.end_byte.unwrap_or(256) { let byte = u8::try_from(self.cur_byte).unwrap(); let class = self.classes.get(byte); self.cur_byte += 1; if self.last_class != Some(class) { self.last_class = Some(class); return Some(Unit::u8(byte)); } } if self.cur_byte != usize::MAX && self.end_byte.is_none() { // Using usize::MAX as a sentinel is OK because we ban usize::MAX // from appearing as a start bound in iterator construction. But // why do it this way? Well, we want to return the EOI class // whenever the end of the given range is unbounded because EOI // isn't really a "byte" per se, so the only way it should be // excluded is if there is a bounded end to the range. 
Therefore, // when the end is unbounded, we just need to know whether we've // reported EOI or not. When we do, we set cur_byte to a value it // can never otherwise be. self.cur_byte = usize::MAX; return Some(self.classes.eoi()); } None } } /// An iterator over all elements in an equivalence class. /// /// This is created by the [`ByteClasses::elements`] method. /// /// The lifetime `'a` refers to the lifetime of the byte classes that this /// iterator was created from. #[derive(Debug)] pub struct ByteClassElements<'a> { classes: &'a ByteClasses, class: Unit, byte: usize, } impl<'a> Iterator for ByteClassElements<'a> { type Item = Unit; fn next(&mut self) -> Option<Unit> { while self.byte < 256 { let byte = u8::try_from(self.byte).unwrap(); self.byte += 1; if self.class.is_byte(self.classes.get(byte)) { return Some(Unit::u8(byte)); } } if self.byte < 257 { self.byte += 1; if self.class.is_eoi() { return Some(Unit::eoi(256)); } } None } } /// An iterator over all elements in an equivalence class expressed as a /// sequence of contiguous ranges. #[derive(Debug)] struct ByteClassElementRanges<'a> { elements: ByteClassElements<'a>, range: Option<(Unit, Unit)>, } impl<'a> Iterator for ByteClassElementRanges<'a> { type Item = (Unit, Unit); fn next(&mut self) -> Option<(Unit, Unit)> { loop { let element = match self.elements.next() { None => return self.range.take(), Some(element) => element, }; match self.range.take() { None => { self.range = Some((element, element)); } Some((start, end)) => { if end.as_usize() + 1 != element.as_usize() || element.is_eoi() { self.range = Some((element, element)); return Some((start, end)); } self.range = Some((start, element)); } } } } } /// A partitioning of bytes into equivalence classes. /// /// A byte class set keeps track of an *approximation* of equivalence classes /// of bytes during NFA construction. That is, every byte in an equivalence /// class cannot discriminate between a match and a non-match. /// /// For example, in the regex `[ab]+`, the bytes `a` and `b` would be in the /// same equivalence class because it never matters whether an `a` or a `b` is /// seen, and no combination of `a`s and `b`s in the text can discriminate a /// match. /// /// Note though that this does not compute the minimal set of equivalence /// classes. For example, in the regex `[ac]+`, both `a` and `c` are in the /// same equivalence class for the same reason that `a` and `b` are in the /// same equivalence class in the aforementioned regex. However, in this /// implementation, `a` and `c` are put into distinct equivalence classes. The /// reason for this is implementation complexity. In the future, we should /// endeavor to compute the minimal equivalence classes since they can have a /// rather large impact on the size of the DFA. (Doing this will likely require /// rethinking how equivalence classes are computed, including changing the /// representation here, which is only able to group contiguous bytes into the /// same equivalence class.) #[cfg(feature = "alloc")] #[derive(Clone, Debug)] pub(crate) struct ByteClassSet(ByteSet); #[cfg(feature = "alloc")] impl Default for ByteClassSet { fn default() -> ByteClassSet { ByteClassSet::empty() } } #[cfg(feature = "alloc")] impl ByteClassSet { /// Create a new set of byte classes where all bytes are part of the same /// equivalence class. 
pub(crate) fn empty() -> Self { ByteClassSet(ByteSet::empty()) } /// Indicate the the range of byte given (inclusive) can discriminate a /// match between it and all other bytes outside of the range. pub(crate) fn set_range(&mut self, start: u8, end: u8) { debug_assert!(start <= end); if start > 0 { self.0.add(start - 1); } self.0.add(end); } /// Add the contiguous ranges in the set given to this byte class set. pub(crate) fn add_set(&mut self, set: &ByteSet) { for (start, end) in set.iter_ranges() { self.set_range(start, end); } } /// Convert this boolean set to a map that maps all byte values to their /// corresponding equivalence class. The last mapping indicates the largest /// equivalence class identifier (which is never bigger than 255). pub(crate) fn byte_classes(&self) -> ByteClasses { let mut classes = ByteClasses::empty(); let mut class = 0u8; let mut b = 0u8; loop { classes.set(b, class); if b == 255 { break; } if self.0.contains(b) { class = class.checked_add(1).unwrap(); } b = b.checked_add(1).unwrap(); } classes } } /// A simple set of bytes that is reasonably cheap to copy and allocation free. #[derive(Clone, Copy, Debug, Default, Eq, PartialEq)] pub(crate) struct ByteSet { bits: BitSet, } /// The representation of a byte set. Split out so that we can define a /// convenient Debug impl for it while keeping "ByteSet" in the output. #[derive(Clone, Copy, Default, Eq, PartialEq)] struct BitSet([u128; 2]); impl ByteSet { /// Create an empty set of bytes. pub(crate) fn empty() -> ByteSet { ByteSet { bits: BitSet([0; 2]) } } /// Add a byte to this set. /// /// If the given byte already belongs to this set, then this is a no-op. pub(crate) fn add(&mut self, byte: u8) { let bucket = byte / 128; let bit = byte % 128; self.bits.0[usize::from(bucket)] |= 1 << bit; } /// Remove a byte from this set. /// /// If the given byte is not in this set, then this is a no-op. pub(crate) fn remove(&mut self, byte: u8) { let bucket = byte / 128; let bit = byte % 128; self.bits.0[usize::from(bucket)] &= !(1 << bit); } /// Return true if and only if the given byte is in this set. pub(crate) fn contains(&self, byte: u8) -> bool { let bucket = byte / 128; let bit = byte % 128; self.bits.0[usize::from(bucket)] & (1 << bit) > 0 } /// Return true if and only if the given inclusive range of bytes is in /// this set. pub(crate) fn contains_range(&self, start: u8, end: u8) -> bool { (start..=end).all(|b| self.contains(b)) } /// Returns an iterator over all bytes in this set. pub(crate) fn iter(&self) -> ByteSetIter { ByteSetIter { set: self, b: 0 } } /// Returns an iterator over all contiguous ranges of bytes in this set. pub(crate) fn iter_ranges(&self) -> ByteSetRangeIter { ByteSetRangeIter { set: self, b: 0 } } /// Return true if and only if this set is empty. #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn is_empty(&self) -> bool { self.bits.0 == [0, 0] } /// Deserializes a byte set from the given slice. If the slice is of /// incorrect length or is otherwise malformed, then an error is returned. /// Upon success, the number of bytes read along with the set are returned. /// The number of bytes read is always a multiple of 8. 
pub(crate) fn from_bytes( slice: &[u8], ) -> Result<(ByteSet, usize), DeserializeError> { use core::mem::size_of; wire::check_slice_len(slice, 2 * size_of::<u128>(), "byte set")?; let mut nread = 0; let (low, nr) = wire::try_read_u128(slice, "byte set low bucket")?; nread += nr; let (high, nr) = wire::try_read_u128(slice, "byte set high bucket")?; nread += nr; Ok((ByteSet { bits: BitSet([low, high]) }, nread)) } /// Writes this byte set to the given byte buffer. If the given buffer is /// too small, then an error is returned. Upon success, the total number of /// bytes written is returned. The number of bytes written is guaranteed to /// be a multiple of 8. pub(crate) fn write_to<E: crate::util::wire::Endian>( &self, dst: &mut [u8], ) -> Result<usize, SerializeError> { use core::mem::size_of; let nwrite = self.write_to_len(); if dst.len() < nwrite { return Err(SerializeError::buffer_too_small("byte set")); } let mut nw = 0; E::write_u128(self.bits.0[0], &mut dst[nw..]); nw += size_of::<u128>(); E::write_u128(self.bits.0[1], &mut dst[nw..]); nw += size_of::<u128>(); assert_eq!(nwrite, nw, "expected to write certain number of bytes",); assert_eq!( nw % 8, 0, "expected to write multiple of 8 bytes for byte set", ); Ok(nw) } /// Returns the total number of bytes written by `write_to`. pub(crate) fn write_to_len(&self) -> usize { 2 * core::mem::size_of::<u128>() } } impl core::fmt::Debug for BitSet { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { let mut fmtd = f.debug_set(); for b in 0u8..=255 { if (ByteSet { bits: *self }).contains(b) { fmtd.entry(&b); } } fmtd.finish() } } #[derive(Debug)] pub(crate) struct ByteSetIter<'a> { set: &'a ByteSet, b: usize, } impl<'a> Iterator for ByteSetIter<'a> { type Item = u8; fn next(&mut self) -> Option<u8> { while self.b <= 255 { let b = u8::try_from(self.b).unwrap(); self.b += 1; if self.set.contains(b) { return Some(b); } } None } } #[derive(Debug)] pub(crate) struct ByteSetRangeIter<'a> { set: &'a ByteSet, b: usize, } impl<'a> Iterator for ByteSetRangeIter<'a> { type Item = (u8, u8); fn next(&mut self) -> Option<(u8, u8)> { let asu8 = |n: usize| u8::try_from(n).unwrap(); while self.b <= 255 { let start = asu8(self.b); self.b += 1; if !self.set.contains(start) { continue; } let mut end = start; while self.b <= 255 && self.set.contains(asu8(self.b)) { end = asu8(self.b); self.b += 1; } return Some((start, end)); } None } } #[cfg(all(test, feature = "alloc"))] mod tests { use alloc::{vec, vec::Vec}; use super::*; #[test] fn byte_classes() { let mut set = ByteClassSet::empty(); set.set_range(b'a', b'z'); let classes = set.byte_classes(); assert_eq!(classes.get(0), 0); assert_eq!(classes.get(1), 0); assert_eq!(classes.get(2), 0); assert_eq!(classes.get(b'a' - 1), 0); assert_eq!(classes.get(b'a'), 1); assert_eq!(classes.get(b'm'), 1); assert_eq!(classes.get(b'z'), 1); assert_eq!(classes.get(b'z' + 1), 2); assert_eq!(classes.get(254), 2); assert_eq!(classes.get(255), 2); let mut set = ByteClassSet::empty(); set.set_range(0, 2); set.set_range(4, 6); let classes = set.byte_classes(); assert_eq!(classes.get(0), 0); assert_eq!(classes.get(1), 0); assert_eq!(classes.get(2), 0); assert_eq!(classes.get(3), 1); assert_eq!(classes.get(4), 2); assert_eq!(classes.get(5), 2); assert_eq!(classes.get(6), 2); assert_eq!(classes.get(7), 3); assert_eq!(classes.get(255), 3); } #[test] fn full_byte_classes() { let mut set = ByteClassSet::empty(); for b in 0u8..=255 { set.set_range(b, b); } assert_eq!(set.byte_classes().alphabet_len(), 257); } #[test] fn 
elements_typical() { let mut set = ByteClassSet::empty(); set.set_range(b'b', b'd'); set.set_range(b'g', b'm'); set.set_range(b'z', b'z'); let classes = set.byte_classes(); // class 0: \x00-a // class 1: b-d // class 2: e-f // class 3: g-m // class 4: n-y // class 5: z-z // class 6: \x7B-\xFF // class 7: EOI assert_eq!(classes.alphabet_len(), 8); let elements = classes.elements(Unit::u8(0)).collect::<Vec<_>>(); assert_eq!(elements.len(), 98); assert_eq!(elements[0], Unit::u8(b'\x00')); assert_eq!(elements[97], Unit::u8(b'a')); let elements = classes.elements(Unit::u8(1)).collect::<Vec<_>>(); assert_eq!( elements, vec![Unit::u8(b'b'), Unit::u8(b'c'), Unit::u8(b'd')], ); let elements = classes.elements(Unit::u8(2)).collect::<Vec<_>>(); assert_eq!(elements, vec![Unit::u8(b'e'), Unit::u8(b'f')],); let elements = classes.elements(Unit::u8(3)).collect::<Vec<_>>(); assert_eq!( elements, vec![ Unit::u8(b'g'), Unit::u8(b'h'), Unit::u8(b'i'), Unit::u8(b'j'), Unit::u8(b'k'), Unit::u8(b'l'), Unit::u8(b'm'), ], ); let elements = classes.elements(Unit::u8(4)).collect::<Vec<_>>(); assert_eq!(elements.len(), 12); assert_eq!(elements[0], Unit::u8(b'n')); assert_eq!(elements[11], Unit::u8(b'y')); let elements = classes.elements(Unit::u8(5)).collect::<Vec<_>>(); assert_eq!(elements, vec![Unit::u8(b'z')]); let elements = classes.elements(Unit::u8(6)).collect::<Vec<_>>(); assert_eq!(elements.len(), 133); assert_eq!(elements[0], Unit::u8(b'\x7B')); assert_eq!(elements[132], Unit::u8(b'\xFF')); let elements = classes.elements(Unit::eoi(7)).collect::<Vec<_>>(); assert_eq!(elements, vec![Unit::eoi(256)]); } #[test] fn elements_singletons() { let classes = ByteClasses::singletons(); assert_eq!(classes.alphabet_len(), 257); let elements = classes.elements(Unit::u8(b'a')).collect::<Vec<_>>(); assert_eq!(elements, vec![Unit::u8(b'a')]); let elements = classes.elements(Unit::eoi(5)).collect::<Vec<_>>(); assert_eq!(elements, vec![Unit::eoi(256)]); } #[test] fn elements_empty() { let classes = ByteClasses::empty(); assert_eq!(classes.alphabet_len(), 2); let elements = classes.elements(Unit::u8(0)).collect::<Vec<_>>(); assert_eq!(elements.len(), 256); assert_eq!(elements[0], Unit::u8(b'\x00')); assert_eq!(elements[255], Unit::u8(b'\xFF')); let elements = classes.elements(Unit::eoi(1)).collect::<Vec<_>>(); assert_eq!(elements, vec![Unit::eoi(256)]); } #[test] fn representatives() { let mut set = ByteClassSet::empty(); set.set_range(b'b', b'd'); set.set_range(b'g', b'm'); set.set_range(b'z', b'z'); let classes = set.byte_classes(); let got: Vec<Unit> = classes.representatives(..).collect(); let expected = vec![ Unit::u8(b'\x00'), Unit::u8(b'b'), Unit::u8(b'e'), Unit::u8(b'g'), Unit::u8(b'n'), Unit::u8(b'z'), Unit::u8(b'\x7B'), Unit::eoi(7), ]; assert_eq!(expected, got); let got: Vec<Unit> = classes.representatives(..0).collect(); assert!(got.is_empty()); let got: Vec<Unit> = classes.representatives(1..1).collect(); assert!(got.is_empty()); let got: Vec<Unit> = classes.representatives(255..255).collect(); assert!(got.is_empty()); // A weird case that is the only guaranteed to way to get an iterator // of just the EOI class by excluding all possible byte values. 
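        // (With Bound::Excluded(255) as the start, iteration begins at 256,
        // which skips every byte value and leaves only the EOI sentinel to
        // report.)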
let got: Vec<Unit> = classes .representatives(( core::ops::Bound::Excluded(255), core::ops::Bound::Unbounded, )) .collect(); let expected = vec![Unit::eoi(7)]; assert_eq!(expected, got); let got: Vec<Unit> = classes.representatives(..=255).collect(); let expected = vec![ Unit::u8(b'\x00'), Unit::u8(b'b'), Unit::u8(b'e'), Unit::u8(b'g'), Unit::u8(b'n'), Unit::u8(b'z'), Unit::u8(b'\x7B'), ]; assert_eq!(expected, got); let got: Vec<Unit> = classes.representatives(b'b'..=b'd').collect(); let expected = vec![Unit::u8(b'b')]; assert_eq!(expected, got); let got: Vec<Unit> = classes.representatives(b'a'..=b'd').collect(); let expected = vec![Unit::u8(b'a'), Unit::u8(b'b')]; assert_eq!(expected, got); let got: Vec<Unit> = classes.representatives(b'b'..=b'e').collect(); let expected = vec![Unit::u8(b'b'), Unit::u8(b'e')]; assert_eq!(expected, got); let got: Vec<Unit> = classes.representatives(b'A'..=b'Z').collect(); let expected = vec![Unit::u8(b'A')]; assert_eq!(expected, got); let got: Vec<Unit> = classes.representatives(b'A'..=b'z').collect(); let expected = vec![ Unit::u8(b'A'), Unit::u8(b'b'), Unit::u8(b'e'), Unit::u8(b'g'), Unit::u8(b'n'), Unit::u8(b'z'), ]; assert_eq!(expected, got); let got: Vec<Unit> = classes.representatives(b'z'..).collect(); let expected = vec![Unit::u8(b'z'), Unit::u8(b'\x7B'), Unit::eoi(7)]; assert_eq!(expected, got); let got: Vec<Unit> = classes.representatives(b'z'..=0xFF).collect(); let expected = vec![Unit::u8(b'z'), Unit::u8(b'\x7B')]; assert_eq!(expected, got); } } <file_sep>/regex-automata/src/hybrid/dfa.rs /*! Types and routines specific to lazy DFAs. This module is the home of [`hybrid::dfa::DFA`](DFA). This module also contains a [`hybrid::dfa::Builder`](Builder) and a [`hybrid::dfa::Config`](Config) for configuring and building a lazy DFA. */ use core::{iter, mem::size_of}; use alloc::vec::Vec; use crate::{ hybrid::{ error::{BuildError, CacheError}, id::{LazyStateID, LazyStateIDError}, search, }, nfa::thompson, util::{ alphabet::{self, ByteClasses, ByteSet}, determinize::{self, State, StateBuilderEmpty, StateBuilderNFA}, empty, prefilter::Prefilter, primitives::{PatternID, StateID as NFAStateID}, search::{ Anchored, HalfMatch, Input, MatchError, MatchKind, PatternSet, }, sparse_set::SparseSets, start::{Start, StartByteMap}, }, }; /// The minimum number of states that a lazy DFA's cache size must support. /// /// This is checked at time of construction to ensure that at least some small /// number of states can fit in the given capacity allotment. If we can't fit /// at least this number of states, then the thinking is that it's pretty /// senseless to use the lazy DFA. More to the point, parts of the code do /// assume that the cache can fit at least some small number of states. const MIN_STATES: usize = SENTINEL_STATES + 2; /// The number of "sentinel" states that get added to every lazy DFA. /// /// These are special states indicating status conditions of a search: unknown, /// dead and quit. These states in particular also use zero NFA states, so /// their memory usage is quite small. This is relevant for computing the /// minimum memory needed for a lazy DFA cache. const SENTINEL_STATES: usize = 3; /// A hybrid NFA/DFA (also called a "lazy DFA") for regex searching. /// /// A lazy DFA is a DFA that builds itself at search time. It otherwise has /// very similar characteristics as a [`dense::DFA`](crate::dfa::dense::DFA). /// Indeed, both support precisely the same regex features with precisely the /// same semantics. 
///
/// Whereas a `dense::DFA` must be completely built to handle any input before
/// it may be used for search, a lazy DFA starts off effectively empty. During
/// a search, a lazy DFA will build itself depending on whether it has already
/// computed the next transition or not. If it has, then it looks a lot like
/// a `dense::DFA` internally: it does a very fast table-based access to find
/// the next transition. Otherwise, if the state hasn't been computed, then it
/// does determinization _for that specific transition_ to compute the next DFA
/// state.
///
/// The main selling point of a lazy DFA is that, in practice, it has
/// the performance profile of a `dense::DFA` without the weakness of it
/// taking worst case exponential time to build. Indeed, for each byte of
/// input, the lazy DFA will construct at most one new DFA state. Thus, a
/// lazy DFA achieves worst case `O(mn)` time for regex search (where `m ~
/// pattern.len()` and `n ~ haystack.len()`).
///
/// The main downsides of a lazy DFA are:
///
/// 1. It requires mutable "cache" space during search. This is where the
/// transition table, among other things, is stored.
/// 2. In pathological cases (e.g., if the cache is too small), it will run
/// out of room and either require a bigger cache capacity or will repeatedly
/// clear the cache and thus repeatedly regenerate DFA states. Overall, this
/// will tend to be slower than a typical NFA simulation.
///
/// # Capabilities
///
/// Like a `dense::DFA`, a single lazy DFA fundamentally supports the following
/// operations:
///
/// 1. Detection of a match.
/// 2. Location of the end of a match.
/// 3. In the case of a lazy DFA with multiple patterns, which pattern matched
/// is reported as well.
///
/// A notable absence from the above list of capabilities is the location of
/// the *start* of a match. In order to provide both the start and end of
/// a match, *two* lazy DFAs are required. This functionality is provided by a
/// [`Regex`](crate::hybrid::regex::Regex).
///
/// # Example
///
/// This shows how to build a lazy DFA with the default configuration and
/// execute a search. Notice how, in contrast to a `dense::DFA`, we must create
/// a cache and pass it to our search routine.
///
/// ```
/// use regex_automata::{hybrid::dfa::DFA, HalfMatch, Input};
///
/// let dfa = DFA::new("foo[0-9]+")?;
/// let mut cache = dfa.create_cache();
///
/// let expected = Some(HalfMatch::must(0, 8));
/// assert_eq!(expected, dfa.try_search_fwd(
///     &mut cache, &Input::new("foo12345"))?,
/// );
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
#[derive(Clone, Debug)]
pub struct DFA {
    config: Config,
    nfa: thompson::NFA,
    stride2: usize,
    start_map: StartByteMap,
    classes: ByteClasses,
    quitset: ByteSet,
    cache_capacity: usize,
}

impl DFA {
    /// Parse the given regular expression using a default configuration and
    /// return the corresponding lazy DFA.
    ///
    /// If you want a non-default configuration, then use the [`Builder`] to
    /// set your own configuration.
/// /// # Example /// /// ``` /// use regex_automata::{hybrid::dfa::DFA, HalfMatch, Input}; /// /// let dfa = DFA::new("foo[0-9]+bar")?; /// let mut cache = dfa.create_cache(); /// /// let expected = HalfMatch::must(0, 11); /// assert_eq!( /// Some(expected), /// dfa.try_search_fwd(&mut cache, &Input::new("foo12345bar"))?, /// ); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[cfg(feature = "syntax")] pub fn new(pattern: &str) -> Result<DFA, BuildError> { DFA::builder().build(pattern) } /// Parse the given regular expressions using a default configuration and /// return the corresponding lazy multi-DFA. /// /// If you want a non-default configuration, then use the [`Builder`] to /// set your own configuration. /// /// # Example /// /// ``` /// use regex_automata::{hybrid::dfa::DFA, HalfMatch, Input}; /// /// let dfa = DFA::new_many(&["[0-9]+", "[a-z]+"])?; /// let mut cache = dfa.create_cache(); /// /// let expected = HalfMatch::must(1, 3); /// assert_eq!( /// Some(expected), /// dfa.try_search_fwd(&mut cache, &Input::new("foo12345bar"))?, /// ); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[cfg(feature = "syntax")] pub fn new_many<P: AsRef<str>>(patterns: &[P]) -> Result<DFA, BuildError> { DFA::builder().build_many(patterns) } /// Create a new lazy DFA that matches every input. /// /// # Example /// /// ``` /// use regex_automata::{hybrid::dfa::DFA, HalfMatch, Input}; /// /// let dfa = DFA::always_match()?; /// let mut cache = dfa.create_cache(); /// /// let expected = HalfMatch::must(0, 0); /// assert_eq!(Some(expected), dfa.try_search_fwd( /// &mut cache, &Input::new(""))?, /// ); /// assert_eq!(Some(expected), dfa.try_search_fwd( /// &mut cache, &Input::new("foo"))?, /// ); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn always_match() -> Result<DFA, BuildError> { let nfa = thompson::NFA::always_match(); Builder::new().build_from_nfa(nfa) } /// Create a new lazy DFA that never matches any input. /// /// # Example /// /// ``` /// use regex_automata::{hybrid::dfa::DFA, Input}; /// /// let dfa = DFA::never_match()?; /// let mut cache = dfa.create_cache(); /// /// assert_eq!(None, dfa.try_search_fwd(&mut cache, &Input::new(""))?); /// assert_eq!(None, dfa.try_search_fwd(&mut cache, &Input::new("foo"))?); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn never_match() -> Result<DFA, BuildError> { let nfa = thompson::NFA::never_match(); Builder::new().build_from_nfa(nfa) } /// Return a default configuration for a `DFA`. /// /// This is a convenience routine to avoid needing to import the [`Config`] /// type when customizing the construction of a lazy DFA. /// /// # Example /// /// This example shows how to build a lazy DFA that heuristically supports /// Unicode word boundaries. /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{hybrid::dfa::DFA, HalfMatch, MatchError, Input}; /// /// let re = DFA::builder() /// .configure(DFA::config().unicode_word_boundary(true)) /// .build(r"\b\w+\b")?; /// let mut cache = re.create_cache(); /// /// // Since our haystack is all ASCII, the DFA search sees then and knows /// // it is legal to interpret Unicode word boundaries as ASCII word /// // boundaries. /// let input = Input::new("!!foo!!"); /// let expected = HalfMatch::must(0, 5); /// assert_eq!(Some(expected), re.try_search_fwd(&mut cache, &input)?); /// /// // But if our haystack contains non-ASCII, then the search will fail /// // with an error. 
/// let input = Input::new("!!βββ!!"); /// let expected = MatchError::quit(b'\xCE', 2); /// assert_eq!(Err(expected), re.try_search_fwd(&mut cache, &input)); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn config() -> Config { Config::new() } /// Return a builder for configuring the construction of a `Regex`. /// /// This is a convenience routine to avoid needing to import the /// [`Builder`] type in common cases. /// /// # Example /// /// This example shows how to use the builder to disable UTF-8 mode /// everywhere for lazy DFAs. /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{hybrid::dfa::DFA, util::syntax, HalfMatch, Input}; /// /// let re = DFA::builder() /// .syntax(syntax::Config::new().utf8(false)) /// .build(r"foo(?-u:[^b])ar.*")?; /// let mut cache = re.create_cache(); /// /// let input = Input::new(b"\xFEfoo\xFFarzz\xE2\x98\xFF\n"); /// let expected = Some(HalfMatch::must(0, 9)); /// let got = re.try_search_fwd(&mut cache, &input)?; /// assert_eq!(expected, got); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn builder() -> Builder { Builder::new() } /// Create a new cache for this lazy DFA. /// /// The cache returned should only be used for searches for this /// lazy DFA. If you want to reuse the cache for another DFA, then /// you must call [`Cache::reset`] with that DFA (or, equivalently, /// [`DFA::reset_cache`]). pub fn create_cache(&self) -> Cache { Cache::new(self) } /// Reset the given cache such that it can be used for searching with the /// this lazy DFA (and only this DFA). /// /// A cache reset permits reusing memory already allocated in this cache /// with a different lazy DFA. /// /// Resetting a cache sets its "clear count" to 0. This is relevant if the /// lazy DFA has been configured to "give up" after it has cleared the /// cache a certain number of times. /// /// Any lazy state ID generated by the cache prior to resetting it is /// invalid after the reset. /// /// # Example /// /// This shows how to re-purpose a cache for use with a different DFA. /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{hybrid::dfa::DFA, HalfMatch, Input}; /// /// let dfa1 = DFA::new(r"\w")?; /// let dfa2 = DFA::new(r"\W")?; /// /// let mut cache = dfa1.create_cache(); /// assert_eq!( /// Some(HalfMatch::must(0, 2)), /// dfa1.try_search_fwd(&mut cache, &Input::new("Δ"))?, /// ); /// /// // Using 'cache' with dfa2 is not allowed. It may result in panics or /// // incorrect results. In order to re-purpose the cache, we must reset /// // it with the DFA we'd like to use it with. /// // /// // Similarly, after this reset, using the cache with 'dfa1' is also not /// // allowed. /// dfa2.reset_cache(&mut cache); /// assert_eq!( /// Some(HalfMatch::must(0, 3)), /// dfa2.try_search_fwd(&mut cache, &Input::new("☃"))?, /// ); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn reset_cache(&self, cache: &mut Cache) { Lazy::new(self, cache).reset_cache() } /// Returns the total number of patterns compiled into this lazy DFA. /// /// In the case of a DFA that contains no patterns, this returns `0`. 
/// /// # Example /// /// This example shows the pattern length for a DFA that never matches: /// /// ``` /// use regex_automata::hybrid::dfa::DFA; /// /// let dfa = DFA::never_match()?; /// assert_eq!(dfa.pattern_len(), 0); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// And another example for a DFA that matches at every position: /// /// ``` /// use regex_automata::hybrid::dfa::DFA; /// /// let dfa = DFA::always_match()?; /// assert_eq!(dfa.pattern_len(), 1); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// And finally, a DFA that was constructed from multiple patterns: /// /// ``` /// use regex_automata::hybrid::dfa::DFA; /// /// let dfa = DFA::new_many(&["[0-9]+", "[a-z]+", "[A-Z]+"])?; /// assert_eq!(dfa.pattern_len(), 3); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn pattern_len(&self) -> usize { self.nfa.pattern_len() } /// Returns the equivalence classes that make up the alphabet for this DFA. /// /// Unless [`Config::byte_classes`] was disabled, it is possible that /// multiple distinct bytes are grouped into the same equivalence class /// if it is impossible for them to discriminate between a match and a /// non-match. This has the effect of reducing the overall alphabet size /// and in turn potentially substantially reducing the size of the DFA's /// transition table. /// /// The downside of using equivalence classes like this is that every state /// transition will automatically use this map to convert an arbitrary /// byte to its corresponding equivalence class. In practice this has a /// negligible impact on performance. pub fn byte_classes(&self) -> &ByteClasses { &self.classes } /// Returns this lazy DFA's configuration. pub fn get_config(&self) -> &Config { &self.config } /// Returns a reference to the underlying NFA. pub fn get_nfa(&self) -> &thompson::NFA { &self.nfa } /// Returns the stride, as a base-2 exponent, required for these /// equivalence classes. /// /// The stride is always the smallest power of 2 that is greater than or /// equal to the alphabet length. This is done so that converting between /// state IDs and indices can be done with shifts alone, which is much /// faster than integer division. fn stride2(&self) -> usize { self.stride2 } /// Returns the total stride for every state in this lazy DFA. This /// corresponds to the total number of transitions used by each state in /// this DFA's transition table. fn stride(&self) -> usize { 1 << self.stride2() } /// Returns the memory usage, in bytes, of this lazy DFA. /// /// This does **not** include the stack size used up by this lazy DFA. To /// compute that, use `std::mem::size_of::<DFA>()`. This also does not /// include the size of the `Cache` used. /// /// This also does not include any heap memory used by the NFA inside of /// this hybrid NFA/DFA. This is because the NFA's ownership is shared, and /// thus not owned by this hybrid NFA/DFA. More practically, several regex /// engines in this crate embed an NFA, and reporting the NFA's memory /// usage in all of them would likely result in reporting higher heap /// memory than is actually used. pub fn memory_usage(&self) -> usize { // The only thing that uses heap memory in a DFA is the NFA. But the // NFA has shared ownership, so reporting its memory as part of the // hybrid DFA is likely to lead to double-counting the NFA memory // somehow. In particular, this DFA does not really own an NFA, so // including it in the DFA's memory usage doesn't seem semantically // correct. 
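        //
        // Hence we report no heap usage at all here. (Any heap memory in play
        // is attributable to the shared NFA and to the separately owned
        // `Cache`, as noted in the doc comment above.)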
0 } } impl DFA { /// Executes a forward search and returns the end position of the leftmost /// match that is found. If no match exists, then `None` is returned. /// /// In particular, this method continues searching even after it enters /// a match state. The search only terminates once it has reached the /// end of the input or when it has entered a dead or quit state. Upon /// termination, the position of the last byte seen while still in a match /// state is returned. /// /// # Errors /// /// This routine errors if the search could not complete. This can occur /// in a number of circumstances: /// /// * The configuration of the lazy DFA may permit it to "quit" the search. /// For example, setting quit bytes or enabling heuristic support for /// Unicode word boundaries. The default configuration does not enable any /// option that could result in the lazy DFA quitting. /// * The configuration of the lazy DFA may also permit it to "give up" /// on a search if it makes ineffective use of its transition table /// cache. The default configuration does not enable this by default, /// although it is typically a good idea to. /// * When the provided `Input` configuration is not supported. For /// example, by providing an unsupported anchor mode. /// /// When a search returns an error, callers cannot know whether a match /// exists or not. /// /// # Example /// /// This example shows how to run a basic search. /// /// ``` /// use regex_automata::{hybrid::dfa::DFA, HalfMatch, Input}; /// /// let dfa = DFA::new("foo[0-9]+")?; /// let mut cache = dfa.create_cache(); /// let expected = HalfMatch::must(0, 8); /// assert_eq!(Some(expected), dfa.try_search_fwd( /// &mut cache, &Input::new("foo12345"))?, /// ); /// /// // Even though a match is found after reading the first byte (`a`), /// // the leftmost first match semantics demand that we find the earliest /// // match that prefers earlier parts of the pattern over later parts. /// let dfa = DFA::new("abc|a")?; /// let mut cache = dfa.create_cache(); /// let expected = HalfMatch::must(0, 3); /// assert_eq!(Some(expected), dfa.try_search_fwd( /// &mut cache, &Input::new("abc"))?, /// ); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// # Example: specific pattern search /// /// This example shows how to build a lazy multi-DFA that permits searching /// for specific patterns. /// /// ``` /// use regex_automata::{ /// hybrid::dfa::DFA, /// Anchored, HalfMatch, PatternID, Input, /// }; /// /// let dfa = DFA::builder() /// .configure(DFA::config().starts_for_each_pattern(true)) /// .build_many(&["[a-z0-9]{6}", "[a-z][a-z0-9]{5}"])?; /// let mut cache = dfa.create_cache(); /// let haystack = "foo123"; /// /// // Since we are using the default leftmost-first match and both /// // patterns match at the same starting position, only the first pattern /// // will be returned in this case when doing a search for any of the /// // patterns. /// let expected = Some(HalfMatch::must(0, 6)); /// let got = dfa.try_search_fwd(&mut cache, &Input::new(haystack))?; /// assert_eq!(expected, got); /// /// // But if we want to check whether some other pattern matches, then we /// // can provide its pattern ID. 
/// let expected = Some(HalfMatch::must(1, 6)); /// let input = Input::new(haystack) /// .anchored(Anchored::Pattern(PatternID::must(1))); /// let got = dfa.try_search_fwd(&mut cache, &input)?; /// assert_eq!(expected, got); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// # Example: specifying the bounds of a search /// /// This example shows how providing the bounds of a search can produce /// different results than simply sub-slicing the haystack. /// /// ``` /// use regex_automata::{hybrid::dfa::DFA, HalfMatch, Input}; /// /// // N.B. We disable Unicode here so that we use a simple ASCII word /// // boundary. Alternatively, we could enable heuristic support for /// // Unicode word boundaries since our haystack is pure ASCII. /// let dfa = DFA::new(r"(?-u)\b[0-9]{3}\b")?; /// let mut cache = dfa.create_cache(); /// let haystack = "foo123bar"; /// /// // Since we sub-slice the haystack, the search doesn't know about the /// // larger context and assumes that `123` is surrounded by word /// // boundaries. And of course, the match position is reported relative /// // to the sub-slice as well, which means we get `3` instead of `6`. /// let expected = Some(HalfMatch::must(0, 3)); /// let got = dfa.try_search_fwd( /// &mut cache, /// &Input::new(&haystack[3..6]), /// )?; /// assert_eq!(expected, got); /// /// // But if we provide the bounds of the search within the context of the /// // entire haystack, then the search can take the surrounding context /// // into account. (And if we did find a match, it would be reported /// // as a valid offset into `haystack` instead of its sub-slice.) /// let expected = None; /// let got = dfa.try_search_fwd( /// &mut cache, /// &Input::new(haystack).range(3..6), /// )?; /// assert_eq!(expected, got); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn try_search_fwd( &self, cache: &mut Cache, input: &Input<'_>, ) -> Result<Option<HalfMatch>, MatchError> { let utf8empty = self.get_nfa().has_empty() && self.get_nfa().is_utf8(); let hm = match search::find_fwd(self, cache, input)? { None => return Ok(None), Some(hm) if !utf8empty => return Ok(Some(hm)), Some(hm) => hm, }; // We get to this point when we know our DFA can match the empty string // AND when UTF-8 mode is enabled. In this case, we skip any matches // whose offset splits a codepoint. Such a match is necessarily a // zero-width match, because UTF-8 mode requires the underlying NFA // to be built such that all non-empty matches span valid UTF-8. // Therefore, any match that ends in the middle of a codepoint cannot // be part of a span of valid UTF-8 and thus must be an empty match. // In such cases, we skip it, so as not to report matches that split a // codepoint. // // Note that this is not a checked assumption. Callers *can* provide an // NFA with UTF-8 mode enabled but produces non-empty matches that span // invalid UTF-8. But doing so is documented to result in unspecified // behavior. empty::skip_splits_fwd(input, hm, hm.offset(), |input| { let got = search::find_fwd(self, cache, input)?; Ok(got.map(|hm| (hm, hm.offset()))) }) } /// Executes a reverse search and returns the start of the position of the /// leftmost match that is found. If no match exists, then `None` is /// returned. /// /// # Errors /// /// This routine errors if the search could not complete. This can occur /// in a number of circumstances: /// /// * The configuration of the lazy DFA may permit it to "quit" the search. 
/// For example, setting quit bytes or enabling heuristic support for /// Unicode word boundaries. The default configuration does not enable any /// option that could result in the lazy DFA quitting. /// * The configuration of the lazy DFA may also permit it to "give up" /// on a search if it makes ineffective use of its transition table /// cache. The default configuration does not enable this by default, /// although it is typically a good idea to. /// * When the provided `Input` configuration is not supported. For /// example, by providing an unsupported anchor mode. /// /// When a search returns an error, callers cannot know whether a match /// exists or not. /// /// # Example /// /// This routine is principally useful when used in /// conjunction with the /// [`nfa::thompson::Config::reverse`](crate::nfa::thompson::Config::reverse) /// configuration. In general, it's unlikely to be correct to use both /// `try_search_fwd` and `try_search_rev` with the same DFA since any /// particular DFA will only support searching in one direction with /// respect to the pattern. /// /// ``` /// use regex_automata::{ /// nfa::thompson, /// hybrid::dfa::DFA, /// HalfMatch, Input, /// }; /// /// let dfa = DFA::builder() /// .thompson(thompson::Config::new().reverse(true)) /// .build("foo[0-9]+")?; /// let mut cache = dfa.create_cache(); /// let expected = HalfMatch::must(0, 0); /// assert_eq!( /// Some(expected), /// dfa.try_search_rev(&mut cache, &Input::new("foo12345"))?, /// ); /// /// // Even though a match is found after reading the last byte (`c`), /// // the leftmost first match semantics demand that we find the earliest /// // match that prefers earlier parts of the pattern over latter parts. /// let dfa = DFA::builder() /// .thompson(thompson::Config::new().reverse(true)) /// .build("abc|c")?; /// let mut cache = dfa.create_cache(); /// let expected = HalfMatch::must(0, 0); /// assert_eq!(Some(expected), dfa.try_search_rev( /// &mut cache, &Input::new("abc"))?, /// ); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// # Example: UTF-8 mode /// /// This examples demonstrates that UTF-8 mode applies to reverse /// DFAs. When UTF-8 mode is enabled in the underlying NFA, then all /// matches reported must correspond to valid UTF-8 spans. This includes /// prohibiting zero-width matches that split a codepoint. /// /// UTF-8 mode is enabled by default. Notice below how the only zero-width /// matches reported are those at UTF-8 boundaries: /// /// ``` /// use regex_automata::{ /// hybrid::dfa::DFA, /// nfa::thompson, /// HalfMatch, Input, MatchKind, /// }; /// /// let dfa = DFA::builder() /// .thompson(thompson::Config::new().reverse(true)) /// .build(r"")?; /// let mut cache = dfa.create_cache(); /// /// // Run the reverse DFA to collect all matches. /// let mut input = Input::new("☃"); /// let mut matches = vec![]; /// loop { /// match dfa.try_search_rev(&mut cache, &input)? { /// None => break, /// Some(hm) => { /// matches.push(hm); /// if hm.offset() == 0 || input.end() == 0 { /// break; /// } else if hm.offset() < input.end() { /// input.set_end(hm.offset()); /// } else { /// // This is only necessary to handle zero-width /// // matches, which of course occur in this example. /// // Without this, the search would never advance /// // backwards beyond the initial match. /// input.set_end(input.end() - 1); /// } /// } /// } /// } /// /// // No matches split a codepoint. 
/// let expected = vec![ /// HalfMatch::must(0, 3), /// HalfMatch::must(0, 0), /// ]; /// assert_eq!(expected, matches); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// Now let's look at the same example, but with UTF-8 mode on the /// underlying NFA disabled: /// /// ``` /// use regex_automata::{ /// hybrid::dfa::DFA, /// nfa::thompson, /// HalfMatch, Input, MatchKind, /// }; /// /// let dfa = DFA::builder() /// .thompson(thompson::Config::new().reverse(true).utf8(false)) /// .build(r"")?; /// let mut cache = dfa.create_cache(); /// /// // Run the reverse DFA to collect all matches. /// let mut input = Input::new("☃"); /// let mut matches = vec![]; /// loop { /// match dfa.try_search_rev(&mut cache, &input)? { /// None => break, /// Some(hm) => { /// matches.push(hm); /// if hm.offset() == 0 || input.end() == 0 { /// break; /// } else if hm.offset() < input.end() { /// input.set_end(hm.offset()); /// } else { /// // This is only necessary to handle zero-width /// // matches, which of course occur in this example. /// // Without this, the search would never advance /// // backwards beyond the initial match. /// input.set_end(input.end() - 1); /// } /// } /// } /// } /// /// // No matches split a codepoint. /// let expected = vec![ /// HalfMatch::must(0, 3), /// HalfMatch::must(0, 2), /// HalfMatch::must(0, 1), /// HalfMatch::must(0, 0), /// ]; /// assert_eq!(expected, matches); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn try_search_rev( &self, cache: &mut Cache, input: &Input<'_>, ) -> Result<Option<HalfMatch>, MatchError> { let utf8empty = self.get_nfa().has_empty() && self.get_nfa().is_utf8(); let hm = match search::find_rev(self, cache, input)? { None => return Ok(None), Some(hm) if !utf8empty => return Ok(Some(hm)), Some(hm) => hm, }; empty::skip_splits_rev(input, hm, hm.offset(), |input| { let got = search::find_rev(self, cache, input)?; Ok(got.map(|hm| (hm, hm.offset()))) }) } /// Executes an overlapping forward search and returns the end position of /// matches as they are found. If no match exists, then `None` is returned. /// /// This routine is principally only useful when searching for multiple /// patterns on inputs where multiple patterns may match the same regions /// of text. In particular, callers must preserve the automaton's search /// state from prior calls so that the implementation knows where the last /// match occurred. /// /// When using this routine to implement an iterator of overlapping /// matches, the `start` of the search should remain invariant throughout /// iteration. The `OverlappingState` given to the search will keep track /// of the current position of the search. (This is because multiple /// matches may be reported at the same position, so only the search /// implementation itself knows when to advance the position.) /// /// If for some reason you want the search to forget about its previous /// state and restart the search at a particular position, then setting the /// state to [`OverlappingState::start`] will accomplish that. /// /// # Errors /// /// This routine errors if the search could not complete. This can occur /// in a number of circumstances: /// /// * The configuration of the lazy DFA may permit it to "quit" the search. /// For example, setting quit bytes or enabling heuristic support for /// Unicode word boundaries. The default configuration does not enable any /// option that could result in the lazy DFA quitting. 
/// * The configuration of the lazy DFA may also permit it to "give up" /// on a search if it makes ineffective use of its transition table /// cache. The default configuration does not enable this by default, /// although it is typically a good idea to. /// * When the provided `Input` configuration is not supported. For /// example, by providing an unsupported anchor mode. /// /// When a search returns an error, callers cannot know whether a match /// exists or not. /// /// # Example /// /// This example shows how to run a basic overlapping search. Notice /// that we build the automaton with a `MatchKind::All` configuration. /// Overlapping searches are unlikely to work as one would expect when /// using the default `MatchKind::LeftmostFirst` match semantics, since /// leftmost-first matching is fundamentally incompatible with overlapping /// searches. Namely, overlapping searches need to report matches as they /// are seen, where as leftmost-first searches will continue searching even /// after a match has been observed in order to find the conventional end /// position of the match. More concretely, leftmost-first searches use /// dead states to terminate a search after a specific match can no longer /// be extended. Overlapping searches instead do the opposite by continuing /// the search to find totally new matches (potentially of other patterns). /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{ /// hybrid::dfa::{DFA, OverlappingState}, /// HalfMatch, Input, MatchKind, /// }; /// /// let dfa = DFA::builder() /// .configure(DFA::config().match_kind(MatchKind::All)) /// .build_many(&[r"\w+$", r"\S+$"])?; /// let mut cache = dfa.create_cache(); /// /// let haystack = "@foo"; /// let mut state = OverlappingState::start(); /// /// let expected = Some(HalfMatch::must(1, 4)); /// dfa.try_search_overlapping_fwd( /// &mut cache, &Input::new(haystack), &mut state, /// )?; /// assert_eq!(expected, state.get_match()); /// /// // The first pattern also matches at the same position, so re-running /// // the search will yield another match. Notice also that the first /// // pattern is returned after the second. This is because the second /// // pattern begins its match before the first, is therefore an earlier /// // match and is thus reported first. /// let expected = Some(HalfMatch::must(0, 4)); /// dfa.try_search_overlapping_fwd( /// &mut cache, &Input::new(haystack), &mut state, /// )?; /// assert_eq!(expected, state.get_match()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn try_search_overlapping_fwd( &self, cache: &mut Cache, input: &Input<'_>, state: &mut OverlappingState, ) -> Result<(), MatchError> { let utf8empty = self.get_nfa().has_empty() && self.get_nfa().is_utf8(); search::find_overlapping_fwd(self, cache, input, state)?; match state.get_match() { None => Ok(()), Some(_) if !utf8empty => Ok(()), Some(_) => skip_empty_utf8_splits_overlapping( input, state, |input, state| { search::find_overlapping_fwd(self, cache, input, state) }, ), } } /// Executes a reverse overlapping search and returns the start of the /// position of the leftmost match that is found. If no match exists, then /// `None` is returned. /// /// When using this routine to implement an iterator of overlapping /// matches, the `start` of the search should remain invariant throughout /// iteration. The `OverlappingState` given to the search will keep track /// of the current position of the search. 
(This is because multiple /// matches may be reported at the same position, so only the search /// implementation itself knows when to advance the position.) /// /// If for some reason you want the search to forget about its previous /// state and restart the search at a particular position, then setting the /// state to [`OverlappingState::start`] will accomplish that. /// /// # Errors /// /// This routine errors if the search could not complete. This can occur /// in a number of circumstances: /// /// * The configuration of the lazy DFA may permit it to "quit" the search. /// For example, setting quit bytes or enabling heuristic support for /// Unicode word boundaries. The default configuration does not enable any /// option that could result in the lazy DFA quitting. /// * The configuration of the lazy DFA may also permit it to "give up" /// on a search if it makes ineffective use of its transition table /// cache. The default configuration does not enable this by default, /// although it is typically a good idea to. /// * When the provided `Input` configuration is not supported. For /// example, by providing an unsupported anchor mode. /// /// When a search returns an error, callers cannot know whether a match /// exists or not. /// /// # Example: UTF-8 mode /// /// This examples demonstrates that UTF-8 mode applies to reverse /// DFAs. When UTF-8 mode is enabled in the underlying NFA, then all /// matches reported must correspond to valid UTF-8 spans. This includes /// prohibiting zero-width matches that split a codepoint. /// /// UTF-8 mode is enabled by default. Notice below how the only zero-width /// matches reported are those at UTF-8 boundaries: /// /// ``` /// use regex_automata::{ /// hybrid::dfa::{DFA, OverlappingState}, /// nfa::thompson, /// HalfMatch, Input, MatchKind, /// }; /// /// let dfa = DFA::builder() /// .configure(DFA::config().match_kind(MatchKind::All)) /// .thompson(thompson::Config::new().reverse(true)) /// .build_many(&[r"", r"☃"])?; /// let mut cache = dfa.create_cache(); /// /// // Run the reverse DFA to collect all matches. /// let input = Input::new("☃"); /// let mut state = OverlappingState::start(); /// let mut matches = vec![]; /// loop { /// dfa.try_search_overlapping_rev(&mut cache, &input, &mut state)?; /// match state.get_match() { /// None => break, /// Some(hm) => matches.push(hm), /// } /// } /// /// // No matches split a codepoint. /// let expected = vec![ /// HalfMatch::must(0, 3), /// HalfMatch::must(1, 0), /// HalfMatch::must(0, 0), /// ]; /// assert_eq!(expected, matches); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// Now let's look at the same example, but with UTF-8 mode on the /// underlying NFA disabled: /// /// ``` /// use regex_automata::{ /// hybrid::dfa::{DFA, OverlappingState}, /// nfa::thompson, /// HalfMatch, Input, MatchKind, /// }; /// /// let dfa = DFA::builder() /// .configure(DFA::config().match_kind(MatchKind::All)) /// .thompson(thompson::Config::new().reverse(true).utf8(false)) /// .build_many(&[r"", r"☃"])?; /// let mut cache = dfa.create_cache(); /// /// // Run the reverse DFA to collect all matches. 
/// let input = Input::new("☃"); /// let mut state = OverlappingState::start(); /// let mut matches = vec![]; /// loop { /// dfa.try_search_overlapping_rev(&mut cache, &input, &mut state)?; /// match state.get_match() { /// None => break, /// Some(hm) => matches.push(hm), /// } /// } /// /// // Now *all* positions match, even within a codepoint, /// // because we lifted the requirement that matches /// // correspond to valid UTF-8 spans. /// let expected = vec![ /// HalfMatch::must(0, 3), /// HalfMatch::must(0, 2), /// HalfMatch::must(0, 1), /// HalfMatch::must(1, 0), /// HalfMatch::must(0, 0), /// ]; /// assert_eq!(expected, matches); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn try_search_overlapping_rev( &self, cache: &mut Cache, input: &Input<'_>, state: &mut OverlappingState, ) -> Result<(), MatchError> { let utf8empty = self.get_nfa().has_empty() && self.get_nfa().is_utf8(); search::find_overlapping_rev(self, cache, input, state)?; match state.get_match() { None => Ok(()), Some(_) if !utf8empty => Ok(()), Some(_) => skip_empty_utf8_splits_overlapping( input, state, |input, state| { search::find_overlapping_rev(self, cache, input, state) }, ), } } /// Writes the set of patterns that match anywhere in the given search /// configuration to `patset`. If multiple patterns match at the same /// position and the underlying DFA supports overlapping matches, then all /// matching patterns are written to the given set. /// /// Unless all of the patterns in this DFA are anchored, then generally /// speaking, this will visit every byte in the haystack. /// /// This search routine *does not* clear the pattern set. This gives some /// flexibility to the caller (e.g., running multiple searches with the /// same pattern set), but does make the API bug-prone if you're reusing /// the same pattern set for multiple searches but intended them to be /// independent. /// /// If a pattern ID matched but the given `PatternSet` does not have /// sufficient capacity to store it, then it is not inserted and silently /// dropped. /// /// # Errors /// /// This routine errors if the search could not complete. This can occur /// in a number of circumstances: /// /// * The configuration of the lazy DFA may permit it to "quit" the search. /// For example, setting quit bytes or enabling heuristic support for /// Unicode word boundaries. The default configuration does not enable any /// option that could result in the lazy DFA quitting. /// * The configuration of the lazy DFA may also permit it to "give up" /// on a search if it makes ineffective use of its transition table /// cache. The default configuration does not enable this by default, /// although it is typically a good idea to. /// * When the provided `Input` configuration is not supported. For /// example, by providing an unsupported anchor mode. /// /// When a search returns an error, callers cannot know whether a match /// exists or not. /// /// # Example /// /// This example shows how to find all matching patterns in a haystack, /// even when some patterns match at the same position as other patterns. 
    ///
    /// ```
    /// # if cfg!(miri) { return Ok(()); } // miri takes too long
    /// use regex_automata::{
    ///     hybrid::dfa::DFA,
    ///     Input, MatchKind, PatternSet,
    /// };
    ///
    /// let patterns = &[
    ///     r"\w+", r"\d+", r"\pL+", r"foo", r"bar", r"barfoo", r"foobar",
    /// ];
    /// let dfa = DFA::builder()
    ///     .configure(DFA::config().match_kind(MatchKind::All))
    ///     .build_many(patterns)?;
    /// let mut cache = dfa.create_cache();
    ///
    /// let input = Input::new("foobar");
    /// let mut patset = PatternSet::new(dfa.pattern_len());
    /// dfa.try_which_overlapping_matches(&mut cache, &input, &mut patset)?;
    /// let expected = vec![0, 2, 3, 4, 6];
    /// let got: Vec<usize> = patset.iter().map(|p| p.as_usize()).collect();
    /// assert_eq!(expected, got);
    ///
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```
    #[inline]
    pub fn try_which_overlapping_matches(
        &self,
        cache: &mut Cache,
        input: &Input<'_>,
        patset: &mut PatternSet,
    ) -> Result<(), MatchError> {
        let mut state = OverlappingState::start();
        while let Some(m) = {
            self.try_search_overlapping_fwd(cache, input, &mut state)?;
            state.get_match()
        } {
            let _ = patset.try_insert(m.pattern());
            // There's nothing left to find, so we can stop. Or the caller
            // asked us to.
            if patset.is_full() || input.get_earliest() {
                break;
            }
        }
        Ok(())
    }
}

impl DFA {
    /// Transitions from the current state to the next state, given the next
    /// byte of input.
    ///
    /// The given cache is used to either reuse pre-computed state
    /// transitions, or to store this newly computed transition for future
    /// reuse. Thus, this routine guarantees that it will never return a state
    /// ID that has an "unknown" tag.
    ///
    /// # State identifier validity
    ///
    /// The only valid value for `current` is the lazy state ID returned
    /// by the most recent call to `next_state`, `next_state_untagged`,
    /// `next_state_untagged_unchecked`, `start_state_forward` or
    /// `start_state_reverse` for the given `cache`. Any state ID returned from
    /// prior calls to these routines (with the same `cache`) is considered
    /// invalid (even if it gives an appearance of working). State IDs returned
    /// from _any_ prior call for different `cache` values are also always
    /// invalid.
    ///
    /// The returned ID is always a valid ID when `current` refers to a valid
    /// ID. Moreover, this routine is defined for all possible values of
    /// `input`.
    ///
    /// These validity rules are not checked, even in debug mode. Callers are
    /// required to uphold these rules themselves.
    ///
    /// Violating these state ID validity rules will not sacrifice memory
    /// safety, but _may_ produce an incorrect result or a panic.
    ///
    /// # Panics
    ///
    /// If the given ID does not refer to a valid state, then this routine
    /// may panic but it also may not panic and instead return an invalid or
    /// incorrect ID.
    ///
    /// # Example
    ///
    /// This shows a simplistic example for walking a lazy DFA for a given
    /// haystack by using the `next_state` method.
    ///
    /// ```
    /// use regex_automata::{hybrid::dfa::DFA, Input};
    ///
    /// let dfa = DFA::new(r"[a-z]+r")?;
    /// let mut cache = dfa.create_cache();
    /// let haystack = "bar".as_bytes();
    ///
    /// // The start state is determined by inspecting the position and the
    /// // initial bytes of the haystack.
    /// let mut sid = dfa.start_state_forward(
    ///     &mut cache, &Input::new(haystack),
    /// )?;
    /// // Walk all the bytes in the haystack.
    /// for &b in haystack {
    ///     sid = dfa.next_state(&mut cache, sid, b)?;
    /// }
    /// // Matches are always delayed by 1 byte, so we must explicitly walk the
    /// // special "EOI" transition at the end of the search.
    /// sid = dfa.next_eoi_state(&mut cache, sid)?;
    /// assert!(sid.is_match());
    ///
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```
    #[inline]
    pub fn next_state(
        &self,
        cache: &mut Cache,
        current: LazyStateID,
        input: u8,
    ) -> Result<LazyStateID, CacheError> {
        let class = usize::from(self.classes.get(input));
        let offset = current.as_usize_untagged() + class;
        let sid = cache.trans[offset];
        if !sid.is_unknown() {
            return Ok(sid);
        }
        let unit = alphabet::Unit::u8(input);
        Lazy::new(self, cache).cache_next_state(current, unit)
    }

    /// Transitions from the current state to the next state, given the next
    /// byte of input and a state ID that is not tagged.
    ///
    /// The only reason to use this routine is performance. In particular, the
    /// `next_state` method needs to do some additional checks, among them is
    /// to account for identifiers to states that are not yet computed. In
    /// such a case, the transition is computed on the fly. However, if it is
    /// known that the `current` state ID is untagged, then these checks can be
    /// omitted.
    ///
    /// Since this routine does not compute states on the fly, it does not
    /// modify the cache and thus cannot return an error. Consequently, `cache`
    /// does not need to be mutable and it is possible for this routine to
    /// return a state ID corresponding to the special "unknown" state. In
    /// this case, it is the caller's responsibility to use the prior state
    /// ID and `input` with `next_state` in order to force the computation of
    /// the unknown transition. Otherwise, trying to use the "unknown" state
    /// ID will just result in transitioning back to itself, and thus never
    /// terminating. (This is technically a special exemption to the state ID
    /// validity rules, but is permissible since this routine is guaranteed to
    /// never mutate the given `cache`, and thus the identifier is guaranteed
    /// to remain valid.)
    ///
    /// See [`LazyStateID`] for more details on what it means for a state ID
    /// to be tagged. Also, see
    /// [`next_state_untagged_unchecked`](DFA::next_state_untagged_unchecked)
    /// for this same idea, but with bounds checks forcefully elided.
    ///
    /// # State identifier validity
    ///
    /// The only valid value for `current` is an **untagged** lazy
    /// state ID returned by the most recent call to `next_state`,
    /// `next_state_untagged`, `next_state_untagged_unchecked`,
    /// `start_state_forward` or `start_state_reverse` for the given `cache`.
    /// Any state ID returned from prior calls to these routines (with the
    /// same `cache`) is considered invalid (even if it gives an appearance
    /// of working). State IDs returned from _any_ prior call for different
    /// `cache` values are also always invalid.
    ///
    /// The returned ID is always a valid ID when `current` refers to a valid
    /// ID, although it may be tagged. Moreover, this routine is defined for
    /// all possible values of `input`.
    ///
    /// Not all validity rules are checked, even in debug mode. Callers are
    /// required to uphold these rules themselves.
    ///
    /// Violating these state ID validity rules will not sacrifice memory
    /// safety, but _may_ produce an incorrect result or a panic.
    ///
    /// # Panics
    ///
    /// If the given ID does not refer to a valid state, then this routine
    /// may panic but it also may not panic and instead return an invalid or
    /// incorrect ID.
    ///
    /// # Example
    ///
    /// This shows a simplistic example for walking a lazy DFA for a given
    /// haystack by using the `next_state_untagged` method where possible.
    ///
    /// ```
    /// use regex_automata::{hybrid::dfa::DFA, Input};
    ///
    /// let dfa = DFA::new(r"[a-z]+r")?;
    /// let mut cache = dfa.create_cache();
    /// let haystack = "bar".as_bytes();
    ///
    /// // The start state is determined by inspecting the position and the
    /// // initial bytes of the haystack.
    /// let mut sid = dfa.start_state_forward(
    ///     &mut cache, &Input::new(haystack),
    /// )?;
    /// // Walk all the bytes in the haystack.
    /// let mut at = 0;
    /// while at < haystack.len() {
    ///     if sid.is_tagged() {
    ///         sid = dfa.next_state(&mut cache, sid, haystack[at])?;
    ///     } else {
    ///         let mut prev_sid = sid;
    ///         // We attempt to chew through as much as we can while moving
    ///         // through untagged state IDs. Thus, the transition function
    ///         // does less work on average per byte. (Unrolling this loop
    ///         // may help even more.)
    ///         while at < haystack.len() {
    ///             prev_sid = sid;
    ///             sid = dfa.next_state_untagged(
    ///                 &mut cache, sid, haystack[at],
    ///             );
    ///             at += 1;
    ///             if sid.is_tagged() {
    ///                 break;
    ///             }
    ///         }
    ///         // We must ensure that we never proceed to the next iteration
    ///         // with an unknown state ID. If we don't account for this
    ///         // case, then search isn't guaranteed to terminate since all
    ///         // transitions on unknown states loop back to itself.
    ///         if sid.is_unknown() {
    ///             sid = dfa.next_state(
    ///                 &mut cache, prev_sid, haystack[at - 1],
    ///             )?;
    ///         }
    ///     }
    /// }
    /// // Matches are always delayed by 1 byte, so we must explicitly walk the
    /// // special "EOI" transition at the end of the search.
    /// sid = dfa.next_eoi_state(&mut cache, sid)?;
    /// assert!(sid.is_match());
    ///
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```
    #[inline]
    pub fn next_state_untagged(
        &self,
        cache: &Cache,
        current: LazyStateID,
        input: u8,
    ) -> LazyStateID {
        debug_assert!(!current.is_tagged());
        let class = usize::from(self.classes.get(input));
        let offset = current.as_usize_unchecked() + class;
        cache.trans[offset]
    }

    /// Transitions from the current state to the next state, eliding bounds
    /// checks, given the next byte of input and a state ID that is not tagged.
    ///
    /// The only reason to use this routine is performance. In particular, the
    /// `next_state` method needs to do some additional checks, among them is
    /// to account for identifiers to states that are not yet computed. In
    /// such a case, the transition is computed on the fly. However, if it is
    /// known that the `current` state ID is untagged, then these checks can be
    /// omitted.
    ///
    /// Since this routine does not compute states on the fly, it does not
    /// modify the cache and thus cannot return an error. Consequently, `cache`
    /// does not need to be mutable and it is possible for this routine to
    /// return a state ID corresponding to the special "unknown" state. In
    /// this case, it is the caller's responsibility to use the prior state
    /// ID and `input` with `next_state` in order to force the computation of
    /// the unknown transition. Otherwise, trying to use the "unknown" state
    /// ID will just result in transitioning back to itself, and thus never
    /// terminating. (This is technically a special exemption to the state ID
    /// validity rules, but is permissible since this routine is guaranteed to
    /// never mutate the given `cache`, and thus the identifier is guaranteed
    /// to remain valid.)
    ///
    /// See [`LazyStateID`] for more details on what it means for a state ID
    /// to be tagged. Also, see
    /// [`next_state_untagged`](DFA::next_state_untagged)
    /// for this same idea, but with memory safety guaranteed by retaining
    /// bounds checks.
    ///
    /// # State identifier validity
    ///
    /// The only valid value for `current` is an **untagged** lazy
    /// state ID returned by the most recent call to `next_state`,
    /// `next_state_untagged`, `next_state_untagged_unchecked`,
    /// `start_state_forward` or `start_state_reverse` for the given `cache`.
    /// Any state ID returned from prior calls to these routines (with the
    /// same `cache`) is considered invalid (even if it gives an appearance
    /// of working). State IDs returned from _any_ prior call for different
    /// `cache` values are also always invalid.
    ///
    /// The returned ID is always a valid ID when `current` refers to a valid
    /// ID, although it may be tagged. Moreover, this routine is defined for
    /// all possible values of `input`.
    ///
    /// Not all validity rules are checked, even in debug mode. Callers are
    /// required to uphold these rules themselves.
    ///
    /// Violating these state ID validity rules will not sacrifice memory
    /// safety, but _may_ produce an incorrect result or a panic.
    ///
    /// # Safety
    ///
    /// Callers of this method must guarantee that `current` refers to a valid
    /// state ID according to the rules described above. If `current` is not a
    /// valid state ID for this automaton, then calling this routine may result
    /// in undefined behavior.
    ///
    /// If `current` is valid, then the ID returned is valid for all possible
    /// values of `input`.
    #[inline]
    pub unsafe fn next_state_untagged_unchecked(
        &self,
        cache: &Cache,
        current: LazyStateID,
        input: u8,
    ) -> LazyStateID {
        debug_assert!(!current.is_tagged());
        let class = usize::from(self.classes.get(input));
        let offset = current.as_usize_unchecked() + class;
        *cache.trans.get_unchecked(offset)
    }

    /// Transitions from the current state to the next state for the special
    /// EOI symbol.
    ///
    /// The given cache is used to either reuse pre-computed state
    /// transitions, or to store this newly computed transition for future
    /// reuse. Thus, this routine guarantees that it will never return a state
    /// ID that has an "unknown" tag.
    ///
    /// This routine must be called at the end of every search in a correct
    /// implementation of search. Namely, lazy DFAs in this crate delay matches
    /// by one byte in order to support look-around operators. Thus, after
    /// reaching the end of a haystack, a search implementation must follow one
    /// last EOI transition.
    ///
    /// It is best to think of EOI as an additional symbol in the alphabet of a
    /// DFA that is distinct from every other symbol. That is, the alphabet of
    /// lazy DFAs in this crate has a logical size of 257 instead of 256, where
    /// 256 corresponds to every possible inhabitant of `u8`. (In practice, the
    /// physical alphabet size may be smaller because of alphabet compression
    /// via equivalence classes, but EOI is always represented somehow in the
    /// alphabet.)
    ///
    /// # State identifier validity
    ///
    /// The only valid value for `current` is the lazy state ID returned
    /// by the most recent call to `next_state`, `next_state_untagged`,
    /// `next_state_untagged_unchecked`, `start_state_forward` or
    /// `start_state_reverse` for the given `cache`. Any state ID returned from
    /// prior calls to these routines (with the same `cache`) is considered
    /// invalid (even if it gives an appearance of working). State IDs returned
    /// from _any_ prior call for different `cache` values are also always
    /// invalid.
    ///
    /// The returned ID is always a valid ID when `current` refers to a valid
    /// ID.
    ///
    /// These validity rules are not checked, even in debug mode.
Callers are /// required to uphold these rules themselves. /// /// Violating these state ID validity rules will not sacrifice memory /// safety, but _may_ produce an incorrect result or a panic. /// /// # Panics /// /// If the given ID does not refer to a valid state, then this routine /// may panic but it also may not panic and instead return an invalid or /// incorrect ID. /// /// # Example /// /// This shows a simplistic example for walking a DFA for a given haystack, /// and then finishing the search with the final EOI transition. /// /// ``` /// use regex_automata::{hybrid::dfa::DFA, Input}; /// /// let dfa = DFA::new(r"[a-z]+r")?; /// let mut cache = dfa.create_cache(); /// let haystack = "bar".as_bytes(); /// /// // The start state is determined by inspecting the position and the /// // initial bytes of the haystack. /// let mut sid = dfa.start_state_forward( /// &mut cache, &Input::new(haystack), /// )?; /// // Walk all the bytes in the haystack. /// for &b in haystack { /// sid = dfa.next_state(&mut cache, sid, b)?; /// } /// // Matches are always delayed by 1 byte, so we must explicitly walk /// // the special "EOI" transition at the end of the search. Without this /// // final transition, the assert below will fail since the DFA will not /// // have entered a match state yet! /// sid = dfa.next_eoi_state(&mut cache, sid)?; /// assert!(sid.is_match()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn next_eoi_state( &self, cache: &mut Cache, current: LazyStateID, ) -> Result<LazyStateID, CacheError> { let eoi = self.classes.eoi().as_usize(); let offset = current.as_usize_untagged() + eoi; let sid = cache.trans[offset]; if !sid.is_unknown() { return Ok(sid); } let unit = self.classes.eoi(); Lazy::new(self, cache).cache_next_state(current, unit) } /// Return the ID of the start state for this lazy DFA when executing a /// forward search. /// /// Unlike typical DFA implementations, the start state for DFAs in this /// crate is dependent on a few different factors: /// /// * The [`Anchored`] mode of the search. Unanchored, anchored and /// anchored searches for a specific [`PatternID`] all use different start /// states. /// * The position at which the search begins, via [`Input::start`]. This /// and the byte immediately preceding the start of the search (if one /// exists) influence which look-behind assertions are true at the start /// of the search. This in turn influences which start state is selected. /// * Whether the search is a forward or reverse search. This routine can /// only be used for forward searches. /// /// # Errors /// /// This may return a [`MatchError`] (not a [`CacheError`]!) if the search /// needs to give up when determining the start state (for example, if /// it sees a "quit" byte or if the cache has been cleared too many /// times). This can also return an error if the given `Input` contains an /// unsupported [`Anchored`] configuration. 
#[cfg_attr(feature = "perf-inline", inline(always))] pub fn start_state_forward( &self, cache: &mut Cache, input: &Input<'_>, ) -> Result<LazyStateID, MatchError> { if !self.quitset.is_empty() && input.start() > 0 { let offset = input.start() - 1; let byte = input.haystack()[offset]; if self.quitset.contains(byte) { return Err(MatchError::quit(byte, offset)); } } let start_type = self.start_map.fwd(input); let start = LazyRef::new(self, cache) .get_cached_start_id(input, start_type)?; if !start.is_unknown() { return Ok(start); } Lazy::new(self, cache).cache_start_group(input, start_type) } /// Return the ID of the start state for this lazy DFA when executing a /// reverse search. /// /// Unlike typical DFA implementations, the start state for DFAs in this /// crate is dependent on a few different factors: /// /// * The [`Anchored`] mode of the search. Unanchored, anchored and /// anchored searches for a specific [`PatternID`] all use different start /// states. /// * The position at which the search begins, via [`Input::start`]. This /// and the byte immediately preceding the start of the search (if one /// exists) influence which look-behind assertions are true at the start /// of the search. This in turn influences which start state is selected. /// * Whether the search is a forward or reverse search. This routine can /// only be used for reverse searches. /// /// # Errors /// /// This may return a [`MatchError`] (not a [`CacheError`]!) if the search /// needs to give up when determining the start state (for example, if /// it sees a "quit" byte or if the cache has been cleared too many /// times). This can also return an error if the given `Input` contains an /// unsupported [`Anchored`] configuration. #[cfg_attr(feature = "perf-inline", inline(always))] pub fn start_state_reverse( &self, cache: &mut Cache, input: &Input<'_>, ) -> Result<LazyStateID, MatchError> { if !self.quitset.is_empty() && input.end() < input.haystack().len() { let offset = input.end(); let byte = input.haystack()[offset]; if self.quitset.contains(byte) { return Err(MatchError::quit(byte, offset)); } } let start_type = self.start_map.rev(input); let start = LazyRef::new(self, cache) .get_cached_start_id(input, start_type)?; if !start.is_unknown() { return Ok(start); } Lazy::new(self, cache).cache_start_group(input, start_type) } /// Returns the total number of patterns that match in this state. /// /// If the lazy DFA was compiled with one pattern, then this must /// necessarily always return `1` for all match states. /// /// A lazy DFA guarantees that [`DFA::match_pattern`] can be called with /// indices up to (but not including) the length returned by this routine /// without panicking. /// /// # Panics /// /// If the given state is not a match state, then this may either panic /// or return an incorrect result. /// /// # Example /// /// This example shows a simple instance of implementing overlapping /// matches. In particular, it shows not only how to determine how many /// patterns have matched in a particular state, but also how to access /// which specific patterns have matched. /// /// Notice that we must use [`MatchKind::All`] when building the DFA. If we /// used [`MatchKind::LeftmostFirst`] instead, then the DFA would not be /// constructed in a way that supports overlapping matches. (It would only /// report a single pattern that matches at any particular point in time.) /// /// Another thing to take note of is the patterns used and the order in /// which the pattern IDs are reported. 
In the example below, pattern `3` /// is yielded first. Why? Because it corresponds to the match that /// appears first. Namely, the `@` symbol is part of `\S+` but not part /// of any of the other patterns. Since the `\S+` pattern has a match that /// starts to the left of any other pattern, its ID is returned before any /// other. /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{hybrid::dfa::DFA, Input, MatchKind}; /// /// let dfa = DFA::builder() /// .configure(DFA::config().match_kind(MatchKind::All)) /// .build_many(&[ /// r"\w+", r"[a-z]+", r"[A-Z]+", r"\S+", /// ])?; /// let mut cache = dfa.create_cache(); /// let haystack = "@bar".as_bytes(); /// /// // The start state is determined by inspecting the position and the /// // initial bytes of the haystack. /// let mut sid = dfa.start_state_forward( /// &mut cache, &Input::new(haystack), /// )?; /// // Walk all the bytes in the haystack. /// for &b in haystack { /// sid = dfa.next_state(&mut cache, sid, b)?; /// } /// sid = dfa.next_eoi_state(&mut cache, sid)?; /// /// assert!(sid.is_match()); /// assert_eq!(dfa.match_len(&mut cache, sid), 3); /// // The following calls are guaranteed to not panic since `match_len` /// // returned `3` above. /// assert_eq!(dfa.match_pattern(&mut cache, sid, 0).as_usize(), 3); /// assert_eq!(dfa.match_pattern(&mut cache, sid, 1).as_usize(), 0); /// assert_eq!(dfa.match_pattern(&mut cache, sid, 2).as_usize(), 1); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn match_len(&self, cache: &Cache, id: LazyStateID) -> usize { assert!(id.is_match()); LazyRef::new(self, cache).get_cached_state(id).match_len() } /// Returns the pattern ID corresponding to the given match index in the /// given state. /// /// See [`DFA::match_len`] for an example of how to use this method /// correctly. Note that if you know your lazy DFA is configured with a /// single pattern, then this routine is never necessary since it will /// always return a pattern ID of `0` for an index of `0` when `id` /// corresponds to a match state. /// /// Typically, this routine is used when implementing an overlapping /// search, as the example for `DFA::match_len` does. /// /// # Panics /// /// If the state ID is not a match state or if the match index is out /// of bounds for the given state, then this routine may either panic /// or produce an incorrect result. If the state ID is correct and the /// match index is correct, then this routine always produces a valid /// `PatternID`. #[inline] pub fn match_pattern( &self, cache: &Cache, id: LazyStateID, match_index: usize, ) -> PatternID { // This is an optimization for the very common case of a DFA with a // single pattern. This conditional avoids a somewhat more costly path // that finds the pattern ID from the corresponding `State`, which // requires a bit of slicing/pointer-chasing. This optimization tends // to only matter when matches are frequent. if self.pattern_len() == 1 { return PatternID::ZERO; } LazyRef::new(self, cache) .get_cached_state(id) .match_pattern(match_index) } } /// A cache represents a partially computed DFA. /// /// A cache is the key component that differentiates a classical DFA and a /// hybrid NFA/DFA (also called a "lazy DFA"). Where a classical DFA builds a /// complete transition table that can handle all possible inputs, a hybrid /// NFA/DFA starts with an empty transition table and builds only the parts /// required during search. The parts that are built are stored in a cache. 
For /// this reason, a cache is a required parameter for nearly every operation on /// a [`DFA`]. /// /// Caches can be created from their corresponding DFA via /// [`DFA::create_cache`]. A cache can only be used with either the DFA that /// created it, or the DFA that was most recently used to reset it with /// [`Cache::reset`]. Using a cache with any other DFA may result in panics /// or incorrect results. #[derive(Clone, Debug)] pub struct Cache { // N.B. If you're looking to understand how determinization works, it // is probably simpler to first grok src/dfa/determinize.rs, since that // doesn't have the "laziness" component. /// The transition table. /// /// Given a `current` LazyStateID and an `input` byte, the next state can /// be computed via `trans[untagged(current) + equiv_class(input)]`. Notice /// that no multiplication is used. That's because state identifiers are /// "premultiplied." /// /// Note that the next state may be the "unknown" state. In this case, the /// next state is not known and determinization for `current` on `input` /// must be performed. trans: Vec<LazyStateID>, /// The starting states for this DFA. /// /// These are computed lazily. Initially, these are all set to "unknown" /// lazy state IDs. /// /// When 'starts_for_each_pattern' is disabled (the default), then the size /// of this is constrained to the possible starting configurations based /// on the search parameters. (At time of writing, that's 4.) However, /// when starting states for each pattern is enabled, then there are N /// additional groups of starting states, where each group reflects the /// different possible configurations and N is the number of patterns. starts: Vec<LazyStateID>, /// A sequence of NFA/DFA powerset states that have been computed for this /// lazy DFA. This sequence is indexable by untagged LazyStateIDs. (Every /// tagged LazyStateID can be used to index this sequence by converting it /// to its untagged form.) states: Vec<State>, /// A map from states to their corresponding IDs. This map may be accessed /// via the raw byte representation of a state, which means that a `State` /// does not need to be allocated to determine whether it already exists /// in this map. Indeed, the existence of such a state is what determines /// whether we allocate a new `State` or not. /// /// The higher level idea here is that we do just enough determinization /// for a state to check whether we've already computed it. If we have, /// then we can save a little (albeit not much) work. The real savings is /// in memory usage. If we never checked for trivially duplicate states, /// then our memory usage would explode to unreasonable levels. states_to_id: StateMap, /// Sparse sets used to track which NFA states have been visited during /// various traversals. sparses: SparseSets, /// Scratch space for traversing the NFA graph. (We use space on the heap /// instead of the call stack.) stack: Vec<NFAStateID>, /// Scratch space for building a NFA/DFA powerset state. This is used to /// help amortize allocation since not every powerset state generated is /// added to the cache. In particular, if it already exists in the cache, /// then there is no need to allocate a new `State` for it. scratch_state_builder: StateBuilderEmpty, /// A simple abstraction for handling the saving of at most a single state /// across a cache clearing. This is required for correctness. 
Namely, if
    /// adding a new state after clearing the cache fails, then the caller
    /// must retain the ability to continue using the state ID given. The
    /// state corresponding to the state ID is what we preserve across cache
    /// clearings.
    state_saver: StateSaver,
    /// The memory usage, in bytes, used by 'states' and 'states_to_id'. We
    /// track this as new states are added since states use a variable amount
    /// of heap. Tracking this as we add states makes it possible to compute
    /// the total amount of memory used by the determinizer in constant time.
    memory_usage_state: usize,
    /// The number of times the cache has been cleared. When a minimum cache
    /// clear count is set, then the cache will return an error instead of
    /// clearing the cache if the count has been exceeded.
    clear_count: usize,
    /// The total number of bytes searched since the last time this cache was
    /// cleared, not including the current search.
    ///
    /// This can be added to the length of the current search to get the true
    /// total number of bytes searched.
    ///
    /// This is generally only non-zero when the
    /// `Cache::search_{start,update,finish}` APIs are used to track search
    /// progress.
    bytes_searched: usize,
    /// The progress of the current search.
    ///
    /// This is only non-`None` when callers utilize the `Cache::search_start`,
    /// `Cache::search_update` and `Cache::search_finish` APIs.
    ///
    /// The purpose of recording search progress is to be able to make a
    /// determination about the efficiency of the cache. Namely, by keeping
    /// track of the number of bytes searched relative to the number of states
    /// created, it is possible to detect when the cache is being used
    /// ineffectively and, if `Config::minimum_bytes_per_state` is set, give
    /// up on the search.
    progress: Option<SearchProgress>,
}

impl Cache {
    /// Create a new cache for the given lazy DFA.
    ///
    /// The cache returned should only be used for searches for the given DFA.
    /// If you want to reuse the cache for another DFA, then you must call
    /// [`Cache::reset`] with that DFA.
    pub fn new(dfa: &DFA) -> Cache {
        let mut cache = Cache {
            trans: alloc::vec![],
            starts: alloc::vec![],
            states: alloc::vec![],
            states_to_id: StateMap::new(),
            sparses: SparseSets::new(dfa.get_nfa().states().len()),
            stack: alloc::vec![],
            scratch_state_builder: StateBuilderEmpty::new(),
            state_saver: StateSaver::none(),
            memory_usage_state: 0,
            clear_count: 0,
            bytes_searched: 0,
            progress: None,
        };
        debug!("pre-init lazy DFA cache size: {}", cache.memory_usage());
        Lazy { dfa, cache: &mut cache }.init_cache();
        debug!("post-init lazy DFA cache size: {}", cache.memory_usage());
        cache
    }

    /// Reset this cache such that it can be used for searching with the given
    /// lazy DFA (and only that DFA).
    ///
    /// A cache reset permits reusing memory already allocated in this cache
    /// with a different lazy DFA.
    ///
    /// Resetting a cache sets its "clear count" to 0. This is relevant if the
    /// lazy DFA has been configured to "give up" after it has cleared the
    /// cache a certain number of times.
    ///
    /// Any lazy state ID generated by the cache prior to resetting it is
    /// invalid after the reset.
    ///
    /// # Example
    ///
    /// This shows how to re-purpose a cache for use with a different DFA.
    ///
    /// ```
    /// # if cfg!(miri) { return Ok(()); } // miri takes too long
    /// use regex_automata::{hybrid::dfa::DFA, HalfMatch, Input};
    ///
    /// let dfa1 = DFA::new(r"\w")?;
    /// let dfa2 = DFA::new(r"\W")?;
    ///
    /// let mut cache = dfa1.create_cache();
    /// assert_eq!(
    ///     Some(HalfMatch::must(0, 2)),
    ///     dfa1.try_search_fwd(&mut cache, &Input::new("Δ"))?,
    /// );
    ///
    /// // Using 'cache' with dfa2 is not allowed. It may result in panics or
    /// // incorrect results.
In order to re-purpose the cache, we must reset
    /// // it with the DFA we'd like to use it with.
    /// //
    /// // Similarly, after this reset, using the cache with 'dfa1' is also not
    /// // allowed.
    /// cache.reset(&dfa2);
    /// assert_eq!(
    ///     Some(HalfMatch::must(0, 3)),
    ///     dfa2.try_search_fwd(&mut cache, &Input::new("☃"))?,
    /// );
    ///
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```
    pub fn reset(&mut self, dfa: &DFA) {
        Lazy::new(dfa, self).reset_cache()
    }

    /// Initializes a new search starting at the given position.
    ///
    /// If a previous search was unfinished, then it is finished automatically
    /// and a new search is begun.
    ///
    /// Note that keeping track of search progress is _not necessary_
    /// for correct implementations of search using a lazy DFA. Keeping
    /// track of search progress is only necessary if you want the
    /// [`Config::minimum_bytes_per_state`] configuration knob to work.
    #[inline]
    pub fn search_start(&mut self, at: usize) {
        // If a previous search wasn't marked as finished, then finish it
        // now automatically.
        if let Some(p) = self.progress.take() {
            self.bytes_searched += p.len();
        }
        self.progress = Some(SearchProgress { start: at, at });
    }

    /// Updates the current search to indicate that it has searched to the
    /// current position.
    ///
    /// No special care needs to be taken for reverse searches. Namely, the
    /// position given may be _less than_ the starting position of the search.
    ///
    /// # Panics
    ///
    /// This panics if no search has been started by [`Cache::search_start`].
    #[inline]
    pub fn search_update(&mut self, at: usize) {
        let p =
            self.progress.as_mut().expect("no in-progress search to update");
        p.at = at;
    }

    /// Indicates that a search has finished at the given position.
    ///
    /// # Panics
    ///
    /// This panics if no search has been started by [`Cache::search_start`].
    #[inline]
    pub fn search_finish(&mut self, at: usize) {
        let mut p =
            self.progress.take().expect("no in-progress search to finish");
        p.at = at;
        self.bytes_searched += p.len();
    }

    /// Returns the total number of bytes that have been searched since this
    /// cache was last cleared.
    ///
    /// This is useful for determining the efficiency of the cache. For
    /// example, the lazy DFA uses this value in conjunction with the
    /// [`Config::minimum_bytes_per_state`] knob to help determine whether it
    /// should quit searching.
    ///
    /// This always returns `0` if search progress isn't being tracked. Note
    /// that the lazy DFA search routines in this crate always track search
    /// progress.
    pub fn search_total_len(&self) -> usize {
        self.bytes_searched + self.progress.as_ref().map_or(0, |p| p.len())
    }

    /// Returns the total number of times this cache has been cleared since it
    /// was either created or last reset.
    ///
    /// This is useful for informational purposes or if you want to change
    /// search strategies based on the number of times the cache has been
    /// cleared.
    pub fn clear_count(&self) -> usize {
        self.clear_count
    }

    /// Returns the heap memory usage, in bytes, of this cache.
    ///
    /// This does **not** include the stack size used up by this cache. To
    /// compute that, use `std::mem::size_of::<Cache>()`.
    pub fn memory_usage(&self) -> usize {
        const ID_SIZE: usize = size_of::<LazyStateID>();
        const STATE_SIZE: usize = size_of::<State>();

        // NOTE: If you make changes to the below, then
        // 'minimum_cache_capacity' should be updated correspondingly.
        self.trans.len() * ID_SIZE
            + self.starts.len() * ID_SIZE
            + self.states.len() * STATE_SIZE
            // Maps likely use more memory than this, but it's probably close.
+ self.states_to_id.len() * (STATE_SIZE + ID_SIZE) + self.sparses.memory_usage() + self.stack.capacity() * ID_SIZE + self.scratch_state_builder.capacity() // Heap memory used by 'State' in both 'states' and 'states_to_id'. + self.memory_usage_state } } /// Keeps track of the progress of the current search. /// /// This is updated via the `Cache::search_{start,update,finish}` APIs to /// record how many bytes have been searched. This permits computing a /// heuristic that represents the efficiency of a cache, and thus helps inform /// whether the lazy DFA should give up or not. #[derive(Clone, Debug)] struct SearchProgress { start: usize, at: usize, } impl SearchProgress { /// Returns the length, in bytes, of this search so far. /// /// This automatically handles the case of a reverse search, where `at` /// is likely to be less than `start`. fn len(&self) -> usize { if self.start <= self.at { self.at - self.start } else { self.start - self.at } } } /// A map from states to state identifiers. When using std, we use a standard /// hashmap, since it's a bit faster for this use case. (Other maps, like /// one's based on FNV, have not yet been benchmarked.) /// /// The main purpose of this map is to reuse states where possible. This won't /// fully minimize the DFA, but it works well in a lot of cases. #[cfg(feature = "std")] type StateMap = std::collections::HashMap<State, LazyStateID>; #[cfg(not(feature = "std"))] type StateMap = alloc::collections::BTreeMap<State, LazyStateID>; /// A type that groups methods that require the base NFA/DFA and writable /// access to the cache. #[derive(Debug)] struct Lazy<'i, 'c> { dfa: &'i DFA, cache: &'c mut Cache, } impl<'i, 'c> Lazy<'i, 'c> { /// Creates a new 'Lazy' wrapper for a DFA and its corresponding cache. fn new(dfa: &'i DFA, cache: &'c mut Cache) -> Lazy<'i, 'c> { Lazy { dfa, cache } } /// Return an immutable view by downgrading a writable cache to a read-only /// cache. fn as_ref<'a>(&'a self) -> LazyRef<'i, 'a> { LazyRef::new(self.dfa, self.cache) } /// This is marked as 'inline(never)' to avoid bloating methods on 'DFA' /// like 'next_state' and 'next_eoi_state' that are called in critical /// areas. The idea is to let the optimizer focus on the other areas of /// those methods as the hot path. /// /// Here's an example that justifies 'inline(never)' /// /// ```ignore /// regex-cli find hybrid dfa \ /// @all-codepoints-utf8-100x '\pL{100}' --cache-capacity 10000000 /// ``` /// /// Where 'all-codepoints-utf8-100x' is the UTF-8 encoding of every /// codepoint, in sequence, repeated 100 times. /// /// With 'inline(never)' hyperfine reports 1.1s per run. With /// 'inline(always)', hyperfine reports 1.23s. So that's a 10% improvement. #[cold] #[inline(never)] fn cache_next_state( &mut self, mut current: LazyStateID, unit: alphabet::Unit, ) -> Result<LazyStateID, CacheError> { let stride2 = self.dfa.stride2(); let empty_builder = self.get_state_builder(); let builder = determinize::next( self.dfa.get_nfa(), self.dfa.get_config().get_match_kind(), &mut self.cache.sparses, &mut self.cache.stack, &self.cache.states[current.as_usize_untagged() >> stride2], unit, empty_builder, ); let save_state = !self.as_ref().state_builder_fits_in_cache(&builder); if save_state { self.save_state(current); } let next = self.add_builder_state(builder, |sid| sid)?; if save_state { current = self.saved_state_id(); } // This is the payoff. 
The next time 'next_state' is called with this // state and alphabet unit, it will find this transition and avoid // having to re-determinize this transition. self.set_transition(current, unit, next); Ok(next) } /// Compute and cache the starting state for the given pattern ID (if /// present) and the starting configuration. /// /// This panics if a pattern ID is given and the DFA isn't configured to /// build anchored start states for each pattern. /// /// This will never return an unknown lazy state ID. /// /// If caching this state would otherwise result in a cache that has been /// cleared too many times, then an error is returned. #[cold] #[inline(never)] fn cache_start_group( &mut self, input: &Input<'_>, start: Start, ) -> Result<LazyStateID, MatchError> { let mode = input.get_anchored(); let nfa_start_id = match mode { Anchored::No => self.dfa.get_nfa().start_unanchored(), Anchored::Yes => self.dfa.get_nfa().start_anchored(), Anchored::Pattern(pid) => { if !self.dfa.get_config().get_starts_for_each_pattern() { return Err(MatchError::unsupported_anchored(mode)); } match self.dfa.get_nfa().start_pattern(pid) { None => return Ok(self.as_ref().dead_id()), Some(sid) => sid, } } }; let id = self .cache_start_one(nfa_start_id, start) .map_err(|_| MatchError::gave_up(input.start()))?; self.set_start_state(input, start, id); Ok(id) } /// Compute and cache the starting state for the given NFA state ID and the /// starting configuration. The NFA state ID might be one of the following: /// /// 1) An unanchored start state to match any pattern. /// 2) An anchored start state to match any pattern. /// 3) An anchored start state for a particular pattern. /// /// This will never return an unknown lazy state ID. /// /// If caching this state would otherwise result in a cache that has been /// cleared too many times, then an error is returned. fn cache_start_one( &mut self, nfa_start_id: NFAStateID, start: Start, ) -> Result<LazyStateID, CacheError> { let mut builder_matches = self.get_state_builder().into_matches(); determinize::set_lookbehind_from_start( self.dfa.get_nfa(), &start, &mut builder_matches, ); self.cache.sparses.set1.clear(); determinize::epsilon_closure( self.dfa.get_nfa(), nfa_start_id, builder_matches.look_have(), &mut self.cache.stack, &mut self.cache.sparses.set1, ); let mut builder = builder_matches.into_nfa(); determinize::add_nfa_states( &self.dfa.get_nfa(), &self.cache.sparses.set1, &mut builder, ); let tag_starts = self.dfa.get_config().get_specialize_start_states(); self.add_builder_state(builder, |id| { if tag_starts { id.to_start() } else { id } }) } /// Either add the given builder state to this cache, or return an ID to an /// equivalent state already in this cache. /// /// In the case where no equivalent state exists, the idmap function given /// may be used to transform the identifier allocated. This is useful if /// the caller needs to tag the ID with additional information. /// /// This will never return an unknown lazy state ID. /// /// If caching this state would otherwise result in a cache that has been /// cleared too many times, then an error is returned. fn add_builder_state( &mut self, builder: StateBuilderNFA, idmap: impl Fn(LazyStateID) -> LazyStateID, ) -> Result<LazyStateID, CacheError> { if let Some(&cached_id) = self.cache.states_to_id.get(builder.as_bytes()) { // Since we have a cached state, put the constructed state's // memory back into our scratch space, so that it can be reused. 
self.put_state_builder(builder); return Ok(cached_id); } let result = self.add_state(builder.to_state(), idmap); self.put_state_builder(builder); result } /// Allocate a new state ID and add the given state to this cache. /// /// The idmap function given may be used to transform the identifier /// allocated. This is useful if the caller needs to tag the ID with /// additional information. /// /// This will never return an unknown lazy state ID. /// /// If caching this state would otherwise result in a cache that has been /// cleared too many times, then an error is returned. fn add_state( &mut self, state: State, idmap: impl Fn(LazyStateID) -> LazyStateID, ) -> Result<LazyStateID, CacheError> { if !self.as_ref().state_fits_in_cache(&state) { self.try_clear_cache()?; } // It's important for this to come second, since the above may clear // the cache. If we clear the cache after ID generation, then the ID // is likely bunk since it would have been generated based on a larger // transition table. let mut id = idmap(self.next_state_id()?); if state.is_match() { id = id.to_match(); } // Add room in the transition table. Since this is a fresh state, all // of its transitions are unknown. self.cache.trans.extend( iter::repeat(self.as_ref().unknown_id()).take(self.dfa.stride()), ); // When we add a sentinel state, we never want to set any quit // transitions. Technically, this is harmless, since sentinel states // have all of their transitions set to loop back to themselves. But // when creating sentinel states before the quit sentinel state, // this will try to call 'set_transition' on a state ID that doesn't // actually exist yet, which isn't allowed. So we just skip doing so // entirely. if !self.dfa.quitset.is_empty() && !self.as_ref().is_sentinel(id) { let quit_id = self.as_ref().quit_id(); for b in self.dfa.quitset.iter() { self.set_transition(id, alphabet::Unit::u8(b), quit_id); } } self.cache.memory_usage_state += state.memory_usage(); self.cache.states.push(state.clone()); self.cache.states_to_id.insert(state, id); Ok(id) } /// Allocate a new state ID. /// /// This will never return an unknown lazy state ID. /// /// If caching this state would otherwise result in a cache that has been /// cleared too many times, then an error is returned. fn next_state_id(&mut self) -> Result<LazyStateID, CacheError> { let sid = match LazyStateID::new(self.cache.trans.len()) { Ok(sid) => sid, Err(_) => { self.try_clear_cache()?; // This has to pass since we check that ID capacity at // construction time can fit at least MIN_STATES states. LazyStateID::new(self.cache.trans.len()).unwrap() } }; Ok(sid) } /// Attempt to clear the cache used by this lazy DFA. /// /// If clearing the cache exceeds the minimum number of required cache /// clearings, then this will return a cache error. In this case, /// callers should bubble this up as the cache can't be used until it is /// reset. Implementations of search should convert this error into a /// [`MatchError::gave_up`]. /// /// If 'self.state_saver' is set to save a state, then this state is /// persisted through cache clearing. Otherwise, the cache is returned to /// its state after initialization with two exceptions: its clear count /// is incremented and some of its memory likely has additional capacity. /// That is, clearing a cache does _not_ release memory. /// /// Otherwise, any lazy state ID generated by the cache prior to resetting /// it is invalid after the reset. 
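    ///
    /// For reference, this error typically bubbles up out of lazy state
    /// construction and is converted into a "gave up" match error, roughly
    /// as 'cache_start_group' does above:
    ///
    /// ```ignore
    /// let id = self
    ///     .cache_start_one(nfa_start_id, start)
    ///     .map_err(|_| MatchError::gave_up(input.start()))?;
    /// ```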
fn try_clear_cache(&mut self) -> Result<(), CacheError> { let c = self.dfa.get_config(); if let Some(min_count) = c.get_minimum_cache_clear_count() { if self.cache.clear_count >= min_count { if let Some(min_bytes_per) = c.get_minimum_bytes_per_state() { let len = self.cache.search_total_len(); let min_bytes = min_bytes_per.saturating_mul(self.cache.states.len()); // If we've searched 0 bytes then probably something has // gone wrong and the lazy DFA search implementation isn't // correctly updating the search progress state. if len == 0 { trace!( "number of bytes searched is 0, but \ a minimum bytes per state searched ({}) is \ enabled, maybe Cache::search_update \ is not being used?", min_bytes_per, ); } if len < min_bytes { trace!( "lazy DFA cache has been cleared {} times, \ which exceeds the limit of {}, \ AND its bytes searched per state is less \ than the configured minimum of {}, \ therefore lazy DFA is giving up \ (bytes searched since cache clear = {}, \ number of states = {})", self.cache.clear_count, min_count, min_bytes_per, len, self.cache.states.len(), ); return Err(CacheError::bad_efficiency()); } else { trace!( "lazy DFA cache has been cleared {} times, \ which exceeds the limit of {}, \ AND its bytes searched per state is greater \ than the configured minimum of {}, \ therefore lazy DFA is continuing! \ (bytes searched since cache clear = {}, \ number of states = {})", self.cache.clear_count, min_count, min_bytes_per, len, self.cache.states.len(), ); } } else { trace!( "lazy DFA cache has been cleared {} times, \ which exceeds the limit of {}, \ since there is no configured bytes per state \ minimum, lazy DFA is giving up", self.cache.clear_count, min_count, ); return Err(CacheError::too_many_cache_clears()); } } } self.clear_cache(); Ok(()) } /// Clears _and_ resets the cache. Resetting the cache means that no /// states are persisted and the clear count is reset to 0. No heap memory /// is released. /// /// Note that the caller may reset a cache with a different DFA than what /// it was created from. In which case, the cache can now be used with the /// new DFA (and not the old DFA). fn reset_cache(&mut self) { self.cache.state_saver = StateSaver::none(); self.clear_cache(); // If a new DFA is used, it might have a different number of NFA // states, so we need to make sure our sparse sets have the appropriate // size. self.cache.sparses.resize(self.dfa.get_nfa().states().len()); self.cache.clear_count = 0; self.cache.progress = None; } /// Clear the cache used by this lazy DFA. /// /// If 'self.state_saver' is set to save a state, then this state is /// persisted through cache clearing. Otherwise, the cache is returned to /// its state after initialization with two exceptions: its clear count /// is incremented and some of its memory likely has additional capacity. /// That is, clearing a cache does _not_ release memory. /// /// Otherwise, any lazy state ID generated by the cache prior to resetting /// it is invalid after the reset. 
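    ///
    /// For reference, the state saving dance that this participates in looks
    /// roughly like the following simplified sketch of 'cache_next_state'
    /// above:
    ///
    /// ```ignore
    /// self.save_state(current);
    /// let next = self.add_builder_state(builder, |sid| sid)?; // may clear
    /// current = self.saved_state_id();
    /// ```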
fn clear_cache(&mut self) { self.cache.trans.clear(); self.cache.starts.clear(); self.cache.states.clear(); self.cache.states_to_id.clear(); self.cache.memory_usage_state = 0; self.cache.clear_count += 1; self.cache.bytes_searched = 0; if let Some(ref mut progress) = self.cache.progress { progress.start = progress.at; } trace!( "lazy DFA cache has been cleared (count: {})", self.cache.clear_count ); self.init_cache(); // If the state we want to save is one of the sentinel // (unknown/dead/quit) states, then 'init_cache' adds those back, and // their identifier values remains invariant. So there's no need to add // it again. (And indeed, doing so would be incorrect!) if let Some((old_id, state)) = self.cache.state_saver.take_to_save() { // If the state is one of the special sentinel states, then it is // automatically added by cache initialization and its ID always // remains the same. With that said, this should never occur since // the sentinel states are all loop states back to themselves. So // we should never be in a position where we're attempting to save // a sentinel state since we never compute transitions out of a // sentinel state. assert!( !self.as_ref().is_sentinel(old_id), "cannot save sentinel state" ); let new_id = self .add_state(state, |id| { if old_id.is_start() { // We don't need to consult the // 'specialize_start_states' config knob here, because // if it's disabled, old_id.is_start() will never // return true. id.to_start() } else { id } }) // The unwrap here is OK because lazy DFA creation ensures that // we have room in the cache to add MIN_STATES states. Since // 'init_cache' above adds 3, this adds a 4th. .expect("adding one state after cache clear must work"); self.cache.state_saver = StateSaver::Saved(new_id); } } /// Initialize this cache from emptiness to a place where it can be used /// for search. /// /// This is called both at cache creation time and after the cache has been /// cleared. /// /// Primarily, this adds the three sentinel states and allocates some /// initial memory. fn init_cache(&mut self) { // Why multiply by 2 here? Because we make room for both the unanchored // and anchored start states. Unanchored is first and then anchored. let mut starts_len = Start::len().checked_mul(2).unwrap(); // ... but if we also want start states for every pattern, we make room // for that too. if self.dfa.get_config().get_starts_for_each_pattern() { starts_len += Start::len() * self.dfa.pattern_len(); } self.cache .starts .extend(iter::repeat(self.as_ref().unknown_id()).take(starts_len)); // This is the set of NFA states that corresponds to each of our three // sentinel states: the empty set. let dead = State::dead(); // This sets up some states that we use as sentinels that are present // in every DFA. While it would be technically possible to implement // this DFA without explicitly putting these states in the transition // table, this is convenient to do to make `next_state` correct for all // valid state IDs without needing explicit conditionals to special // case these sentinel states. // // All three of these states are "dead" states. That is, all of // them transition only to themselves. So once you enter one of // these states, it's impossible to leave them. Thus, any correct // search routine must explicitly check for these state types. (Sans // `unknown`, since that is only used internally to represent missing // states.) 
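        // Note: because these are the first three states added and each
        // state occupies one stride's worth of transition table slots, their
        // untagged IDs work out to 0, 1<<stride2 and 2<<stride2. That is
        // exactly what 'unknown_id', 'dead_id' and 'quit_id' rely on, and
        // it's what the asserts below double check.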
let unk_id = self.add_state(dead.clone(), |id| id.to_unknown()).unwrap(); let dead_id = self.add_state(dead.clone(), |id| id.to_dead()).unwrap(); let quit_id = self.add_state(dead.clone(), |id| id.to_quit()).unwrap(); assert_eq!(unk_id, self.as_ref().unknown_id()); assert_eq!(dead_id, self.as_ref().dead_id()); assert_eq!(quit_id, self.as_ref().quit_id()); // The idea here is that if you start in an unknown/dead/quit state and // try to transition on them, then you should end up where you started. self.set_all_transitions(unk_id, unk_id); self.set_all_transitions(dead_id, dead_id); self.set_all_transitions(quit_id, quit_id); // All of these states are technically equivalent from the FSM // perspective, so putting all three of them in the cache isn't // possible. (They are distinct merely because we use their // identifiers as sentinels to mean something, as indicated by the // names.) Moreover, we wouldn't want to do that. Unknown and quit // states are special in that they are artificial constructions // this implementation. But dead states are a natural part of // determinization. When you reach a point in the NFA where you cannot // go anywhere else, a dead state will naturally arise and we MUST // reuse the canonical dead state that we've created here. Why? Because // it is the state ID that tells the search routine whether a state is // dead or not, and thus, whether to stop the search. Having a bunch of // distinct dead states would be quite wasteful! self.cache.states_to_id.insert(dead, dead_id); } /// Save the state corresponding to the ID given such that the state /// persists through a cache clearing. /// /// While the state may persist, the ID may not. In order to discover the /// new state ID, one must call 'saved_state_id' after a cache clearing. fn save_state(&mut self, id: LazyStateID) { let state = self.as_ref().get_cached_state(id).clone(); self.cache.state_saver = StateSaver::ToSave { id, state }; } /// Returns the updated lazy state ID for a state that was persisted /// through a cache clearing. /// /// It is only correct to call this routine when both a state has been /// saved and the cache has just been cleared. Otherwise, this panics. fn saved_state_id(&mut self) -> LazyStateID { self.cache .state_saver .take_saved() .expect("state saver does not have saved state ID") } /// Set all transitions on the state 'from' to 'to'. fn set_all_transitions(&mut self, from: LazyStateID, to: LazyStateID) { for unit in self.dfa.classes.representatives(..) { self.set_transition(from, unit, to); } } /// Set the transition on 'from' for 'unit' to 'to'. /// /// This panics if either 'from' or 'to' is invalid. /// /// All unit values are OK. fn set_transition( &mut self, from: LazyStateID, unit: alphabet::Unit, to: LazyStateID, ) { assert!(self.as_ref().is_valid(from), "invalid 'from' id: {:?}", from); assert!(self.as_ref().is_valid(to), "invalid 'to' id: {:?}", to); let offset = from.as_usize_untagged() + self.dfa.classes.get_by_unit(unit); self.cache.trans[offset] = to; } /// Set the start ID for the given pattern ID (if given) and starting /// configuration to the ID given. /// /// This panics if 'id' is not valid or if a pattern ID is given and /// 'starts_for_each_pattern' is not enabled. 
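    ///
    /// For reference, the index computed below mirrors the layout allocated
    /// in 'init_cache': unanchored start states first, then anchored start
    /// states, then (optionally) one block of anchored start states per
    /// pattern. Roughly:
    ///
    /// ```ignore
    /// // Anchored::No           => start.as_usize()
    /// // Anchored::Yes          => Start::len() + start.as_usize()
    /// // Anchored::Pattern(pid) => (2 * Start::len())
    /// //                           + (Start::len() * pid.as_usize())
    /// //                           + start.as_usize()
    /// ```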
fn set_start_state( &mut self, input: &Input<'_>, start: Start, id: LazyStateID, ) { assert!(self.as_ref().is_valid(id)); let start_index = start.as_usize(); let index = match input.get_anchored() { Anchored::No => start_index, Anchored::Yes => Start::len() + start_index, Anchored::Pattern(pid) => { assert!( self.dfa.get_config().get_starts_for_each_pattern(), "attempted to search for a specific pattern \ without enabling starts_for_each_pattern", ); let pid = pid.as_usize(); (2 * Start::len()) + (Start::len() * pid) + start_index } }; self.cache.starts[index] = id; } /// Returns a state builder from this DFA that might have existing /// capacity. This helps avoid allocs in cases where a state is built that /// turns out to already be cached. /// /// Callers must put the state builder back with 'put_state_builder', /// otherwise the allocation reuse won't work. fn get_state_builder(&mut self) -> StateBuilderEmpty { core::mem::replace( &mut self.cache.scratch_state_builder, StateBuilderEmpty::new(), ) } /// Puts the given state builder back into this DFA for reuse. /// /// Note that building a 'State' from a builder always creates a new alloc, /// so callers should always put the builder back. fn put_state_builder(&mut self, builder: StateBuilderNFA) { let _ = core::mem::replace( &mut self.cache.scratch_state_builder, builder.clear(), ); } } /// A type that groups methods that require the base NFA/DFA and read-only /// access to the cache. #[derive(Debug)] struct LazyRef<'i, 'c> { dfa: &'i DFA, cache: &'c Cache, } impl<'i, 'c> LazyRef<'i, 'c> { /// Creates a new 'Lazy' wrapper for a DFA and its corresponding cache. fn new(dfa: &'i DFA, cache: &'c Cache) -> LazyRef<'i, 'c> { LazyRef { dfa, cache } } /// Return the ID of the start state for the given configuration. /// /// If the start state has not yet been computed, then this returns an /// unknown lazy state ID. #[cfg_attr(feature = "perf-inline", inline(always))] fn get_cached_start_id( &self, input: &Input<'_>, start: Start, ) -> Result<LazyStateID, MatchError> { let start_index = start.as_usize(); let mode = input.get_anchored(); let index = match mode { Anchored::No => start_index, Anchored::Yes => Start::len() + start_index, Anchored::Pattern(pid) => { if !self.dfa.get_config().get_starts_for_each_pattern() { return Err(MatchError::unsupported_anchored(mode)); } if pid.as_usize() >= self.dfa.pattern_len() { return Ok(self.dead_id()); } (2 * Start::len()) + (Start::len() * pid.as_usize()) + start_index } }; Ok(self.cache.starts[index]) } /// Return the cached NFA/DFA powerset state for the given ID. /// /// This panics if the given ID does not address a valid state. fn get_cached_state(&self, sid: LazyStateID) -> &State { let index = sid.as_usize_untagged() >> self.dfa.stride2(); &self.cache.states[index] } /// Returns true if and only if the given ID corresponds to a "sentinel" /// state. /// /// A sentinel state is a state that signifies a special condition of /// search, and where every transition maps back to itself. See LazyStateID /// for more details. Note that start and match states are _not_ sentinels /// since they may otherwise be real states with non-trivial transitions. /// The purposes of sentinel states is purely to indicate something. Their /// transitions are not meant to be followed. fn is_sentinel(&self, id: LazyStateID) -> bool { id == self.unknown_id() || id == self.dead_id() || id == self.quit_id() } /// Returns the ID of the unknown state for this lazy DFA. 
fn unknown_id(&self) -> LazyStateID { // This unwrap is OK since 0 is always a valid state ID. LazyStateID::new(0).unwrap().to_unknown() } /// Returns the ID of the dead state for this lazy DFA. fn dead_id(&self) -> LazyStateID { // This unwrap is OK since the maximum value here is 1 * 512 = 512, // which is <= 2047 (the maximum state ID on 16-bit systems). Where // 512 is the worst case for our equivalence classes (every byte is a // distinct class). LazyStateID::new(1 << self.dfa.stride2()).unwrap().to_dead() } /// Returns the ID of the quit state for this lazy DFA. fn quit_id(&self) -> LazyStateID { // This unwrap is OK since the maximum value here is 2 * 512 = 1024, // which is <= 2047 (the maximum state ID on 16-bit systems). Where // 512 is the worst case for our equivalence classes (every byte is a // distinct class). LazyStateID::new(2 << self.dfa.stride2()).unwrap().to_quit() } /// Returns true if and only if the given ID is valid. /// /// An ID is valid if it is both a valid index into the transition table /// and is a multiple of the DFA's stride. fn is_valid(&self, id: LazyStateID) -> bool { let id = id.as_usize_untagged(); id < self.cache.trans.len() && id % self.dfa.stride() == 0 } /// Returns true if adding the state given would fit in this cache. fn state_fits_in_cache(&self, state: &State) -> bool { let needed = self.cache.memory_usage() + self.memory_usage_for_one_more_state(state.memory_usage()); trace!( "lazy DFA cache capacity check: {:?} ?<=? {:?}", needed, self.dfa.cache_capacity ); needed <= self.dfa.cache_capacity } /// Returns true if adding the state to be built by the given builder would /// fit in this cache. fn state_builder_fits_in_cache(&self, state: &StateBuilderNFA) -> bool { let needed = self.cache.memory_usage() + self.memory_usage_for_one_more_state(state.as_bytes().len()); needed <= self.dfa.cache_capacity } /// Returns the additional memory usage, in bytes, required to add one more /// state to this cache. The given size should be the heap size, in bytes, /// that would be used by the new state being added. fn memory_usage_for_one_more_state( &self, state_heap_size: usize, ) -> usize { const ID_SIZE: usize = size_of::<LazyStateID>(); const STATE_SIZE: usize = size_of::<State>(); self.dfa.stride() * ID_SIZE // additional space needed in trans table + STATE_SIZE // space in cache.states + (STATE_SIZE + ID_SIZE) // space in cache.states_to_id + state_heap_size // heap memory used by state itself } } /// A simple type that encapsulates the saving of a state ID through a cache /// clearing. /// /// A state ID can be marked for saving with ToSave, while a state ID can be /// saved itself with Saved. #[derive(Clone, Debug)] enum StateSaver { /// An empty state saver. In this case, no states (other than the special /// sentinel states) are preserved after clearing the cache. None, /// An ID of a state (and the state itself) that should be preserved after /// the lazy DFA's cache has been cleared. After clearing, the updated ID /// is stored in 'Saved' since it may have changed. ToSave { id: LazyStateID, state: State }, /// An ID that of a state that has been persisted through a lazy DFA /// cache clearing. The ID recorded here corresponds to an ID that was /// once marked as ToSave. The IDs are likely not equivalent even though /// the states they point to are. Saved(LazyStateID), } impl StateSaver { /// Create an empty state saver. 
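    ///
    /// For context, the typical lifecycle of a saver (assuming the cache is
    /// actually cleared in between) is roughly:
    ///
    /// ```ignore
    /// // StateSaver::None
    /// //   --save_state-->   StateSaver::ToSave { id, state }
    /// //   --clear_cache-->  StateSaver::Saved(new_id)
    /// //   --take_saved-->   StateSaver::None (yielding new_id)
    /// ```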
fn none() -> StateSaver { StateSaver::None } /// Replace this state saver with an empty saver, and if this saver is a /// request to save a state, return that request. fn take_to_save(&mut self) -> Option<(LazyStateID, State)> { match core::mem::replace(self, StateSaver::None) { StateSaver::None | StateSaver::Saved(_) => None, StateSaver::ToSave { id, state } => Some((id, state)), } } /// Replace this state saver with an empty saver, and if this saver is a /// saved state (or a request to save a state), return that state's ID. /// /// The idea here is that a request to save a state isn't necessarily /// honored because it might not be needed. e.g., Some higher level code /// might request a state to be saved on the off chance that the cache gets /// cleared when a new state is added at a lower level. But if that new /// state is never added, then the cache is never cleared and the state and /// its ID remain unchanged. fn take_saved(&mut self) -> Option<LazyStateID> { match core::mem::replace(self, StateSaver::None) { StateSaver::None => None, StateSaver::Saved(id) | StateSaver::ToSave { id, .. } => Some(id), } } } /// The configuration used for building a lazy DFA. /// /// As a convenience, [`DFA::config`] is an alias for [`Config::new`]. The /// advantage of the former is that it often lets you avoid importing the /// `Config` type directly. /// /// A lazy DFA configuration is a simple data object that is typically used /// with [`Builder::configure`]. /// /// The default configuration guarantees that a search will never return a /// "gave up" or "quit" error, although it is possible for a search to fail /// if [`Config::starts_for_each_pattern`] wasn't enabled (which it is not by /// default) and an [`Anchored::Pattern`] mode is requested via [`Input`]. #[derive(Clone, Debug, Default)] pub struct Config { // As with other configuration types in this crate, we put all our knobs // in options so that we can distinguish between "default" and "not set." // This makes it possible to easily combine multiple configurations // without default values overwriting explicitly specified values. See the // 'overwrite' method. // // For docs on the fields below, see the corresponding method setters. match_kind: Option<MatchKind>, pre: Option<Option<Prefilter>>, starts_for_each_pattern: Option<bool>, byte_classes: Option<bool>, unicode_word_boundary: Option<bool>, quitset: Option<ByteSet>, specialize_start_states: Option<bool>, cache_capacity: Option<usize>, skip_cache_capacity_check: Option<bool>, minimum_cache_clear_count: Option<Option<usize>>, minimum_bytes_per_state: Option<Option<usize>>, } impl Config { /// Return a new default lazy DFA builder configuration. pub fn new() -> Config { Config::default() } /// Set the desired match semantics. /// /// The default is [`MatchKind::LeftmostFirst`], which corresponds to the /// match semantics of Perl-like regex engines. That is, when multiple /// patterns would match at the same leftmost position, the pattern that /// appears first in the concrete syntax is chosen. /// /// Currently, the only other kind of match semantics supported is /// [`MatchKind::All`]. This corresponds to classical DFA construction /// where all possible matches are added to the lazy DFA. /// /// Typically, `All` is used when one wants to execute an overlapping /// search and `LeftmostFirst` otherwise. 
In particular, it rarely makes /// sense to use `All` with the various "leftmost" find routines, since the /// leftmost routines depend on the `LeftmostFirst` automata construction /// strategy. Specifically, `LeftmostFirst` adds dead states to the /// lazy DFA as a way to terminate the search and report a match. /// `LeftmostFirst` also supports non-greedy matches using this strategy /// where as `All` does not. /// /// # Example: overlapping search /// /// This example shows the typical use of `MatchKind::All`, which is to /// report overlapping matches. /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{ /// hybrid::dfa::{DFA, OverlappingState}, /// HalfMatch, Input, MatchKind, /// }; /// /// let dfa = DFA::builder() /// .configure(DFA::config().match_kind(MatchKind::All)) /// .build_many(&[r"\w+$", r"\S+$"])?; /// let mut cache = dfa.create_cache(); /// let haystack = "@foo"; /// let mut state = OverlappingState::start(); /// /// let expected = Some(HalfMatch::must(1, 4)); /// dfa.try_search_overlapping_fwd( /// &mut cache, &Input::new(haystack), &mut state, /// )?; /// assert_eq!(expected, state.get_match()); /// /// // The first pattern also matches at the same position, so re-running /// // the search will yield another match. Notice also that the first /// // pattern is returned after the second. This is because the second /// // pattern begins its match before the first, is therefore an earlier /// // match and is thus reported first. /// let expected = Some(HalfMatch::must(0, 4)); /// dfa.try_search_overlapping_fwd( /// &mut cache, &Input::new(haystack), &mut state, /// )?; /// assert_eq!(expected, state.get_match()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// # Example: reverse automaton to find start of match /// /// Another example for using `MatchKind::All` is for constructing a /// reverse automaton to find the start of a match. `All` semantics are /// used for this in order to find the longest possible match, which /// corresponds to the leftmost starting position. /// /// Note that if you need the starting position then /// [`hybrid::regex::Regex`](crate::hybrid::regex::Regex) will handle this /// for you, so it's usually not necessary to do this yourself. /// /// ``` /// use regex_automata::{ /// hybrid::dfa::DFA, /// nfa::thompson::NFA, /// Anchored, HalfMatch, Input, MatchKind, /// }; /// /// let input = Input::new("123foobar456"); /// let pattern = r"[a-z]+r"; /// /// let dfa_fwd = DFA::new(pattern)?; /// let dfa_rev = DFA::builder() /// .thompson(NFA::config().reverse(true)) /// .configure(DFA::config().match_kind(MatchKind::All)) /// .build(pattern)?; /// let mut cache_fwd = dfa_fwd.create_cache(); /// let mut cache_rev = dfa_rev.create_cache(); /// /// let expected_fwd = HalfMatch::must(0, 9); /// let expected_rev = HalfMatch::must(0, 3); /// let got_fwd = dfa_fwd.try_search_fwd(&mut cache_fwd, &input)?.unwrap(); /// // Here we don't specify the pattern to search for since there's only /// // one pattern and we're doing a leftmost search. But if this were an /// // overlapping search, you'd need to specify the pattern that matched /// // in the forward direction. (Otherwise, you might wind up finding the /// // starting position of a match of some other pattern.) That in turn /// // requires building the reverse automaton with starts_for_each_pattern /// // enabled. 
/// let input = input /// .clone() /// .range(..got_fwd.offset()) /// .anchored(Anchored::Yes); /// let got_rev = dfa_rev.try_search_rev(&mut cache_rev, &input)?.unwrap(); /// assert_eq!(expected_fwd, got_fwd); /// assert_eq!(expected_rev, got_rev); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn match_kind(mut self, kind: MatchKind) -> Config { self.match_kind = Some(kind); self } /// Set a prefilter to be used whenever a start state is entered. /// /// A [`Prefilter`] in this context is meant to accelerate searches by /// looking for literal prefixes that every match for the corresponding /// pattern (or patterns) must start with. Once a prefilter produces a /// match, the underlying search routine continues on to try and confirm /// the match. /// /// Be warned that setting a prefilter does not guarantee that the search /// will be faster. While it's usually a good bet, if the prefilter /// produces a lot of false positive candidates (i.e., positions matched /// by the prefilter but not by the regex), then the overall result can /// be slower than if you had just executed the regex engine without any /// prefilters. /// /// Note that unless [`Config::specialize_start_states`] has been /// explicitly set, then setting this will also enable (when `pre` is /// `Some`) or disable (when `pre` is `None`) start state specialization. /// This occurs because without start state specialization, a prefilter /// is likely to be less effective. And without a prefilter, start state /// specialization is usually pointless. /// /// By default no prefilter is set. /// /// # Example /// /// ``` /// use regex_automata::{ /// hybrid::dfa::DFA, /// util::prefilter::Prefilter, /// Input, HalfMatch, MatchKind, /// }; /// /// let pre = Prefilter::new(MatchKind::LeftmostFirst, &["foo", "bar"]); /// let re = DFA::builder() /// .configure(DFA::config().prefilter(pre)) /// .build(r"(foo|bar)[a-z]+")?; /// let mut cache = re.create_cache(); /// let input = Input::new("foo1 barfox bar"); /// assert_eq!( /// Some(HalfMatch::must(0, 11)), /// re.try_search_fwd(&mut cache, &input)?, /// ); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// Be warned though that an incorrect prefilter can lead to incorrect /// results! /// /// ``` /// use regex_automata::{ /// hybrid::dfa::DFA, /// util::prefilter::Prefilter, /// Input, HalfMatch, MatchKind, /// }; /// /// let pre = Prefilter::new(MatchKind::LeftmostFirst, &["foo", "car"]); /// let re = DFA::builder() /// .configure(DFA::config().prefilter(pre)) /// .build(r"(foo|bar)[a-z]+")?; /// let mut cache = re.create_cache(); /// let input = Input::new("foo1 barfox bar"); /// assert_eq!( /// // No match reported even though there clearly is one! /// None, /// re.try_search_fwd(&mut cache, &input)?, /// ); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn prefilter(mut self, pre: Option<Prefilter>) -> Config { self.pre = Some(pre); if self.specialize_start_states.is_none() { self.specialize_start_states = Some(self.get_prefilter().is_some()); } self } /// Whether to compile a separate start state for each pattern in the /// lazy DFA. /// /// When enabled, a separate **anchored** start state is added for each /// pattern in the lazy DFA. When this start state is used, then the DFA /// will only search for matches for the pattern specified, even if there /// are other patterns in the DFA. 
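    ///
    /// As a quick sketch (the full example further below shows this end to
    /// end), such an anchored search for one particular pattern is requested
    /// through the `Input` configuration:
    ///
    /// ```ignore
    /// let input = Input::new(haystack)
    ///     .anchored(Anchored::Pattern(PatternID::must(1)));
    /// ```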
/// /// The main downside of this option is that it can potentially increase /// the size of the DFA and/or increase the time it takes to build the /// DFA at search time. However, since this is configuration for a lazy /// DFA, these states aren't actually built unless they're used. Enabling /// this isn't necessarily free, however, as it may result in higher cache /// usage. /// /// There are a few reasons one might want to enable this (it's disabled /// by default): /// /// 1. When looking for the start of an overlapping match (using a reverse /// DFA), doing it correctly requires starting the reverse search using the /// starting state of the pattern that matched in the forward direction. /// Indeed, when building a [`Regex`](crate::hybrid::regex::Regex), it /// will automatically enable this option when building the reverse DFA /// internally. /// 2. When you want to use a DFA with multiple patterns to both search /// for matches of any pattern or to search for anchored matches of one /// particular pattern while using the same DFA. (Otherwise, you would need /// to compile a new DFA for each pattern.) /// /// By default this is disabled. /// /// # Example /// /// This example shows how to use this option to permit the same lazy DFA /// to run both general searches for any pattern and anchored searches for /// a specific pattern. /// /// ``` /// use regex_automata::{ /// hybrid::dfa::DFA, /// Anchored, HalfMatch, Input, PatternID, /// }; /// /// let dfa = DFA::builder() /// .configure(DFA::config().starts_for_each_pattern(true)) /// .build_many(&[r"[a-z0-9]{6}", r"[a-z][a-z0-9]{5}"])?; /// let mut cache = dfa.create_cache(); /// let haystack = "bar foo123"; /// /// // Here's a normal unanchored search that looks for any pattern. /// let expected = HalfMatch::must(0, 10); /// let input = Input::new(haystack); /// assert_eq!(Some(expected), dfa.try_search_fwd(&mut cache, &input)?); /// // We can also do a normal anchored search for any pattern. Since it's /// // an anchored search, we position the start of the search where we /// // know the match will begin. /// let expected = HalfMatch::must(0, 10); /// let input = Input::new(haystack).range(4..); /// assert_eq!(Some(expected), dfa.try_search_fwd(&mut cache, &input)?); /// // Since we compiled anchored start states for each pattern, we can /// // also look for matches of other patterns explicitly, even if a /// // different pattern would have normally matched. /// let expected = HalfMatch::must(1, 10); /// let input = Input::new(haystack) /// .range(4..) /// .anchored(Anchored::Pattern(PatternID::must(1))); /// assert_eq!(Some(expected), dfa.try_search_fwd(&mut cache, &input)?); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn starts_for_each_pattern(mut self, yes: bool) -> Config { self.starts_for_each_pattern = Some(yes); self } /// Whether to attempt to shrink the size of the lazy DFA's alphabet or /// not. /// /// This option is enabled by default and should never be disabled unless /// one is debugging the lazy DFA. /// /// When enabled, the lazy DFA will use a map from all possible bytes /// to their corresponding equivalence class. Each equivalence class /// represents a set of bytes that does not discriminate between a match /// and a non-match in the DFA. For example, the pattern `[ab]+` has at /// least two equivalence classes: a set containing `a` and `b` and a set /// containing every byte except for `a` and `b`. 
`a` and `b` are in the /// same equivalence classes because they never discriminate between a /// match and a non-match. /// /// The advantage of this map is that the size of the transition table /// can be reduced drastically from `#states * 256 * sizeof(LazyStateID)` /// to `#states * k * sizeof(LazyStateID)` where `k` is the number of /// equivalence classes (rounded up to the nearest power of 2). As a /// result, total space usage can decrease substantially. Moreover, since a /// smaller alphabet is used, DFA compilation during search becomes faster /// as well since it will potentially be able to reuse a single transition /// for multiple bytes. /// /// **WARNING:** This is only useful for debugging lazy DFAs. Disabling /// this does not yield any speed advantages. Namely, even when this is /// disabled, a byte class map is still used while searching. The only /// difference is that every byte will be forced into its own distinct /// equivalence class. This is useful for debugging the actual generated /// transitions because it lets one see the transitions defined on actual /// bytes instead of the equivalence classes. pub fn byte_classes(mut self, yes: bool) -> Config { self.byte_classes = Some(yes); self } /// Heuristically enable Unicode word boundaries. /// /// When set, this will attempt to implement Unicode word boundaries as if /// they were ASCII word boundaries. This only works when the search input /// is ASCII only. If a non-ASCII byte is observed while searching, then a /// [`MatchError::quit`] error is returned. /// /// A possible alternative to enabling this option is to simply use an /// ASCII word boundary, e.g., via `(?-u:\b)`. The main reason to use this /// option is if you absolutely need Unicode support. This option lets one /// use a fast search implementation (a DFA) for some potentially very /// common cases, while providing the option to fall back to some other /// regex engine to handle the general case when an error is returned. /// /// If the pattern provided has no Unicode word boundary in it, then this /// option has no effect. (That is, quitting on a non-ASCII byte only /// occurs when this option is enabled _and_ a Unicode word boundary is /// present in the pattern.) /// /// This is almost equivalent to setting all non-ASCII bytes to be quit /// bytes. The only difference is that this will cause non-ASCII bytes to /// be quit bytes _only_ when a Unicode word boundary is present in the /// pattern. /// /// When enabling this option, callers _must_ be prepared to handle /// a [`MatchError`](crate::MatchError) error during search. /// When using a [`Regex`](crate::hybrid::regex::Regex), this /// corresponds to using the `try_` suite of methods. Alternatively, /// if callers can guarantee that their input is ASCII only, then a /// [`MatchError::quit`] error will never be returned while searching. /// /// This is disabled by default. /// /// # Example /// /// This example shows how to heuristically enable Unicode word boundaries /// in a pattern. It also shows what happens when a search comes across a /// non-ASCII byte. /// /// ``` /// use regex_automata::{ /// hybrid::dfa::DFA, /// HalfMatch, Input, MatchError, /// }; /// /// let dfa = DFA::builder() /// .configure(DFA::config().unicode_word_boundary(true)) /// .build(r"\b[0-9]+\b")?; /// let mut cache = dfa.create_cache(); /// /// // The match occurs before the search ever observes the snowman /// // character, so no error occurs. 
/// let haystack = "foo 123 ☃"; /// let expected = Some(HalfMatch::must(0, 7)); /// let got = dfa.try_search_fwd(&mut cache, &Input::new(haystack))?; /// assert_eq!(expected, got); /// /// // Notice that this search fails, even though the snowman character /// // occurs after the ending match offset. This is because search /// // routines read one byte past the end of the search to account for /// // look-around, and indeed, this is required here to determine whether /// // the trailing \b matches. /// let haystack = "foo 123 ☃"; /// let expected = MatchError::quit(0xE2, 8); /// let got = dfa.try_search_fwd(&mut cache, &Input::new(haystack)); /// assert_eq!(Err(expected), got); /// /// // Another example is executing a search where the span of the haystack /// // we specify is all ASCII, but there is non-ASCII just before it. This /// // correctly also reports an error. /// let input = Input::new("β123").range(2..); /// let expected = MatchError::quit(0xB2, 1); /// let got = dfa.try_search_fwd(&mut cache, &input); /// assert_eq!(Err(expected), got); /// /// // And similarly for the trailing word boundary. /// let input = Input::new("123β").range(..3); /// let expected = MatchError::quit(0xCE, 3); /// let got = dfa.try_search_fwd(&mut cache, &input); /// assert_eq!(Err(expected), got); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn unicode_word_boundary(mut self, yes: bool) -> Config { // We have a separate option for this instead of just setting the // appropriate quit bytes here because we don't want to set quit bytes // for every regex. We only want to set them when the regex contains a // Unicode word boundary. self.unicode_word_boundary = Some(yes); self } /// Add a "quit" byte to the lazy DFA. /// /// When a quit byte is seen during search time, then search will return a /// [`MatchError::quit`] error indicating the offset at which the search /// stopped. /// /// A quit byte will always overrule any other aspects of a regex. For /// example, if the `x` byte is added as a quit byte and the regex `\w` is /// used, then observing `x` will cause the search to quit immediately /// despite the fact that `x` is in the `\w` class. /// /// This mechanism is primarily useful for heuristically enabling certain /// features like Unicode word boundaries in a DFA. Namely, if the input /// to search is ASCII, then a Unicode word boundary can be implemented /// via an ASCII word boundary with no change in semantics. Thus, a DFA /// can attempt to match a Unicode word boundary but give up as soon as it /// observes a non-ASCII byte. Indeed, if callers set all non-ASCII bytes /// to be quit bytes, then Unicode word boundaries will be permitted when /// building lazy DFAs. Of course, callers should enable /// [`Config::unicode_word_boundary`] if they want this behavior instead. /// (The advantage being that non-ASCII quit bytes will only be added if a /// Unicode word boundary is in the pattern.) /// /// When enabling this option, callers _must_ be prepared to handle a /// [`MatchError`](crate::MatchError) error during search. When using a /// [`Regex`](crate::hybrid::regex::Regex), this corresponds to using the /// `try_` suite of methods. /// /// By default, there are no quit bytes set. /// /// # Panics /// /// This panics if heuristic Unicode word boundaries are enabled and any /// non-ASCII byte is removed from the set of quit bytes. Namely, enabling /// Unicode word boundaries requires setting every non-ASCII byte to a quit /// byte. 
So if the caller attempts to undo any of that, then this will /// panic. /// /// # Example /// /// This example shows how to cause a search to terminate if it sees a /// `\n` byte. This could be useful if, for example, you wanted to prevent /// a user supplied pattern from matching across a line boundary. /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{hybrid::dfa::DFA, MatchError, Input}; /// /// let dfa = DFA::builder() /// .configure(DFA::config().quit(b'\n', true)) /// .build(r"foo\p{any}+bar")?; /// let mut cache = dfa.create_cache(); /// /// let haystack = "foo\nbar"; /// // Normally this would produce a match, since \p{any} contains '\n'. /// // But since we instructed the automaton to enter a quit state if a /// // '\n' is observed, this produces a match error instead. /// let expected = MatchError::quit(b'\n', 3); /// let got = dfa.try_search_fwd( /// &mut cache, /// &Input::new(haystack), /// ).unwrap_err(); /// assert_eq!(expected, got); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn quit(mut self, byte: u8, yes: bool) -> Config { if self.get_unicode_word_boundary() && !byte.is_ascii() && !yes { panic!( "cannot set non-ASCII byte to be non-quit when \ Unicode word boundaries are enabled" ); } if self.quitset.is_none() { self.quitset = Some(ByteSet::empty()); } if yes { self.quitset.as_mut().unwrap().add(byte); } else { self.quitset.as_mut().unwrap().remove(byte); } self } /// Enable specializing start states in the lazy DFA. /// /// When start states are specialized, an implementor of a search routine /// using a lazy DFA can tell when the search has entered a starting state. /// When start states aren't specialized, then it is impossible to know /// whether the search has entered a start state. /// /// Ideally, this option wouldn't need to exist and we could always /// specialize start states. The problem is that start states can be quite /// active. This in turn means that an efficient search routine is likely /// to ping-pong between a heavily optimized hot loop that handles most /// states and to a less optimized specialized handling of start states. /// This causes branches to get heavily mispredicted and overall can /// materially decrease throughput. Therefore, specializing start states /// should only be enabled when it is needed. /// /// Knowing whether a search is in a start state is typically useful when a /// prefilter is active for the search. A prefilter is typically only run /// when in a start state and a prefilter can greatly accelerate a search. /// Therefore, the possible cost of specializing start states is worth it /// in this case. Otherwise, if you have no prefilter, there is likely no /// reason to specialize start states. /// /// This is disabled by default, but note that it is automatically /// enabled (or disabled) if [`Config::prefilter`] is set. Namely, unless /// `specialize_start_states` has already been set, [`Config::prefilter`] /// will automatically enable or disable it based on whether a prefilter /// is present or not, respectively. This is done because a prefilter's /// effectiveness is rooted in being executed whenever the DFA is in a /// start state, and that's only possible to do when they are specialized. /// /// Note that it is plausibly reasonable to _disable_ this option /// explicitly while _enabling_ a prefilter. In that case, a prefilter /// will still be run at the beginning of a search, but never again. 
This /// in theory could strike a good balance if you're in a situation where a /// prefilter is likely to produce many false positive candidates. /// /// # Example /// /// This example shows how to enable start state specialization and then /// shows how to check whether a state is a start state or not. /// /// ``` /// use regex_automata::{hybrid::dfa::DFA, MatchError, Input}; /// /// let dfa = DFA::builder() /// .configure(DFA::config().specialize_start_states(true)) /// .build(r"[a-z]+")?; /// let mut cache = dfa.create_cache(); /// /// let haystack = "123 foobar 4567".as_bytes(); /// let sid = dfa.start_state_forward(&mut cache, &Input::new(haystack))?; /// // The ID returned by 'start_state_forward' will always be tagged as /// // a start state when start state specialization is enabled. /// assert!(sid.is_tagged()); /// assert!(sid.is_start()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// Compare the above with the default lazy DFA configuration where /// start states are _not_ specialized. In this case, the start state /// is not tagged and `sid.is_start()` returns false. /// /// ``` /// use regex_automata::{hybrid::dfa::DFA, MatchError, Input}; /// /// let dfa = DFA::new(r"[a-z]+")?; /// let mut cache = dfa.create_cache(); /// /// let haystack = "123 foobar 4567".as_bytes(); /// let sid = dfa.start_state_forward(&mut cache, &Input::new(haystack))?; /// // Start states are not tagged in the default configuration! /// assert!(!sid.is_tagged()); /// assert!(!sid.is_start()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn specialize_start_states(mut self, yes: bool) -> Config { self.specialize_start_states = Some(yes); self } /// Sets the maximum amount of heap memory, in bytes, to allocate to the /// cache for use during a lazy DFA search. If the lazy DFA would otherwise /// use more heap memory, then, depending on other configuration knobs, /// either stop the search and return an error or clear the cache and /// continue the search. /// /// The default cache capacity is some "reasonable" number that will /// accommodate most regular expressions. You may find that if you need /// to build a large DFA then it may be necessary to increase the cache /// capacity. /// /// Note that while building a lazy DFA will do a "minimum" check to ensure /// the capacity is big enough, this is more or less about correctness. /// If the cache is bigger than the minimum but still "too small," then the /// lazy DFA could wind up spending a lot of time clearing the cache and /// recomputing transitions, thus negating the performance benefits of a /// lazy DFA. Thus, setting the cache capacity is mostly an experimental /// endeavor. For most common patterns, however, the default should be /// sufficient. /// /// For more details on how the lazy DFA's cache is used, see the /// documentation for [`Cache`]. /// /// # Example /// /// This example shows what happens if the configured cache capacity is /// too small. In such cases, one can override the cache capacity to make /// it bigger. Alternatively, one might want to use less memory by setting /// a smaller cache capacity. /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{hybrid::dfa::DFA, HalfMatch, Input}; /// /// let pattern = r"\p{L}{1000}"; /// /// // The default cache capacity is likely too small to deal with regexes /// // that are very large. Large repetitions of large Unicode character /// // classes are a common way to make very large regexes. 
/// let _ = DFA::new(pattern).unwrap_err(); /// // Bump up the capacity to something bigger. /// let dfa = DFA::builder() /// .configure(DFA::config().cache_capacity(100 * (1<<20))) // 100 MB /// .build(pattern)?; /// let mut cache = dfa.create_cache(); /// /// let haystack = "ͰͲͶͿΆΈΉΊΌΎΏΑΒΓΔΕΖΗΘΙ".repeat(50); /// let expected = Some(HalfMatch::must(0, 2000)); /// let got = dfa.try_search_fwd(&mut cache, &Input::new(&haystack))?; /// assert_eq!(expected, got); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn cache_capacity(mut self, bytes: usize) -> Config { self.cache_capacity = Some(bytes); self } /// Configures construction of a lazy DFA to use the minimum cache capacity /// if the configured capacity is otherwise too small for the provided NFA. /// /// This is useful if you never want lazy DFA construction to fail because /// of a capacity that is too small. /// /// In general, this option is typically not a good idea. In particular, /// while a minimum cache capacity does permit the lazy DFA to function /// where it otherwise couldn't, it's plausible that it may not function /// well if it's constantly running out of room. In that case, the speed /// advantages of the lazy DFA may be negated. On the other hand, the /// "minimum" cache capacity computed may not be completely accurate and /// could actually be bigger than what is really necessary. Therefore, it /// is plausible that using the minimum cache capacity could still result /// in very good performance. /// /// This is disabled by default. /// /// # Example /// /// This example shows what happens if the configured cache capacity is /// too small. In such cases, one could override the capacity explicitly. /// An alternative, demonstrated here, let's us force construction to use /// the minimum cache capacity if the configured capacity is otherwise /// too small. /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{hybrid::dfa::DFA, HalfMatch, Input}; /// /// let pattern = r"\p{L}{1000}"; /// /// // The default cache capacity is likely too small to deal with regexes /// // that are very large. Large repetitions of large Unicode character /// // classes are a common way to make very large regexes. /// let _ = DFA::new(pattern).unwrap_err(); /// // Configure construction such it automatically selects the minimum /// // cache capacity if it would otherwise be too small. /// let dfa = DFA::builder() /// .configure(DFA::config().skip_cache_capacity_check(true)) /// .build(pattern)?; /// let mut cache = dfa.create_cache(); /// /// let haystack = "ͰͲͶͿΆΈΉΊΌΎΏΑΒΓΔΕΖΗΘΙ".repeat(50); /// let expected = Some(HalfMatch::must(0, 2000)); /// let got = dfa.try_search_fwd(&mut cache, &Input::new(&haystack))?; /// assert_eq!(expected, got); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn skip_cache_capacity_check(mut self, yes: bool) -> Config { self.skip_cache_capacity_check = Some(yes); self } /// Configure a lazy DFA search to quit after a certain number of cache /// clearings. /// /// When a minimum is set, then a lazy DFA search will *possibly* "give /// up" after the minimum number of cache clearings has occurred. This is /// typically useful in scenarios where callers want to detect whether the /// lazy DFA search is "efficient" or not. If the cache is cleared too many /// times, this is a good indicator that it is not efficient, and thus, the /// caller may wish to use some other regex engine. 
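    ///
    /// As a rough sketch (the values here are illustrative, not
    /// recommendations), such a configuration usually pairs this knob with
    /// [`Config::minimum_bytes_per_state`]:
    ///
    /// ```ignore
    /// let config = DFA::config()
    ///     .minimum_cache_clear_count(Some(5))
    ///     .minimum_bytes_per_state(Some(10));
    /// ```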
/// /// Note that the number of times a cache is cleared is a property of /// the cache itself. Thus, if a cache is used in a subsequent search /// with a similarly configured lazy DFA, then it could cause the /// search to "give up" if the cache needed to be cleared, depending /// on its internal count and configured minimum. The cache clear /// count can only be reset to `0` via [`DFA::reset_cache`] (or /// [`Regex::reset_cache`](crate::hybrid::regex::Regex::reset_cache) if /// you're using the `Regex` API). /// /// By default, no minimum is configured. Thus, a lazy DFA search will /// never give up due to cache clearings. If you do set this option, you /// might consider also setting [`Config::minimum_bytes_per_state`] in /// order for the lazy DFA to take efficiency into account before giving /// up. /// /// # Example /// /// This example uses a somewhat pathological configuration to demonstrate /// the _possible_ behavior of cache clearing and how it might result /// in a search that returns an error. /// /// It is important to note that the precise mechanics of how and when /// a cache gets cleared is an implementation detail. /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{hybrid::dfa::DFA, Input, MatchError, MatchErrorKind}; /// /// // This is a carefully chosen regex. The idea is to pick one /// // that requires some decent number of states (hence the bounded /// // repetition). But we specifically choose to create a class with an /// // ASCII letter and a non-ASCII letter so that we can check that no new /// // states are created once the cache is full. Namely, if we fill up the /// // cache on a haystack of 'a's, then in order to match one 'β', a new /// // state will need to be created since a 'β' is encoded with multiple /// // bytes. Since there's no room for this state, the search should quit /// // at the very first position. /// let pattern = r"[aβ]{100}"; /// let dfa = DFA::builder() /// .configure( /// // Configure it so that we have the minimum cache capacity /// // possible. And that if any clearings occur, the search quits. /// DFA::config() /// .skip_cache_capacity_check(true) /// .cache_capacity(0) /// .minimum_cache_clear_count(Some(0)), /// ) /// .build(pattern)?; /// let mut cache = dfa.create_cache(); /// /// // Our search will give up before reaching the end! /// let haystack = "a".repeat(101).into_bytes(); /// let result = dfa.try_search_fwd(&mut cache, &Input::new(&haystack)); /// assert!(matches!( /// *result.unwrap_err().kind(), /// MatchErrorKind::GaveUp { .. }, /// )); /// /// // Now that we know the cache is full, if we search a haystack that we /// // know will require creating at least one new state, it should not /// // be able to make much progress. /// let haystack = "β".repeat(101).into_bytes(); /// let result = dfa.try_search_fwd(&mut cache, &Input::new(&haystack)); /// assert!(matches!( /// *result.unwrap_err().kind(), /// MatchErrorKind::GaveUp { .. }, /// )); /// /// // If we reset the cache, then we should be able to create more states /// // and make more progress with searching for betas. /// cache.reset(&dfa); /// let haystack = "β".repeat(101).into_bytes(); /// let result = dfa.try_search_fwd(&mut cache, &Input::new(&haystack)); /// assert!(matches!( /// *result.unwrap_err().kind(), /// MatchErrorKind::GaveUp { .. }, /// )); /// /// // ... switching back to ASCII still makes progress since it just needs /// // to set transitions on existing states! 
/// let haystack = "a".repeat(101).into_bytes(); /// let result = dfa.try_search_fwd(&mut cache, &Input::new(&haystack)); /// assert!(matches!( /// *result.unwrap_err().kind(), /// MatchErrorKind::GaveUp { .. }, /// )); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn minimum_cache_clear_count(mut self, min: Option<usize>) -> Config { self.minimum_cache_clear_count = Some(min); self } /// Configure a lazy DFA search to quit only when its efficiency drops /// below the given minimum. /// /// The efficiency of the cache is determined by the number of DFA states /// compiled per byte of haystack searched. For example, if the efficiency /// is 2, then it means the lazy DFA is creating a new DFA state after /// searching approximately 2 bytes in a haystack. Generally speaking, 2 /// is quite bad and it's likely that even a slower regex engine like the /// [`PikeVM`](crate::nfa::thompson::pikevm::PikeVM) would be faster. /// /// This has no effect if [`Config::minimum_cache_clear_count`] is not set. /// Namely, this option only kicks in when the cache has been cleared more /// than the minimum number. If no minimum is set, then the cache is simply /// cleared whenever it fills up and it is impossible for the lazy DFA to /// quit due to ineffective use of the cache. /// /// In general, if one is setting [`Config::minimum_cache_clear_count`], /// then one should probably also set this knob as well. The reason is /// that the absolute number of times the cache is cleared is generally /// not a great predictor of efficiency. For example, if a new DFA state /// is created for every 1,000 bytes searched, then it wouldn't be hard /// for the cache to get cleared more than `N` times and then cause the /// lazy DFA to quit. But a new DFA state every 1,000 bytes is likely quite /// good from a performance perspective, and it's likely that the lazy /// DFA should continue searching, even if it requires clearing the cache /// occasionally. /// /// Finally, note that if you're implementing your own lazy DFA search /// routine and also want this efficiency check to work correctly, then /// you'll need to use the following routines to record search progress: /// /// * Call [`Cache::search_start`] at the beginning of every search. /// * Call [`Cache::search_update`] whenever [`DFA::next_state`] is /// called. /// * Call [`Cache::search_finish`] before completing a search. (It is /// not strictly necessary to call this when an error is returned, as /// `Cache::search_start` will automatically finish the previous search /// for you. But calling it where possible before returning helps improve /// the accuracy of how many bytes have actually been searched.) pub fn minimum_bytes_per_state(mut self, min: Option<usize>) -> Config { self.minimum_bytes_per_state = Some(min); self } /// Returns the match semantics set in this configuration. pub fn get_match_kind(&self) -> MatchKind { self.match_kind.unwrap_or(MatchKind::LeftmostFirst) } /// Returns the prefilter set in this configuration, if one at all. pub fn get_prefilter(&self) -> Option<&Prefilter> { self.pre.as_ref().unwrap_or(&None).as_ref() } /// Returns whether this configuration has enabled anchored starting states /// for every pattern in the DFA. pub fn get_starts_for_each_pattern(&self) -> bool { self.starts_for_each_pattern.unwrap_or(false) } /// Returns whether this configuration has enabled byte classes or not. /// This is typically a debugging oriented option, as disabling it confers /// no speed benefit. 
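    ///
    /// # Example
    ///
    /// A small sketch. (This assumes the `byte_classes` knob on this
    /// configuration for toggling the setting; the getter here just reads
    /// it back.)
    ///
    /// ```
    /// use regex_automata::hybrid::dfa::DFA;
    ///
    /// // Byte classes are enabled by default.
    /// assert!(DFA::config().get_byte_classes());
    /// // Disabling them is mostly useful for debugging, since transitions
    /// // are then defined over concrete bytes instead of opaque equivalence
    /// // class identifiers.
    /// let config = DFA::config().byte_classes(false);
    /// assert!(!config.get_byte_classes());
    /// ```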
pub fn get_byte_classes(&self) -> bool { self.byte_classes.unwrap_or(true) } /// Returns whether this configuration has enabled heuristic Unicode word /// boundary support. When enabled, it is possible for a search to return /// an error. pub fn get_unicode_word_boundary(&self) -> bool { self.unicode_word_boundary.unwrap_or(false) } /// Returns whether this configuration will instruct the lazy DFA to enter /// a quit state whenever the given byte is seen during a search. When at /// least one byte has this enabled, it is possible for a search to return /// an error. pub fn get_quit(&self, byte: u8) -> bool { self.quitset.map_or(false, |q| q.contains(byte)) } /// Returns whether this configuration will instruct the lazy DFA to /// "specialize" start states. When enabled, the lazy DFA will tag start /// states so that search routines using the lazy DFA can detect when /// it's in a start state and do some kind of optimization (like run a /// prefilter). pub fn get_specialize_start_states(&self) -> bool { self.specialize_start_states.unwrap_or(false) } /// Returns the cache capacity set on this configuration. pub fn get_cache_capacity(&self) -> usize { self.cache_capacity.unwrap_or(2 * (1 << 20)) } /// Returns whether the cache capacity check should be skipped. pub fn get_skip_cache_capacity_check(&self) -> bool { self.skip_cache_capacity_check.unwrap_or(false) } /// Returns, if set, the minimum number of times the cache must be cleared /// before a lazy DFA search can give up. When no minimum is set, then a /// search will never quit and will always clear the cache whenever it /// fills up. pub fn get_minimum_cache_clear_count(&self) -> Option<usize> { self.minimum_cache_clear_count.unwrap_or(None) } /// Returns, if set, the minimum number of bytes per state that need to be /// processed in order for the lazy DFA to keep going. If the minimum falls /// below this number (and the cache has been cleared a minimum number of /// times), then the lazy DFA will return a "gave up" error. pub fn get_minimum_bytes_per_state(&self) -> Option<usize> { self.minimum_bytes_per_state.unwrap_or(None) } /// Returns the minimum lazy DFA cache capacity required for the given NFA. /// /// The cache capacity required for a particular NFA may change without /// notice. Callers should not rely on it being stable. /// /// This is useful for informational purposes, but can also be useful for /// other reasons. For example, if one wants to check the minimum cache /// capacity themselves or if one wants to set the capacity based on the /// minimum. /// /// This may return an error if this configuration does not support all of /// the instructions used in the given NFA. For example, if the NFA has a /// Unicode word boundary but this configuration does not enable heuristic /// support for Unicode word boundaries. pub fn get_minimum_cache_capacity( &self, nfa: &thompson::NFA, ) -> Result<usize, BuildError> { let quitset = self.quit_set_from_nfa(nfa)?; let classes = self.byte_classes_from_nfa(nfa, &quitset); let starts = self.get_starts_for_each_pattern(); Ok(minimum_cache_capacity(nfa, &classes, starts)) } /// Returns the byte class map used during search from the given NFA. /// /// If byte classes are disabled on this configuration, then a map is /// returned that puts each byte in its own equivalent class. 
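    ///
    /// As a rough illustration (the exact partition always depends on the
    /// NFA): for a pattern like `[a-z]+`, the bytes `a`-`z` typically all
    /// land in one equivalence class and every other byte in another, so the
    /// lazy DFA only needs a handful of transition table columns instead of
    /// one per possible byte value.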
fn byte_classes_from_nfa( &self, nfa: &thompson::NFA, quit: &ByteSet, ) -> ByteClasses { if !self.get_byte_classes() { // The lazy DFA will always use the equivalence class map, but // enabling this option is useful for debugging. Namely, this will // cause all transitions to be defined over their actual bytes // instead of an opaque equivalence class identifier. The former is // much easier to grok as a human. ByteClasses::singletons() } else { let mut set = nfa.byte_class_set().clone(); // It is important to distinguish any "quit" bytes from all other // bytes. Otherwise, a non-quit byte may end up in the same class // as a quit byte, and thus cause the DFA stop when it shouldn't. // // Test case: // // regex-cli find hybrid regex -w @conn.json.1000x.log \ // '^#' '\b10\.55\.182\.100\b' if !quit.is_empty() { set.add_set(&quit); } set.byte_classes() } } /// Return the quit set for this configuration and the given NFA. /// /// This may return an error if the NFA is incompatible with this /// configuration's quit set. For example, if the NFA has a Unicode word /// boundary and the quit set doesn't include non-ASCII bytes. fn quit_set_from_nfa( &self, nfa: &thompson::NFA, ) -> Result<ByteSet, BuildError> { let mut quit = self.quitset.unwrap_or(ByteSet::empty()); if nfa.look_set_any().contains_word_unicode() { if self.get_unicode_word_boundary() { for b in 0x80..=0xFF { quit.add(b); } } else { // If heuristic support for Unicode word boundaries wasn't // enabled, then we can still check if our quit set is correct. // If the caller set their quit bytes in a way that causes the // DFA to quit on at least all non-ASCII bytes, then that's all // we need for heuristic support to work. if !quit.contains_range(0x80, 0xFF) { return Err( BuildError::unsupported_dfa_word_boundary_unicode(), ); } } } Ok(quit) } /// Overwrite the default configuration such that the options in `o` are /// always used. If an option in `o` is not set, then the corresponding /// option in `self` is used. If it's not set in `self` either, then it /// remains not set. fn overwrite(&self, o: Config) -> Config { Config { match_kind: o.match_kind.or(self.match_kind), pre: o.pre.or_else(|| self.pre.clone()), starts_for_each_pattern: o .starts_for_each_pattern .or(self.starts_for_each_pattern), byte_classes: o.byte_classes.or(self.byte_classes), unicode_word_boundary: o .unicode_word_boundary .or(self.unicode_word_boundary), quitset: o.quitset.or(self.quitset), specialize_start_states: o .specialize_start_states .or(self.specialize_start_states), cache_capacity: o.cache_capacity.or(self.cache_capacity), skip_cache_capacity_check: o .skip_cache_capacity_check .or(self.skip_cache_capacity_check), minimum_cache_clear_count: o .minimum_cache_clear_count .or(self.minimum_cache_clear_count), minimum_bytes_per_state: o .minimum_bytes_per_state .or(self.minimum_bytes_per_state), } } } /// A builder for constructing a lazy deterministic finite automaton from /// regular expressions. /// /// As a convenience, [`DFA::builder`] is an alias for [`Builder::new`]. The /// advantage of the former is that it often lets you avoid importing the /// `Builder` type directly. /// /// This builder provides two main things: /// /// 1. It provides a few different `build` routines for actually constructing /// a DFA from different kinds of inputs. The most convenient is /// [`Builder::build`], which builds a DFA directly from a pattern string. The /// most flexible is [`Builder::build_from_nfa`], which builds a DFA straight /// from an NFA. /// 2. 
The builder permits configuring a number of things. /// [`Builder::configure`] is used with [`Config`] to configure aspects of /// the DFA and the construction process itself. [`Builder::syntax`] and /// [`Builder::thompson`] permit configuring the regex parser and Thompson NFA /// construction, respectively. The syntax and thompson configurations only /// apply when building from a pattern string. /// /// This builder always constructs a *single* lazy DFA. As such, this builder /// can only be used to construct regexes that either detect the presence /// of a match or find the end location of a match. A single DFA cannot /// produce both the start and end of a match. For that information, use a /// [`Regex`](crate::hybrid::regex::Regex), which can be similarly configured /// using [`regex::Builder`](crate::hybrid::regex::Builder). The main reason /// to use a DFA directly is if the end location of a match is enough for your /// use case. Namely, a `Regex` will construct two lazy DFAs instead of one, /// since a second reverse DFA is needed to find the start of a match. /// /// # Example /// /// This example shows how to build a lazy DFA that uses a tiny cache capacity /// and completely disables Unicode. That is: /// /// * Things such as `\w`, `.` and `\b` are no longer Unicode-aware. `\w` /// and `\b` are ASCII-only while `.` matches any byte except for `\n` /// (instead of any UTF-8 encoding of a Unicode scalar value except for /// `\n`). Things that are Unicode only, such as `\pL`, are not allowed. /// * The pattern itself is permitted to match invalid UTF-8. For example, /// things like `[^a]` that match any byte except for `a` are permitted. /// /// ``` /// use regex_automata::{ /// hybrid::dfa::DFA, /// nfa::thompson, /// util::syntax, /// HalfMatch, Input, /// }; /// /// let dfa = DFA::builder() /// .configure(DFA::config().cache_capacity(5_000)) /// .thompson(thompson::Config::new().utf8(false)) /// .syntax(syntax::Config::new().unicode(false).utf8(false)) /// .build(r"foo[^b]ar.*")?; /// let mut cache = dfa.create_cache(); /// /// let haystack = b"\xFEfoo\xFFar\xE2\x98\xFF\n"; /// let expected = Some(HalfMatch::must(0, 10)); /// let got = dfa.try_search_fwd(&mut cache, &Input::new(haystack))?; /// assert_eq!(expected, got); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[derive(Clone, Debug)] pub struct Builder { config: Config, #[cfg(feature = "syntax")] thompson: thompson::Compiler, } impl Builder { /// Create a new lazy DFA builder with the default configuration. pub fn new() -> Builder { Builder { config: Config::default(), #[cfg(feature = "syntax")] thompson: thompson::Compiler::new(), } } /// Build a lazy DFA from the given pattern. /// /// If there was a problem parsing or compiling the pattern, then an error /// is returned. #[cfg(feature = "syntax")] pub fn build(&self, pattern: &str) -> Result<DFA, BuildError> { self.build_many(&[pattern]) } /// Build a lazy DFA from the given patterns. /// /// When matches are returned, the pattern ID corresponds to the index of /// the pattern in the slice given. #[cfg(feature = "syntax")] pub fn build_many<P: AsRef<str>>( &self, patterns: &[P], ) -> Result<DFA, BuildError> { let nfa = self .thompson .clone() // We can always forcefully disable captures because DFAs do not // support them. .configure( thompson::Config::new() .which_captures(thompson::WhichCaptures::None), ) .build_many(patterns) .map_err(BuildError::nfa)?; self.build_from_nfa(nfa) } /// Build a DFA from the given NFA. 
/// /// Note that this requires owning a `thompson::NFA`. While this may force /// you to clone the NFA, such a clone is not a deep clone. Namely, NFAs /// are defined internally to support shared ownership such that cloning is /// very cheap. /// /// # Example /// /// This example shows how to build a lazy DFA if you already have an NFA /// in hand. /// /// ``` /// use regex_automata::{ /// hybrid::dfa::DFA, /// nfa::thompson, /// HalfMatch, Input, /// }; /// /// let haystack = "foo123bar"; /// /// // This shows how to set non-default options for building an NFA. /// let nfa = thompson::Compiler::new() /// .configure(thompson::Config::new().shrink(true)) /// .build(r"[0-9]+")?; /// let dfa = DFA::builder().build_from_nfa(nfa)?; /// let mut cache = dfa.create_cache(); /// let expected = Some(HalfMatch::must(0, 6)); /// let got = dfa.try_search_fwd(&mut cache, &Input::new(haystack))?; /// assert_eq!(expected, got); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn build_from_nfa( &self, nfa: thompson::NFA, ) -> Result<DFA, BuildError> { let quitset = self.config.quit_set_from_nfa(&nfa)?; let classes = self.config.byte_classes_from_nfa(&nfa, &quitset); // Check that we can fit at least a few states into our cache, // otherwise it's pretty senseless to use the lazy DFA. This does have // a possible failure mode though. This assumes the maximum size of a // state in powerset space (so, the total number of NFA states), which // may never actually materialize, and could be quite a bit larger // than the actual biggest state. If this turns out to be a problem, // we could expose a knob that disables this check. But if so, we have // to be careful not to panic in other areas of the code (the cache // clearing and init code) that tend to assume some minimum useful // cache capacity. let min_cache = minimum_cache_capacity( &nfa, &classes, self.config.get_starts_for_each_pattern(), ); let mut cache_capacity = self.config.get_cache_capacity(); if cache_capacity < min_cache { // When the caller has asked us to skip the cache capacity check, // then we simply force the cache capacity to its minimum amount // and mush on. if self.config.get_skip_cache_capacity_check() { debug!( "given capacity ({}) is too small, \ since skip_cache_capacity_check is enabled, \ setting cache capacity to minimum ({})", cache_capacity, min_cache, ); cache_capacity = min_cache; } else { return Err(BuildError::insufficient_cache_capacity( min_cache, cache_capacity, )); } } // We also need to check that we can fit at least some small number // of states in our state ID space. This is unlikely to trigger in // >=32-bit systems, but 16-bit systems have a pretty small state ID // space since a number of bits are used up as sentinels. if let Err(err) = minimum_lazy_state_id(&classes) { return Err(BuildError::insufficient_state_id_capacity(err)); } let stride2 = classes.stride2(); let start_map = StartByteMap::new(nfa.look_matcher()); Ok(DFA { config: self.config.clone(), nfa, stride2, start_map, classes, quitset, cache_capacity, }) } /// Apply the given lazy DFA configuration options to this builder. pub fn configure(&mut self, config: Config) -> &mut Builder { self.config = self.config.overwrite(config); self } /// Set the syntax configuration for this builder using /// [`syntax::Config`](crate::util::syntax::Config). /// /// This permits setting things like case insensitivity, Unicode and multi /// line mode. /// /// These settings only apply when constructing a lazy DFA directly from a /// pattern. 
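    ///
    /// # Example
    ///
    /// A brief sketch of the case insensitivity setting mentioned above.
    /// (The `case_insensitive` method name on `syntax::Config` is assumed
    /// here rather than shown elsewhere in these docs.)
    ///
    /// ```
    /// use regex_automata::{hybrid::dfa::DFA, util::syntax, HalfMatch, Input};
    ///
    /// let dfa = DFA::builder()
    ///     .syntax(syntax::Config::new().case_insensitive(true))
    ///     .build(r"foo")?;
    /// let mut cache = dfa.create_cache();
    ///
    /// let expected = Some(HalfMatch::must(0, 3));
    /// let got = dfa.try_search_fwd(&mut cache, &Input::new("FoO"))?;
    /// assert_eq!(expected, got);
    ///
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```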
#[cfg(feature = "syntax")] pub fn syntax( &mut self, config: crate::util::syntax::Config, ) -> &mut Builder { self.thompson.syntax(config); self } /// Set the Thompson NFA configuration for this builder using /// [`nfa::thompson::Config`](crate::nfa::thompson::Config). /// /// This permits setting things like whether the DFA should match the regex /// in reverse or if additional time should be spent shrinking the size of /// the NFA. /// /// These settings only apply when constructing a DFA directly from a /// pattern. #[cfg(feature = "syntax")] pub fn thompson(&mut self, config: thompson::Config) -> &mut Builder { self.thompson.configure(config); self } } /// Represents the current state of an overlapping search. /// /// This is used for overlapping searches since they need to know something /// about the previous search. For example, when multiple patterns match at the /// same position, this state tracks the last reported pattern so that the next /// search knows whether to report another matching pattern or continue with /// the search at the next position. Additionally, it also tracks which state /// the last search call terminated in. /// /// This type provides little introspection capabilities. The only thing a /// caller can do is construct it and pass it around to permit search routines /// to use it to track state, and also ask whether a match has been found. /// /// Callers should always provide a fresh state constructed via /// [`OverlappingState::start`] when starting a new search. Reusing state from /// a previous search may result in incorrect results. #[derive(Clone, Debug, Eq, PartialEq)] pub struct OverlappingState { /// The match reported by the most recent overlapping search to use this /// state. /// /// If a search does not find any matches, then it is expected to clear /// this value. pub(crate) mat: Option<HalfMatch>, /// The state ID of the state at which the search was in when the call /// terminated. When this is a match state, `last_match` must be set to a /// non-None value. /// /// A `None` value indicates the start state of the corresponding /// automaton. We cannot use the actual ID, since any one automaton may /// have many start states, and which one is in use depends on several /// search-time factors. pub(crate) id: Option<LazyStateID>, /// The position of the search. /// /// When `id` is None (i.e., we are starting a search), this is set to /// the beginning of the search as given by the caller regardless of its /// current value. Subsequent calls to an overlapping search pick up at /// this offset. pub(crate) at: usize, /// The index into the matching patterns of the next match to report if the /// current state is a match state. Note that this may be 1 greater than /// the total number of matches to report for the current match state. (In /// which case, no more matches should be reported at the current position /// and the search should advance to the next position.) pub(crate) next_match_index: Option<usize>, /// This is set to true when a reverse overlapping search has entered its /// EOI transitions. /// /// This isn't used in a forward search because it knows to stop once the /// position exceeds the end of the search range. In a reverse search, /// since we use unsigned offsets, we don't "know" once we've gone past /// `0`. So the only way to detect it is with this extra flag. The reverse /// overlapping search knows to terminate specifically after it has /// reported all matches after following the EOI transition. 
    pub(crate) rev_eoi: bool,
}

impl OverlappingState {
    /// Create a new overlapping state that begins at the start state of any
    /// automaton.
    pub fn start() -> OverlappingState {
        OverlappingState {
            mat: None,
            id: None,
            at: 0,
            next_match_index: None,
            rev_eoi: false,
        }
    }

    /// Return the match result of the most recent search to execute with this
    /// state.
    ///
    /// A search will clear this result automatically, such that if no
    /// match is found, this will correctly report `None`.
    pub fn get_match(&self) -> Option<HalfMatch> {
        self.mat
    }
}

/// Runs the given overlapping `search` function (forwards or backwards) until
/// a match is found whose offset does not split a codepoint.
///
/// This is *not* always correct to call. It should only be called when the
/// underlying NFA has UTF-8 mode enabled *and* it can produce zero-width
/// matches. Calling this when both of those things aren't true might result
/// in legitimate matches getting skipped.
#[cold]
#[inline(never)]
fn skip_empty_utf8_splits_overlapping<F>(
    input: &Input<'_>,
    state: &mut OverlappingState,
    mut search: F,
) -> Result<(), MatchError>
where
    F: FnMut(&Input<'_>, &mut OverlappingState) -> Result<(), MatchError>,
{
    // Note that this routine works for forwards and reverse searches
    // even though there's no code here to handle those cases. That's
    // because overlapping searches drive themselves to completion via
    // `OverlappingState`. So all we have to do is push it until no matches are
    // found.
    let mut hm = match state.get_match() {
        None => return Ok(()),
        Some(hm) => hm,
    };
    if input.get_anchored().is_anchored() {
        if !input.is_char_boundary(hm.offset()) {
            state.mat = None;
        }
        return Ok(());
    }
    while !input.is_char_boundary(hm.offset()) {
        search(input, state)?;
        hm = match state.get_match() {
            None => return Ok(()),
            Some(hm) => hm,
        };
    }
    Ok(())
}

/// Based on the minimum number of states required for a useful lazy DFA cache,
/// this returns the minimum lazy state ID that must be representable.
///
/// It's not likely for this to have any impact on 32-bit systems (or higher),
/// but on 16-bit systems, the lazy state ID space is quite constrained and
/// thus may be insufficient if our MIN_STATES value is (for some reason) too
/// high.
fn minimum_lazy_state_id(
    classes: &ByteClasses,
) -> Result<LazyStateID, LazyStateIDError> {
    let stride = 1 << classes.stride2();
    let min_state_index = MIN_STATES.checked_sub(1).unwrap();
    LazyStateID::new(min_state_index * stride)
}

/// Based on the minimum number of states required for a useful lazy DFA cache,
/// this returns a heuristic minimum number of bytes of heap space required.
///
/// This is a "heuristic" because the minimum it returns is likely bigger than
/// the true minimum. Namely, it assumes that each powerset NFA/DFA state uses
/// the maximum number of NFA states (all of them). This is likely bigger
/// than what is required in practice. Computing the true minimum effectively
/// requires determinization, which is probably too much work to do for a
/// simple check like this.
///
/// One of the issues with this approach IMO is that it requires that this
/// be in sync with the calculation above for computing how much heap memory
/// the DFA cache uses. If we get it wrong, it's possible for example for the
/// minimum to be smaller than the computed heap memory, and thus, it may be
/// the case that we can't add the required minimum number of states. That in
/// turn will make lazy DFA panic because we assume that we can add at least a
/// minimum number of states.
/// /// Another approach would be to always allow the minimum number of states to /// be added to the lazy DFA cache, even if it exceeds the configured cache /// limit. This does mean that the limit isn't really a limit in all cases, /// which is unfortunate. But it does at least guarantee that the lazy DFA can /// always make progress, even if it is slow. (This approach is very similar to /// enabling the 'skip_cache_capacity_check' config knob, except it wouldn't /// rely on cache size calculation. Instead, it would just always permit a /// minimum number of states to be added.) fn minimum_cache_capacity( nfa: &thompson::NFA, classes: &ByteClasses, starts_for_each_pattern: bool, ) -> usize { const ID_SIZE: usize = size_of::<LazyStateID>(); const STATE_SIZE: usize = size_of::<State>(); let stride = 1 << classes.stride2(); let states_len = nfa.states().len(); let sparses = 2 * states_len * NFAStateID::SIZE; let trans = MIN_STATES * stride * ID_SIZE; let mut starts = Start::len() * ID_SIZE; if starts_for_each_pattern { starts += (Start::len() * nfa.pattern_len()) * ID_SIZE; } // The min number of states HAS to be at least 4: we have 3 sentinel states // and then we need space for one more when we save a state after clearing // the cache. We also need space for one more, otherwise we get stuck in a // loop where we try to add a 5th state, which gets rejected, which clears // the cache, which adds back a saved state (4th total state) which then // tries to add the 5th state again. assert!(MIN_STATES >= 5, "minimum number of states has to be at least 5"); // The minimum number of non-sentinel states. We consider this separately // because sentinel states are much smaller in that they contain no NFA // states. Given our aggressive calculation here, it's worth being more // precise with the number of states we need. let non_sentinel = MIN_STATES.checked_sub(SENTINEL_STATES).unwrap(); // Every `State` has 5 bytes for flags, 4 bytes (max) for the number of // patterns, followed by 32-bit encodings of patterns and then delta // varint encodings of NFA state IDs. We use the worst case (which isn't // technically possible) of 5 bytes for each NFA state ID. // // HOWEVER, three of the states needed by a lazy DFA are just the sentinel // unknown, dead and quit states. Those states have a known size and it is // small. let dead_state_size = State::dead().memory_usage(); let max_state_size = 5 + 4 + (nfa.pattern_len() * 4) + (states_len * 5); let states = (SENTINEL_STATES * (STATE_SIZE + dead_state_size)) + (non_sentinel * (STATE_SIZE + max_state_size)); // NOTE: We don't double count heap memory used by State for this map since // we use reference counting to avoid doubling memory usage. (This tends to // be where most memory is allocated in the cache.) let states_to_sid = (MIN_STATES * STATE_SIZE) + (MIN_STATES * ID_SIZE); let stack = states_len * NFAStateID::SIZE; let scratch_state_builder = max_state_size; trans + starts + states + states_to_sid + sparses + stack + scratch_state_builder } #[cfg(all(test, feature = "syntax"))] mod tests { use super::*; // Tests that we handle heuristic Unicode word boundary support in reverse // DFAs in the specific case of contextual searches. // // I wrote this test when I discovered a bug in how heuristic word // boundaries were handled. 
Namely, that the starting state selection
    // didn't consider the DFA's quit byte set when looking at the byte
    // immediately before the start of the search (or immediately after the
    // end of the search in the case of a reverse search). As a result, it was
    // possible for '\bfoo\b' to match 'β123' because the trailing \xB2 byte
    // in the 'β' codepoint would be treated as a non-word character. But of
    // course, this search should trigger the DFA to quit, since there is a
    // non-ASCII byte in consideration.
    //
    // Thus, I fixed 'start_state_{forward,reverse}' to check the quit byte set
    // if it wasn't empty. The forward case is tested in the doc test for the
    // Config::unicode_word_boundary API. We test the reverse case here, which
    // is sufficiently niche that it doesn't really belong in a doc test.
    #[test]
    fn heuristic_unicode_reverse() {
        let dfa = DFA::builder()
            .configure(DFA::config().unicode_word_boundary(true))
            .thompson(thompson::Config::new().reverse(true))
            .build(r"\b[0-9]+\b")
            .unwrap();
        let mut cache = dfa.create_cache();

        let input = Input::new("β123").range(2..);
        let expected = MatchError::quit(0xB2, 1);
        let got = dfa.try_search_rev(&mut cache, &input);
        assert_eq!(Err(expected), got);

        let input = Input::new("123β").range(..3);
        let expected = MatchError::quit(0xCE, 3);
        let got = dfa.try_search_rev(&mut cache, &input);
        assert_eq!(Err(expected), got);
    }
}
<file_sep>/regex-cli/args/common.rs
use lexopt::{Arg, Parser};

use crate::args::{Configurable, Usage};

/// This exposes a small set of "common" configuration knobs, shared by most
/// commands, via CLI flags. These control general output behavior, such as
/// suppressing or expanding output and whether to print a table of
/// properties, rather than anything specific to a single command.
#[derive(Debug, Default)]
pub struct Config {
    pub quiet: bool,
    pub verbose: bool,
    pub no_table: bool,
}

impl Config {
    pub fn table(&self) -> bool {
        !self.no_table
    }
}

impl Configurable for Config {
    fn configure(
        &mut self,
        _: &mut Parser,
        arg: &mut Arg,
    ) -> anyhow::Result<bool> {
        match *arg {
            Arg::Short('q') | Arg::Long("quiet") => {
                self.quiet = true;
            }
            Arg::Long("verbose") => {
                self.verbose = true;
            }
            Arg::Long("no-table") => {
                self.no_table = true;
            }
            _ => return Ok(false),
        }
        Ok(true)
    }

    fn usage(&self) -> &[Usage] {
        const USAGES: &'static [Usage] = &[
            Usage::new(
                "-q, --quiet",
                "Suppress some output.",
                r#"
This is a generic flag that suppresses some (but not all) output. Which output
is suppressed depends on the command. For example, using the -q/--quiet flag
with the 'regex-cli debug' variety of commands will only show the properties
of the object being printed and will suppress the debug printing of the object
itself.
"#,
            ),
            Usage::new(
                "--verbose",
                "Add more output.",
                r#"
This is a generic flag that expands output beyond the "normal" amount. Which
output is added depends on the command.
"#,
            ),
            Usage::new(
                "--no-table",
                "Omit any table of information from the output.",
                r#"
Many commands in this tool will print a table of property information related
to the task being performed. Passing this flag will suppress that table.
"#,
            ),
        ];
        USAGES
    }
}
<file_sep>/testdata/utf8.toml
# These test the UTF-8 modes exposed by regex-automata. Namely, when utf8 is
# true, then we promise that the haystack is valid UTF-8. (Otherwise behavior
# is unspecified.) This also corresponds to building the regex engine with the
# following two guarantees:
#
# 1) For any non-empty match reported, its span is guaranteed to correspond to
# valid UTF-8.
# 2) All empty or zero-width matches reported must never split a UTF-8 # encoded codepoint. If the haystack has invalid UTF-8, then this results in # unspecified behavior. # # The (2) is in particular what we focus our testing on since (1) is generally # guaranteed by regex-syntax's AST-to-HIR translator and is well tested there. # The thing with (2) is that it can't be described in the HIR, so the regex # engines have to handle that case. Thus, we test it here. # # Note that it is possible to build a regex that has property (1) but not # (2), and vice versa. This is done by building the HIR with 'utf8=true' but # building the Thompson NFA with 'utf8=false'. We don't test that here because # the harness doesn't expose a way to enable or disable UTF-8 mode with that # granularity. Instead, those combinations are lightly tested via doc examples. # That's not to say that (1) without (2) is uncommon. Indeed, ripgrep uses it # because it cannot guarantee that its haystack is valid UTF-8. # This tests that an empty regex doesn't split a codepoint. [[test]] name = "empty-utf8yes" regex = '' haystack = '☃' matches = [[0, 0], [3, 3]] unicode = true utf8 = true # Tests the overlapping case of the above. [[test]] name = "empty-utf8yes-overlapping" regex = '' haystack = '☃' matches = [[0, 0], [3, 3]] unicode = true utf8 = true match-kind = "all" search-kind = "overlapping" # This tests that an empty regex DOES split a codepoint when utf=false. [[test]] name = "empty-utf8no" regex = '' haystack = '☃' matches = [[0, 0], [1, 1], [2, 2], [3, 3]] unicode = true utf8 = false # Tests the overlapping case of the above. [[test]] name = "empty-utf8no-overlapping" regex = '' haystack = '☃' matches = [[0, 0], [1, 1], [2, 2], [3, 3]] unicode = true utf8 = false match-kind = "all" search-kind = "overlapping" # This tests that an empty regex doesn't split a codepoint, even if we give # it bounds entirely within the codepoint. # # This is one of the trickier cases and is what motivated the current UTF-8 # mode design. In particular, at one point, this test failed the 'is_match' # variant of the test but not 'find'. This is because the 'is_match' code path # is specifically optimized for "was a match found" rather than "where is the # match." In the former case, you don't really care about the empty-vs-non-empty # matches, and thus, the codepoint splitting filtering logic wasn't getting # applied. (In multiple ways across multiple regex engines.) In this way, you # can wind up with a situation where 'is_match' says "yes," but 'find' says, # "I didn't find anything." Which is... not great. # # I could have decided to say that providing boundaries that themselves split # a codepoint would have unspecified behavior. But I couldn't quite convince # myself that such boundaries were the only way to get an inconsistency between # 'is_match' and 'find'. # # Note that I also tried to come up with a test like this that fails without # using `bounds`. Specifically, a test where 'is_match' and 'find' disagree. # But I couldn't do it, and I'm tempted to conclude it is impossible. The # fundamental problem is that you need to simultaneously produce an empty match # that splits a codepoint while *not* matching before or after the codepoint. [[test]] name = "empty-utf8yes-bounds" regex = '' haystack = '𝛃' bounds = [1, 3] matches = [] unicode = true utf8 = true # Tests the overlapping case of the above. 
[[test]] name = "empty-utf8yes-bounds-overlapping" regex = '' haystack = '𝛃' bounds = [1, 3] matches = [] unicode = true utf8 = true match-kind = "all" search-kind = "overlapping" # This tests that an empty regex splits a codepoint when the bounds are # entirely within the codepoint. [[test]] name = "empty-utf8no-bounds" regex = '' haystack = '𝛃' bounds = [1, 3] matches = [[1, 1], [2, 2], [3, 3]] unicode = true utf8 = false # Tests the overlapping case of the above. [[test]] name = "empty-utf8no-bounds-overlapping" regex = '' haystack = '𝛃' bounds = [1, 3] matches = [[1, 1], [2, 2], [3, 3]] unicode = true utf8 = false match-kind = "all" search-kind = "overlapping" # In this test, we anchor the search. Since the start position is also a UTF-8 # boundary, we get a match. [[test]] name = "empty-utf8yes-anchored" regex = '' haystack = '𝛃' matches = [[0, 0]] anchored = true unicode = true utf8 = true # Tests the overlapping case of the above. [[test]] name = "empty-utf8yes-anchored-overlapping" regex = '' haystack = '𝛃' matches = [[0, 0]] anchored = true unicode = true utf8 = true match-kind = "all" search-kind = "overlapping" # Same as above, except with UTF-8 mode disabled. It almost doesn't change the # result, except for the fact that since this is an anchored search and we # always find all matches, the test harness will keep reporting matches until # none are found. Because it's anchored, matches will be reported so long as # they are directly adjacent. Since with UTF-8 mode the next anchored search # after the match at [0, 0] fails, iteration stops (and doesn't find the last # match at [4, 4]). [[test]] name = "empty-utf8no-anchored" regex = '' haystack = '𝛃' matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] anchored = true unicode = true utf8 = false # Tests the overlapping case of the above. # # Note that overlapping anchored searches are a little weird, and it's not # totally clear what their semantics ought to be. For now, we just test the # current behavior of our test shim that implements overlapping search. (This # is one of the reasons why we don't really expose regex-level overlapping # searches.) [[test]] name = "empty-utf8no-anchored-overlapping" regex = '' haystack = '𝛃' matches = [[0, 0]] anchored = true unicode = true utf8 = false match-kind = "all" search-kind = "overlapping" # In this test, we anchor the search, but also set bounds. The bounds start the # search in the middle of a codepoint, so there should never be a match. [[test]] name = "empty-utf8yes-anchored-bounds" regex = '' haystack = '𝛃' matches = [] bounds = [1, 3] anchored = true unicode = true utf8 = true # Tests the overlapping case of the above. [[test]] name = "empty-utf8yes-anchored-bounds-overlapping" regex = '' haystack = '𝛃' matches = [] bounds = [1, 3] anchored = true unicode = true utf8 = true match-kind = "all" search-kind = "overlapping" # Same as above, except with UTF-8 mode disabled. Without UTF-8 mode enabled, # matching within a codepoint is allowed. And remember, as in the anchored test # above with UTF-8 mode disabled, iteration will report all adjacent matches. # The matches at [0, 0] and [4, 4] are not included because of the bounds of # the search. [[test]] name = "empty-utf8no-anchored-bounds" regex = '' haystack = '𝛃' bounds = [1, 3] matches = [[1, 1], [2, 2], [3, 3]] anchored = true unicode = true utf8 = false # Tests the overlapping case of the above. # # Note that overlapping anchored searches are a little weird, and it's not # totally clear what their semantics ought to be. 
For now, we just test the # current behavior of our test shim that implements overlapping search. (This # is one of the reasons why we don't really expose regex-level overlapping # searches.) [[test]] name = "empty-utf8no-anchored-bounds-overlapping" regex = '' haystack = '𝛃' bounds = [1, 3] matches = [[1, 1]] anchored = true unicode = true utf8 = false match-kind = "all" search-kind = "overlapping" # This tests that we find the match at the end of the string when the bounds # exclude the first match. [[test]] name = "empty-utf8yes-startbound" regex = '' haystack = '𝛃' bounds = [1, 4] matches = [[4, 4]] unicode = true utf8 = true # Tests the overlapping case of the above. [[test]] name = "empty-utf8yes-startbound-overlapping" regex = '' haystack = '𝛃' bounds = [1, 4] matches = [[4, 4]] unicode = true utf8 = true match-kind = "all" search-kind = "overlapping" # Same as above, except since UTF-8 mode is disabled, we also find the matches # inbetween that split the codepoint. [[test]] name = "empty-utf8no-startbound" regex = '' haystack = '𝛃' bounds = [1, 4] matches = [[1, 1], [2, 2], [3, 3], [4, 4]] unicode = true utf8 = false # Tests the overlapping case of the above. [[test]] name = "empty-utf8no-startbound-overlapping" regex = '' haystack = '𝛃' bounds = [1, 4] matches = [[1, 1], [2, 2], [3, 3], [4, 4]] unicode = true utf8 = false match-kind = "all" search-kind = "overlapping" # This tests that we don't find any matches in an anchored search, even when # the bounds include a match (at the end). [[test]] name = "empty-utf8yes-anchored-startbound" regex = '' haystack = '𝛃' bounds = [1, 4] matches = [] anchored = true unicode = true utf8 = true # Tests the overlapping case of the above. [[test]] name = "empty-utf8yes-anchored-startbound-overlapping" regex = '' haystack = '𝛃' bounds = [1, 4] matches = [] anchored = true unicode = true utf8 = true match-kind = "all" search-kind = "overlapping" # Same as above, except since UTF-8 mode is disabled, we also find the matches # inbetween that split the codepoint. Even though this is an anchored search, # since the matches are adjacent, we find all of them. [[test]] name = "empty-utf8no-anchored-startbound" regex = '' haystack = '𝛃' bounds = [1, 4] matches = [[1, 1], [2, 2], [3, 3], [4, 4]] anchored = true unicode = true utf8 = false # Tests the overlapping case of the above. # # Note that overlapping anchored searches are a little weird, and it's not # totally clear what their semantics ought to be. For now, we just test the # current behavior of our test shim that implements overlapping search. (This # is one of the reasons why we don't really expose regex-level overlapping # searches.) [[test]] name = "empty-utf8no-anchored-startbound-overlapping" regex = '' haystack = '𝛃' bounds = [1, 4] matches = [[1, 1]] anchored = true unicode = true utf8 = false match-kind = "all" search-kind = "overlapping" # This tests that we find the match at the end of the haystack in UTF-8 mode # when our bounds only include the empty string at the end of the haystack. [[test]] name = "empty-utf8yes-anchored-endbound" regex = '' haystack = '𝛃' bounds = [4, 4] matches = [[4, 4]] anchored = true unicode = true utf8 = true # Tests the overlapping case of the above. [[test]] name = "empty-utf8yes-anchored-endbound-overlapping" regex = '' haystack = '𝛃' bounds = [4, 4] matches = [[4, 4]] anchored = true unicode = true utf8 = true match-kind = "all" search-kind = "overlapping" # Same as above, but with UTF-8 mode disabled. 
Results remain the same since # the only possible match does not split a codepoint. [[test]] name = "empty-utf8no-anchored-endbound" regex = '' haystack = '𝛃' bounds = [4, 4] matches = [[4, 4]] anchored = true unicode = true utf8 = false # Tests the overlapping case of the above. [[test]] name = "empty-utf8no-anchored-endbound-overlapping" regex = '' haystack = '𝛃' bounds = [4, 4] matches = [[4, 4]] anchored = true unicode = true utf8 = false match-kind = "all" search-kind = "overlapping" <file_sep>/regex-test/README.md regex-test ========== This is a small supporting library for reading the regex crate TOML test format. It is not currently intended for others to use this, but if you have a use case, please open an issue. ### Documentation https://docs.rs/regex-test <file_sep>/regex-cli/cmd/mod.rs mod compile_test; mod debug; mod find; mod generate; const USAGE: &'static str = "\ A tool for interacting with Rust's regex crate on the command line. USAGE: regex-cli <command> ... COMMANDS: compile-test Measure binary size and compile time of various configs. debug Print the debug representation of things from regex-automata. find Search haystacks with one of many different regex engines. generate Various generation tasks, e.g., serializing DFAs. "; pub fn run(p: &mut lexopt::Parser) -> anyhow::Result<()> { let cmd = crate::args::next_as_command(USAGE, p)?; match &*cmd { "compile-test" => compile_test::run(p), "find" => find::run(p), "debug" => debug::run(p), "generate" => generate::run(p), unk => anyhow::bail!("unrecognized command '{}'", unk), } } <file_sep>/regex-syntax/src/hir/mod.rs /*! Defines a high-level intermediate (HIR) representation for regular expressions. The HIR is represented by the [`Hir`] type, and it principally constructed via [translation](translate) from an [`Ast`](crate::ast::Ast). Alternatively, users may use the smart constructors defined on `Hir` to build their own by hand. The smart constructors simultaneously simplify and "optimize" the HIR, and are also the same routines used by translation. Most regex engines only have an HIR like this, and usually construct it directly from the concrete syntax. This crate however first parses the concrete syntax into an `Ast`, and only then creates the HIR from the `Ast`, as mentioned above. It's done this way to facilitate better error reporting, and to have a structured representation of a regex that faithfully represents its concrete syntax. Namely, while an `Hir` value can be converted back to an equivalent regex pattern string, it is unlikely to look like the original due to its simplified structure. */ use core::{char, cmp}; use alloc::{ boxed::Box, format, string::{String, ToString}, vec, vec::Vec, }; use crate::{ ast::Span, hir::interval::{Interval, IntervalSet, IntervalSetIter}, unicode, }; pub use crate::{ hir::visitor::{visit, Visitor}, unicode::CaseFoldError, }; mod interval; pub mod literal; pub mod print; pub mod translate; mod visitor; /// An error that can occur while translating an `Ast` to a `Hir`. #[derive(Clone, Debug, Eq, PartialEq)] pub struct Error { /// The kind of error. kind: ErrorKind, /// The original pattern that the translator's Ast was parsed from. Every /// span in an error is a valid range into this string. pattern: String, /// The span of this error, derived from the Ast given to the translator. span: Span, } impl Error { /// Return the type of this error. pub fn kind(&self) -> &ErrorKind { &self.kind } /// The original pattern string in which this error occurred. 
    ///
    /// Every span reported by this error is reported in terms of this string.
    pub fn pattern(&self) -> &str {
        &self.pattern
    }

    /// Return the span at which this error occurred.
    pub fn span(&self) -> &Span {
        &self.span
    }
}

/// The type of an error that occurred while building an `Hir`.
///
/// This error type is marked as `non_exhaustive`. This means that adding a
/// new variant is not considered a breaking change.
#[non_exhaustive]
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum ErrorKind {
    /// This error occurs when a Unicode feature is used when Unicode
    /// support is disabled. For example `(?-u:\pL)` would trigger this error.
    UnicodeNotAllowed,
    /// This error occurs when translating a pattern that could match a byte
    /// sequence that isn't UTF-8 and `utf8` was enabled.
    InvalidUtf8,
    /// This error occurs when one uses a non-ASCII byte for a line terminator,
    /// but where Unicode mode is enabled and UTF-8 mode is disabled.
    InvalidLineTerminator,
    /// This occurs when an unrecognized Unicode property name could not
    /// be found.
    UnicodePropertyNotFound,
    /// This occurs when an unrecognized Unicode property value could not
    /// be found.
    UnicodePropertyValueNotFound,
    /// This occurs when a Unicode-aware Perl character class (`\w`, `\s` or
    /// `\d`) could not be found. This can occur when the `unicode-perl`
    /// crate feature is not enabled.
    UnicodePerlClassNotFound,
    /// This occurs when the Unicode simple case mapping tables are not
    /// available, and the regular expression required Unicode aware case
    /// insensitivity.
    UnicodeCaseUnavailable,
}

#[cfg(feature = "std")]
impl std::error::Error for Error {}

impl core::fmt::Display for Error {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        crate::error::Formatter::from(self).fmt(f)
    }
}

impl core::fmt::Display for ErrorKind {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        use self::ErrorKind::*;

        let msg = match *self {
            UnicodeNotAllowed => "Unicode not allowed here",
            InvalidUtf8 => "pattern can match invalid UTF-8",
            InvalidLineTerminator => "invalid line terminator, must be ASCII",
            UnicodePropertyNotFound => "Unicode property not found",
            UnicodePropertyValueNotFound => "Unicode property value not found",
            UnicodePerlClassNotFound => {
                "Unicode-aware Perl class not found \
                 (make sure the unicode-perl feature is enabled)"
            }
            UnicodeCaseUnavailable => {
                "Unicode-aware case insensitivity matching is not available \
                 (make sure the unicode-case feature is enabled)"
            }
        };
        f.write_str(msg)
    }
}

/// A high-level intermediate representation (HIR) for a regular expression.
///
/// An HIR value is a combination of a [`HirKind`] and a set of [`Properties`].
/// An `HirKind` indicates what kind of regular expression it is (a literal,
/// a repetition, a look-around assertion, etc.), whereas a `Properties`
/// describes various facts about the regular expression. For example, whether
/// it matches UTF-8 or if it matches the empty string.
///
/// The HIR of a regular expression represents an intermediate step between
/// its abstract syntax (a structured description of the concrete syntax) and
/// an actual regex matcher. The purpose of HIR is to make regular expressions
/// easier to analyze. In particular, the AST is much more complex than the
/// HIR. For example, while an AST supports arbitrarily nested character
/// classes, the HIR will flatten all nested classes into a single set. The HIR
/// will also "compile away" every flag present in the concrete syntax.
For /// example, users of HIR expressions never need to worry about case folding; /// it is handled automatically by the translator (e.g., by translating /// `(?i:A)` to `[aA]`). /// /// The specific type of an HIR expression can be accessed via its `kind` /// or `into_kind` methods. This extra level of indirection exists for two /// reasons: /// /// 1. Construction of an HIR expression *must* use the constructor methods on /// this `Hir` type instead of building the `HirKind` values directly. This /// permits construction to enforce invariants like "concatenations always /// consist of two or more sub-expressions." /// 2. Every HIR expression contains attributes that are defined inductively, /// and can be computed cheaply during the construction process. For example, /// one such attribute is whether the expression must match at the beginning of /// the haystack. /// /// In particular, if you have an `HirKind` value, then there is intentionally /// no way to build an `Hir` value from it. You instead need to do case /// analysis on the `HirKind` value and build the `Hir` value using its smart /// constructors. /// /// # UTF-8 /// /// If the HIR was produced by a translator with /// [`TranslatorBuilder::utf8`](translate::TranslatorBuilder::utf8) enabled, /// then the HIR is guaranteed to match UTF-8 exclusively for all non-empty /// matches. /// /// For empty matches, those can occur at any position. It is the /// responsibility of the regex engine to determine whether empty matches are /// permitted between the code units of a single codepoint. /// /// # Stack space /// /// This type defines its own destructor that uses constant stack space and /// heap space proportional to the size of the HIR. /// /// Also, an `Hir`'s `fmt::Display` implementation prints an HIR as a regular /// expression pattern string, and uses constant stack space and heap space /// proportional to the size of the `Hir`. The regex it prints is guaranteed to /// be _semantically_ equivalent to the original concrete syntax, but it may /// look very different. (And potentially not practically readable by a human.) /// /// An `Hir`'s `fmt::Debug` implementation currently does not use constant /// stack space. The implementation will also suppress some details (such as /// the `Properties` inlined into every `Hir` value to make it less noisy). #[derive(Clone, Eq, PartialEq)] pub struct Hir { /// The underlying HIR kind. kind: HirKind, /// Analysis info about this HIR, computed during construction. props: Properties, } /// Methods for accessing the underlying `HirKind` and `Properties`. impl Hir { /// Returns a reference to the underlying HIR kind. pub fn kind(&self) -> &HirKind { &self.kind } /// Consumes ownership of this HIR expression and returns its underlying /// `HirKind`. pub fn into_kind(mut self) -> HirKind { core::mem::replace(&mut self.kind, HirKind::Empty) } /// Returns the properties computed for this `Hir`. pub fn properties(&self) -> &Properties { &self.props } /// Splits this HIR into its constituent parts. /// /// This is useful because `let Hir { kind, props } = hir;` does not work /// because of `Hir`'s custom `Drop` implementation. fn into_parts(mut self) -> (HirKind, Properties) { ( core::mem::replace(&mut self.kind, HirKind::Empty), core::mem::replace(&mut self.props, Properties::empty()), ) } } /// Smart constructors for HIR values. /// /// These constructors are called "smart" because they do inductive work or /// simplifications. 
For example, calling `Hir::repetition` with a repetition
/// like `a{0}` will actually return a `Hir` with a `HirKind::Empty` kind
/// since it is equivalent to an empty regex. Another example is calling
/// `Hir::concat(vec![expr])`. Instead of getting a `HirKind::Concat`, you'll
/// just get back the original `expr` since it's precisely equivalent.
///
/// Smart constructors enable maintaining invariants about the HIR data type
/// while simultaneously keeping the representation as simple as possible.
impl Hir {
    /// Returns an empty HIR expression.
    ///
    /// An empty HIR expression always matches, including the empty string.
    #[inline]
    pub fn empty() -> Hir {
        let props = Properties::empty();
        Hir { kind: HirKind::Empty, props }
    }

    /// Returns an HIR expression that can never match anything. That is,
    /// the size of the set of strings in the language described by the HIR
    /// returned is `0`.
    ///
    /// This is distinct from [`Hir::empty`] in that the empty string matches
    /// the HIR returned by `Hir::empty`. That is, the set of strings in the
    /// language described by `Hir::empty` is non-empty.
    ///
    /// Note that currently, the HIR returned uses an empty character class to
    /// indicate that nothing can match. An equivalent expression that cannot
    /// match is an empty alternation, but all such "fail" expressions are
    /// normalized (via smart constructors) to empty character classes. This is
    /// because empty character classes can be spelled in the concrete syntax
    /// of a regex (e.g., `\P{any}` or `(?-u:[^\x00-\xFF])` or `[a&&b]`), but
    /// empty alternations cannot.
    #[inline]
    pub fn fail() -> Hir {
        let class = Class::Bytes(ClassBytes::empty());
        let props = Properties::class(&class);
        // We can't just call Hir::class here because it defers to Hir::fail
        // in order to canonicalize the Hir value used to represent "cannot
        // match."
        Hir { kind: HirKind::Class(class), props }
    }

    /// Creates a literal HIR expression.
    ///
    /// This accepts anything that can be converted into a `Box<[u8]>`.
    ///
    /// Note that there is no mechanism for storing a `char` or a `Box<str>`
    /// in an HIR. Everything is "just bytes." Whether a `Literal` (or
    /// any HIR node) matches valid UTF-8 exclusively can be queried via
    /// [`Properties::is_utf8`].
    ///
    /// # Example
    ///
    /// This example shows that concatenations of `Literal` HIR values will
    /// automatically get flattened and combined together. So for example, even
    /// if you concat multiple `Literal` values that are themselves not valid
    /// UTF-8, they might add up to valid UTF-8. This also demonstrates just
    /// how "smart" Hir's smart constructors are.
    ///
    /// ```
    /// use regex_syntax::hir::{Hir, HirKind, Literal};
    ///
    /// let literals = vec![
    ///     Hir::literal([0xE2]),
    ///     Hir::literal([0x98]),
    ///     Hir::literal([0x83]),
    /// ];
    /// // Each literal, on its own, is invalid UTF-8.
    /// assert!(literals.iter().all(|hir| !hir.properties().is_utf8()));
    ///
    /// let concat = Hir::concat(literals);
    /// // But the concatenation is valid UTF-8!
    /// assert!(concat.properties().is_utf8());
    ///
    /// // And also notice that the literals have been concatenated into a
    /// // single `Literal`, to the point where there is no explicit `Concat`!
/// let expected = HirKind::Literal(Literal(Box::from("☃".as_bytes()))); /// assert_eq!(&expected, concat.kind()); /// ``` #[inline] pub fn literal<B: Into<Box<[u8]>>>(lit: B) -> Hir { let bytes = lit.into(); if bytes.is_empty() { return Hir::empty(); } let lit = Literal(bytes); let props = Properties::literal(&lit); Hir { kind: HirKind::Literal(lit), props } } /// Creates a class HIR expression. The class may either be defined over /// ranges of Unicode codepoints or ranges of raw byte values. /// /// Note that an empty class is permitted. An empty class is equivalent to /// `Hir::fail()`. #[inline] pub fn class(class: Class) -> Hir { if class.is_empty() { return Hir::fail(); } else if let Some(bytes) = class.literal() { return Hir::literal(bytes); } let props = Properties::class(&class); Hir { kind: HirKind::Class(class), props } } /// Creates a look-around assertion HIR expression. #[inline] pub fn look(look: Look) -> Hir { let props = Properties::look(look); Hir { kind: HirKind::Look(look), props } } /// Creates a repetition HIR expression. #[inline] pub fn repetition(mut rep: Repetition) -> Hir { // If the sub-expression of a repetition can only match the empty // string, then we force its maximum to be at most 1. if rep.sub.properties().maximum_len() == Some(0) { rep.min = cmp::min(rep.min, 1); rep.max = rep.max.map(|n| cmp::min(n, 1)).or(Some(1)); } // The regex 'a{0}' is always equivalent to the empty regex. This is // true even when 'a' is an expression that never matches anything // (like '\P{any}'). // // Additionally, the regex 'a{1}' is always equivalent to 'a'. if rep.min == 0 && rep.max == Some(0) { return Hir::empty(); } else if rep.min == 1 && rep.max == Some(1) { return *rep.sub; } let props = Properties::repetition(&rep); Hir { kind: HirKind::Repetition(rep), props } } /// Creates a capture HIR expression. /// /// Note that there is no explicit HIR value for a non-capturing group. /// Since a non-capturing group only exists to override precedence in the /// concrete syntax and since an HIR already does its own grouping based on /// what is parsed, there is no need to explicitly represent non-capturing /// groups in the HIR. #[inline] pub fn capture(capture: Capture) -> Hir { let props = Properties::capture(&capture); Hir { kind: HirKind::Capture(capture), props } } /// Returns the concatenation of the given expressions. /// /// This attempts to flatten and simplify the concatenation as appropriate. /// /// # Example /// /// This shows a simple example of basic flattening of both concatenations /// and literals. /// /// ``` /// use regex_syntax::hir::Hir; /// /// let hir = Hir::concat(vec![ /// Hir::concat(vec![ /// Hir::literal([b'a']), /// Hir::literal([b'b']), /// Hir::literal([b'c']), /// ]), /// Hir::concat(vec![ /// Hir::literal([b'x']), /// Hir::literal([b'y']), /// Hir::literal([b'z']), /// ]), /// ]); /// let expected = Hir::literal("abcxyz".as_bytes()); /// assert_eq!(expected, hir); /// ``` pub fn concat(subs: Vec<Hir>) -> Hir { // We rebuild the concatenation by simplifying it. Would be nice to do // it in place, but that seems a little tricky? let mut new = vec![]; // This gobbles up any adjacent literals in a concatenation and smushes // them together. Basically, when we see a literal, we add its bytes // to 'prior_lit', and whenever we see anything else, we first take // any bytes in 'prior_lit' and add it to the 'new' concatenation. 
let mut prior_lit: Option<Vec<u8>> = None; for sub in subs { let (kind, props) = sub.into_parts(); match kind { HirKind::Literal(Literal(bytes)) => { if let Some(ref mut prior_bytes) = prior_lit { prior_bytes.extend_from_slice(&bytes); } else { prior_lit = Some(bytes.to_vec()); } } // We also flatten concats that are direct children of another // concat. We only need to do this one level deep since // Hir::concat is the only way to build concatenations, and so // flattening happens inductively. HirKind::Concat(subs2) => { for sub2 in subs2 { let (kind2, props2) = sub2.into_parts(); match kind2 { HirKind::Literal(Literal(bytes)) => { if let Some(ref mut prior_bytes) = prior_lit { prior_bytes.extend_from_slice(&bytes); } else { prior_lit = Some(bytes.to_vec()); } } kind2 => { if let Some(prior_bytes) = prior_lit.take() { new.push(Hir::literal(prior_bytes)); } new.push(Hir { kind: kind2, props: props2 }); } } } } // We can just skip empty HIRs. HirKind::Empty => {} kind => { if let Some(prior_bytes) = prior_lit.take() { new.push(Hir::literal(prior_bytes)); } new.push(Hir { kind, props }); } } } if let Some(prior_bytes) = prior_lit.take() { new.push(Hir::literal(prior_bytes)); } if new.is_empty() { return Hir::empty(); } else if new.len() == 1 { return new.pop().unwrap(); } let props = Properties::concat(&new); Hir { kind: HirKind::Concat(new), props } } /// Returns the alternation of the given expressions. /// /// This flattens and simplifies the alternation as appropriate. This may /// include factoring out common prefixes or even rewriting the alternation /// as a character class. /// /// Note that an empty alternation is equivalent to `Hir::fail()`. (It /// is not possible for one to write an empty alternation, or even an /// alternation with a single sub-expression, in the concrete syntax of a /// regex.) /// /// # Example /// /// This is a simple example showing how an alternation might get /// simplified. /// /// ``` /// use regex_syntax::hir::{Hir, Class, ClassUnicode, ClassUnicodeRange}; /// /// let hir = Hir::alternation(vec![ /// Hir::literal([b'a']), /// Hir::literal([b'b']), /// Hir::literal([b'c']), /// Hir::literal([b'd']), /// Hir::literal([b'e']), /// Hir::literal([b'f']), /// ]); /// let expected = Hir::class(Class::Unicode(ClassUnicode::new([ /// ClassUnicodeRange::new('a', 'f'), /// ]))); /// assert_eq!(expected, hir); /// ``` /// /// And another example showing how common prefixes might get factored /// out. /// /// ``` /// use regex_syntax::hir::{Hir, Class, ClassUnicode, ClassUnicodeRange}; /// /// let hir = Hir::alternation(vec![ /// Hir::concat(vec![ /// Hir::literal("abc".as_bytes()), /// Hir::class(Class::Unicode(ClassUnicode::new([ /// ClassUnicodeRange::new('A', 'Z'), /// ]))), /// ]), /// Hir::concat(vec![ /// Hir::literal("abc".as_bytes()), /// Hir::class(Class::Unicode(ClassUnicode::new([ /// ClassUnicodeRange::new('a', 'z'), /// ]))), /// ]), /// ]); /// let expected = Hir::concat(vec![ /// Hir::literal("abc".as_bytes()), /// Hir::alternation(vec![ /// Hir::class(Class::Unicode(ClassUnicode::new([ /// ClassUnicodeRange::new('A', 'Z'), /// ]))), /// Hir::class(Class::Unicode(ClassUnicode::new([ /// ClassUnicodeRange::new('a', 'z'), /// ]))), /// ]), /// ]); /// assert_eq!(expected, hir); /// ``` /// /// Note that these sorts of simplifications are not guaranteed. pub fn alternation(subs: Vec<Hir>) -> Hir { // We rebuild the alternation by simplifying it. We proceed similarly // as the concatenation case. 
But in this case, there's no literal // simplification happening. We're just flattening alternations. let mut new = Vec::with_capacity(subs.len()); for sub in subs { let (kind, props) = sub.into_parts(); match kind { HirKind::Alternation(subs2) => { new.extend(subs2); } kind => { new.push(Hir { kind, props }); } } } if new.is_empty() { return Hir::fail(); } else if new.len() == 1 { return new.pop().unwrap(); } // Now that it's completely flattened, look for the special case of // 'char1|char2|...|charN' and collapse that into a class. Note that // we look for 'char' first and then bytes. The issue here is that if // we find both non-ASCII codepoints and non-ASCII singleton bytes, // then it isn't actually possible to smush them into a single class. // (Because classes are either "all codepoints" or "all bytes." You // can have a class that both matches non-ASCII but valid UTF-8 and // invalid UTF-8.) So we look for all chars and then all bytes, and // don't handle anything else. if let Some(singletons) = singleton_chars(&new) { let it = singletons .into_iter() .map(|ch| ClassUnicodeRange { start: ch, end: ch }); return Hir::class(Class::Unicode(ClassUnicode::new(it))); } if let Some(singletons) = singleton_bytes(&new) { let it = singletons .into_iter() .map(|b| ClassBytesRange { start: b, end: b }); return Hir::class(Class::Bytes(ClassBytes::new(it))); } // Similar to singleton chars, we can also look for alternations of // classes. Those can be smushed into a single class. if let Some(cls) = class_chars(&new) { return Hir::class(cls); } if let Some(cls) = class_bytes(&new) { return Hir::class(cls); } // Factor out a common prefix if we can, which might potentially // simplify the expression and unlock other optimizations downstream. // It also might generally make NFA matching and DFA construction // faster by reducing the scope of branching in the regex. new = match lift_common_prefix(new) { Ok(hir) => return hir, Err(unchanged) => unchanged, }; let props = Properties::alternation(&new); Hir { kind: HirKind::Alternation(new), props } } /// Returns an HIR expression for `.`. /// /// * [`Dot::AnyChar`] maps to `(?su-R:.)`. /// * [`Dot::AnyByte`] maps to `(?s-Ru:.)`. /// * [`Dot::AnyCharExceptLF`] maps to `(?u-Rs:.)`. /// * [`Dot::AnyCharExceptCRLF`] maps to `(?Ru-s:.)`. /// * [`Dot::AnyByteExceptLF`] maps to `(?-Rsu:.)`. /// * [`Dot::AnyByteExceptCRLF`] maps to `(?R-su:.)`. /// /// # Example /// /// Note that this is a convenience routine for constructing the correct /// character class based on the value of `Dot`. There is no explicit "dot" /// HIR value. It is just an abbreviation for a common character class. 
/// /// ``` /// use regex_syntax::hir::{Hir, Dot, Class, ClassBytes, ClassBytesRange}; /// /// let hir = Hir::dot(Dot::AnyByte); /// let expected = Hir::class(Class::Bytes(ClassBytes::new([ /// ClassBytesRange::new(0x00, 0xFF), /// ]))); /// assert_eq!(expected, hir); /// ``` #[inline] pub fn dot(dot: Dot) -> Hir { match dot { Dot::AnyChar => { let mut cls = ClassUnicode::empty(); cls.push(ClassUnicodeRange::new('\0', '\u{10FFFF}')); Hir::class(Class::Unicode(cls)) } Dot::AnyByte => { let mut cls = ClassBytes::empty(); cls.push(ClassBytesRange::new(b'\0', b'\xFF')); Hir::class(Class::Bytes(cls)) } Dot::AnyCharExcept(ch) => { let mut cls = ClassUnicode::new([ClassUnicodeRange::new(ch, ch)]); cls.negate(); Hir::class(Class::Unicode(cls)) } Dot::AnyCharExceptLF => { let mut cls = ClassUnicode::empty(); cls.push(ClassUnicodeRange::new('\0', '\x09')); cls.push(ClassUnicodeRange::new('\x0B', '\u{10FFFF}')); Hir::class(Class::Unicode(cls)) } Dot::AnyCharExceptCRLF => { let mut cls = ClassUnicode::empty(); cls.push(ClassUnicodeRange::new('\0', '\x09')); cls.push(ClassUnicodeRange::new('\x0B', '\x0C')); cls.push(ClassUnicodeRange::new('\x0E', '\u{10FFFF}')); Hir::class(Class::Unicode(cls)) } Dot::AnyByteExcept(byte) => { let mut cls = ClassBytes::new([ClassBytesRange::new(byte, byte)]); cls.negate(); Hir::class(Class::Bytes(cls)) } Dot::AnyByteExceptLF => { let mut cls = ClassBytes::empty(); cls.push(ClassBytesRange::new(b'\0', b'\x09')); cls.push(ClassBytesRange::new(b'\x0B', b'\xFF')); Hir::class(Class::Bytes(cls)) } Dot::AnyByteExceptCRLF => { let mut cls = ClassBytes::empty(); cls.push(ClassBytesRange::new(b'\0', b'\x09')); cls.push(ClassBytesRange::new(b'\x0B', b'\x0C')); cls.push(ClassBytesRange::new(b'\x0E', b'\xFF')); Hir::class(Class::Bytes(cls)) } } } } /// The underlying kind of an arbitrary [`Hir`] expression. /// /// An `HirKind` is principally useful for doing case analysis on the type /// of a regular expression. If you're looking to build new `Hir` values, /// then you _must_ use the smart constructors defined on `Hir`, like /// [`Hir::repetition`], to build new `Hir` values. The API intentionally does /// not expose any way of building an `Hir` directly from an `HirKind`. #[derive(Clone, Debug, Eq, PartialEq)] pub enum HirKind { /// The empty regular expression, which matches everything, including the /// empty string. Empty, /// A literalstring that matches exactly these bytes. Literal(Literal), /// A single character class that matches any of the characters in the /// class. A class can either consist of Unicode scalar values as /// characters, or it can use bytes. /// /// A class may be empty. In which case, it matches nothing. Class(Class), /// A look-around assertion. A look-around match always has zero length. Look(Look), /// A repetition operation applied to a sub-expression. Repetition(Repetition), /// A capturing group, which contains a sub-expression. Capture(Capture), /// A concatenation of expressions. /// /// A concatenation matches only if each of its sub-expressions match one /// after the other. /// /// Concatenations are guaranteed by `Hir`'s smart constructors to always /// have at least two sub-expressions. Concat(Vec<Hir>), /// An alternation of expressions. /// /// An alternation matches only if at least one of its sub-expressions /// match. If multiple sub-expressions match, then the leftmost is /// preferred. /// /// Alternations are guaranteed by `Hir`'s smart constructors to always /// have at least two sub-expressions. 
Alternation(Vec<Hir>), } impl HirKind { /// Returns a slice of this kind's sub-expressions, if any. pub fn subs(&self) -> &[Hir] { use core::slice::from_ref; match *self { HirKind::Empty | HirKind::Literal(_) | HirKind::Class(_) | HirKind::Look(_) => &[], HirKind::Repetition(Repetition { ref sub, .. }) => from_ref(sub), HirKind::Capture(Capture { ref sub, .. }) => from_ref(sub), HirKind::Concat(ref subs) => subs, HirKind::Alternation(ref subs) => subs, } } } impl core::fmt::Debug for Hir { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { self.kind.fmt(f) } } /// Print a display representation of this Hir. /// /// The result of this is a valid regular expression pattern string. /// /// This implementation uses constant stack space and heap space proportional /// to the size of the `Hir`. impl core::fmt::Display for Hir { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { crate::hir::print::Printer::new().print(self, f) } } /// The high-level intermediate representation of a literal. /// /// A literal corresponds to `0` or more bytes that should be matched /// literally. The smart constructors defined on `Hir` will automatically /// concatenate adjacent literals into one literal, and will even automatically /// replace empty literals with `Hir::empty()`. /// /// Note that despite a literal being represented by a sequence of bytes, its /// `Debug` implementation will attempt to print it as a normal string. (That /// is, not a sequence of decimal numbers.) #[derive(Clone, Eq, PartialEq)] pub struct Literal(pub Box<[u8]>); impl core::fmt::Debug for Literal { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { crate::debug::Bytes(&self.0).fmt(f) } } /// The high-level intermediate representation of a character class. /// /// A character class corresponds to a set of characters. A character is either /// defined by a Unicode scalar value or a byte. Unicode characters are used /// by default, while bytes are used when Unicode mode (via the `u` flag) is /// disabled. /// /// A character class, regardless of its character type, is represented by a /// sequence of non-overlapping non-adjacent ranges of characters. /// /// Note that `Bytes` variant may be produced even when it exclusively matches /// valid UTF-8. This is because a `Bytes` variant represents an intention by /// the author of the regular expression to disable Unicode mode, which in turn /// impacts the semantics of case insensitive matching. For example, `(?i)k` /// and `(?i-u)k` will not match the same set of strings. #[derive(Clone, Eq, PartialEq)] pub enum Class { /// A set of characters represented by Unicode scalar values. Unicode(ClassUnicode), /// A set of characters represented by arbitrary bytes (one byte per /// character). Bytes(ClassBytes), } impl Class { /// Apply Unicode simple case folding to this character class, in place. /// The character class will be expanded to include all simple case folded /// character variants. /// /// If this is a byte oriented character class, then this will be limited /// to the ASCII ranges `A-Z` and `a-z`. /// /// # Panics /// /// This routine panics when the case mapping data necessary for this /// routine to complete is unavailable. This occurs when the `unicode-case` /// feature is not enabled and the underlying class is Unicode oriented. /// /// Callers should prefer using `try_case_fold_simple` instead, which will /// return an error instead of panicking. 
pub fn case_fold_simple(&mut self) { match *self { Class::Unicode(ref mut x) => x.case_fold_simple(), Class::Bytes(ref mut x) => x.case_fold_simple(), } } /// Apply Unicode simple case folding to this character class, in place. /// The character class will be expanded to include all simple case folded /// character variants. /// /// If this is a byte oriented character class, then this will be limited /// to the ASCII ranges `A-Z` and `a-z`. /// /// # Error /// /// This routine returns an error when the case mapping data necessary /// for this routine to complete is unavailable. This occurs when the /// `unicode-case` feature is not enabled and the underlying class is /// Unicode oriented. pub fn try_case_fold_simple( &mut self, ) -> core::result::Result<(), CaseFoldError> { match *self { Class::Unicode(ref mut x) => x.try_case_fold_simple()?, Class::Bytes(ref mut x) => x.case_fold_simple(), } Ok(()) } /// Negate this character class in place. /// /// After completion, this character class will contain precisely the /// characters that weren't previously in the class. pub fn negate(&mut self) { match *self { Class::Unicode(ref mut x) => x.negate(), Class::Bytes(ref mut x) => x.negate(), } } /// Returns true if and only if this character class will only ever match /// valid UTF-8. /// /// A character class can match invalid UTF-8 only when the following /// conditions are met: /// /// 1. The translator was configured to permit generating an expression /// that can match invalid UTF-8. (By default, this is disabled.) /// 2. Unicode mode (via the `u` flag) was disabled either in the concrete /// syntax or in the parser builder. By default, Unicode mode is /// enabled. pub fn is_utf8(&self) -> bool { match *self { Class::Unicode(_) => true, Class::Bytes(ref x) => x.is_ascii(), } } /// Returns the length, in bytes, of the smallest string matched by this /// character class. /// /// For non-empty byte oriented classes, this always returns `1`. For /// non-empty Unicode oriented classes, this can return `1`, `2`, `3` or /// `4`. For empty classes, `None` is returned. It is impossible for `0` to /// be returned. /// /// # Example /// /// This example shows some examples of regexes and their corresponding /// minimum length, if any. /// /// ``` /// use regex_syntax::{hir::Properties, parse}; /// /// // The empty string has a min length of 0. /// let hir = parse(r"")?; /// assert_eq!(Some(0), hir.properties().minimum_len()); /// // As do other types of regexes that only match the empty string. /// let hir = parse(r"^$\b\B")?; /// assert_eq!(Some(0), hir.properties().minimum_len()); /// // A regex that can match the empty string but match more is still 0. /// let hir = parse(r"a*")?; /// assert_eq!(Some(0), hir.properties().minimum_len()); /// // A regex that matches nothing has no minimum defined. /// let hir = parse(r"[a&&b]")?; /// assert_eq!(None, hir.properties().minimum_len()); /// // Character classes usually have a minimum length of 1. /// let hir = parse(r"\w")?; /// assert_eq!(Some(1), hir.properties().minimum_len()); /// // But sometimes Unicode classes might be bigger! /// let hir = parse(r"\p{Cyrillic}")?; /// assert_eq!(Some(2), hir.properties().minimum_len()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn minimum_len(&self) -> Option<usize> { match *self { Class::Unicode(ref x) => x.minimum_len(), Class::Bytes(ref x) => x.minimum_len(), } } /// Returns the length, in bytes, of the longest string matched by this /// character class. 
/// /// For non-empty byte oriented classes, this always returns `1`. For /// non-empty Unicode oriented classes, this can return `1`, `2`, `3` or /// `4`. For empty classes, `None` is returned. It is impossible for `0` to /// be returned. /// /// # Example /// /// This example shows some examples of regexes and their corresponding /// maximum length, if any. /// /// ``` /// use regex_syntax::{hir::Properties, parse}; /// /// // The empty string has a max length of 0. /// let hir = parse(r"")?; /// assert_eq!(Some(0), hir.properties().maximum_len()); /// // As do other types of regexes that only match the empty string. /// let hir = parse(r"^$\b\B")?; /// assert_eq!(Some(0), hir.properties().maximum_len()); /// // A regex that matches nothing has no maximum defined. /// let hir = parse(r"[a&&b]")?; /// assert_eq!(None, hir.properties().maximum_len()); /// // Bounded repeats work as you expect. /// let hir = parse(r"x{2,10}")?; /// assert_eq!(Some(10), hir.properties().maximum_len()); /// // An unbounded repeat means there is no maximum. /// let hir = parse(r"x{2,}")?; /// assert_eq!(None, hir.properties().maximum_len()); /// // With Unicode enabled, \w can match up to 4 bytes! /// let hir = parse(r"\w")?; /// assert_eq!(Some(4), hir.properties().maximum_len()); /// // Without Unicode enabled, \w matches at most 1 byte. /// let hir = parse(r"(?-u)\w")?; /// assert_eq!(Some(1), hir.properties().maximum_len()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn maximum_len(&self) -> Option<usize> { match *self { Class::Unicode(ref x) => x.maximum_len(), Class::Bytes(ref x) => x.maximum_len(), } } /// Returns true if and only if this character class is empty. That is, /// it has no elements. /// /// An empty character can never match anything, including an empty string. pub fn is_empty(&self) -> bool { match *self { Class::Unicode(ref x) => x.ranges().is_empty(), Class::Bytes(ref x) => x.ranges().is_empty(), } } /// If this class consists of exactly one element (whether a codepoint or a /// byte), then return it as a literal byte string. /// /// If this class is empty or contains more than one element, then `None` /// is returned. pub fn literal(&self) -> Option<Vec<u8>> { match *self { Class::Unicode(ref x) => x.literal(), Class::Bytes(ref x) => x.literal(), } } } impl core::fmt::Debug for Class { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { use crate::debug::Byte; let mut fmter = f.debug_set(); match *self { Class::Unicode(ref cls) => { for r in cls.ranges().iter() { fmter.entry(&(r.start..=r.end)); } } Class::Bytes(ref cls) => { for r in cls.ranges().iter() { fmter.entry(&(Byte(r.start)..=Byte(r.end))); } } } fmter.finish() } } /// A set of characters represented by Unicode scalar values. #[derive(Clone, Debug, Eq, PartialEq)] pub struct ClassUnicode { set: IntervalSet<ClassUnicodeRange>, } impl ClassUnicode { /// Create a new class from a sequence of ranges. /// /// The given ranges do not need to be in any specific order, and ranges /// may overlap. Ranges will automatically be sorted into a canonical /// non-overlapping order. pub fn new<I>(ranges: I) -> ClassUnicode where I: IntoIterator<Item = ClassUnicodeRange>, { ClassUnicode { set: IntervalSet::new(ranges) } } /// Create a new class with no ranges. /// /// An empty class matches nothing. That is, it is equivalent to /// [`Hir::fail`]. pub fn empty() -> ClassUnicode { ClassUnicode::new(vec![]) } /// Add a new range to this set. 
pub fn push(&mut self, range: ClassUnicodeRange) { self.set.push(range); } /// Return an iterator over all ranges in this class. /// /// The iterator yields ranges in ascending order. pub fn iter(&self) -> ClassUnicodeIter<'_> { ClassUnicodeIter(self.set.iter()) } /// Return the underlying ranges as a slice. pub fn ranges(&self) -> &[ClassUnicodeRange] { self.set.intervals() } /// Expand this character class such that it contains all case folded /// characters, according to Unicode's "simple" mapping. For example, if /// this class consists of the range `a-z`, then applying case folding will /// result in the class containing both the ranges `a-z` and `A-Z`. /// /// # Panics /// /// This routine panics when the case mapping data necessary for this /// routine to complete is unavailable. This occurs when the `unicode-case` /// feature is not enabled. /// /// Callers should prefer using `try_case_fold_simple` instead, which will /// return an error instead of panicking. pub fn case_fold_simple(&mut self) { self.set .case_fold_simple() .expect("unicode-case feature must be enabled"); } /// Expand this character class such that it contains all case folded /// characters, according to Unicode's "simple" mapping. For example, if /// this class consists of the range `a-z`, then applying case folding will /// result in the class containing both the ranges `a-z` and `A-Z`. /// /// # Error /// /// This routine returns an error when the case mapping data necessary /// for this routine to complete is unavailable. This occurs when the /// `unicode-case` feature is not enabled. pub fn try_case_fold_simple( &mut self, ) -> core::result::Result<(), CaseFoldError> { self.set.case_fold_simple() } /// Negate this character class. /// /// For all `c` where `c` is a Unicode scalar value, if `c` was in this /// set, then it will not be in this set after negation. pub fn negate(&mut self) { self.set.negate(); } /// Union this character class with the given character class, in place. pub fn union(&mut self, other: &ClassUnicode) { self.set.union(&other.set); } /// Intersect this character class with the given character class, in /// place. pub fn intersect(&mut self, other: &ClassUnicode) { self.set.intersect(&other.set); } /// Subtract the given character class from this character class, in place. pub fn difference(&mut self, other: &ClassUnicode) { self.set.difference(&other.set); } /// Compute the symmetric difference of the given character classes, in /// place. /// /// This computes the symmetric difference of two character classes. This /// removes all elements in this class that are also in the given class, /// but all adds all elements from the given class that aren't in this /// class. That is, the class will contain all elements in either class, /// but will not contain any elements that are in both classes. pub fn symmetric_difference(&mut self, other: &ClassUnicode) { self.set.symmetric_difference(&other.set); } /// Returns true if and only if this character class will either match /// nothing or only ASCII bytes. Stated differently, this returns false /// if and only if this class contains a non-ASCII codepoint. pub fn is_ascii(&self) -> bool { self.set.intervals().last().map_or(true, |r| r.end <= '\x7F') } /// Returns the length, in bytes, of the smallest string matched by this /// character class. /// /// Returns `None` when the class is empty. pub fn minimum_len(&self) -> Option<usize> { let first = self.ranges().get(0)?; // Correct because c1 < c2 implies c1.len_utf8() < c2.len_utf8(). 
Some(first.start.len_utf8()) } /// Returns the length, in bytes, of the longest string matched by this /// character class. /// /// Returns `None` when the class is empty. pub fn maximum_len(&self) -> Option<usize> { let last = self.ranges().last()?; // Correct because c1 < c2 implies c1.len_utf8() < c2.len_utf8(). Some(last.end.len_utf8()) } /// If this class consists of exactly one codepoint, then return it as /// a literal byte string. /// /// If this class is empty or contains more than one codepoint, then `None` /// is returned. pub fn literal(&self) -> Option<Vec<u8>> { let rs = self.ranges(); if rs.len() == 1 && rs[0].start == rs[0].end { Some(rs[0].start.encode_utf8(&mut [0; 4]).to_string().into_bytes()) } else { None } } /// If this class consists of only ASCII ranges, then return its /// corresponding and equivalent byte class. pub fn to_byte_class(&self) -> Option<ClassBytes> { if !self.is_ascii() { return None; } Some(ClassBytes::new(self.ranges().iter().map(|r| { // Since we are guaranteed that our codepoint range is ASCII, the // 'u8::try_from' calls below are guaranteed to be correct. ClassBytesRange { start: u8::try_from(r.start).unwrap(), end: u8::try_from(r.end).unwrap(), } }))) } } /// An iterator over all ranges in a Unicode character class. /// /// The lifetime `'a` refers to the lifetime of the underlying class. #[derive(Debug)] pub struct ClassUnicodeIter<'a>(IntervalSetIter<'a, ClassUnicodeRange>); impl<'a> Iterator for ClassUnicodeIter<'a> { type Item = &'a ClassUnicodeRange; fn next(&mut self) -> Option<&'a ClassUnicodeRange> { self.0.next() } } /// A single range of characters represented by Unicode scalar values. /// /// The range is closed. That is, the start and end of the range are included /// in the range. #[derive(Clone, Copy, Default, Eq, PartialEq, PartialOrd, Ord)] pub struct ClassUnicodeRange { start: char, end: char, } impl core::fmt::Debug for ClassUnicodeRange { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { let start = if !self.start.is_whitespace() && !self.start.is_control() { self.start.to_string() } else { format!("0x{:X}", u32::from(self.start)) }; let end = if !self.end.is_whitespace() && !self.end.is_control() { self.end.to_string() } else { format!("0x{:X}", u32::from(self.end)) }; f.debug_struct("ClassUnicodeRange") .field("start", &start) .field("end", &end) .finish() } } impl Interval for ClassUnicodeRange { type Bound = char; #[inline] fn lower(&self) -> char { self.start } #[inline] fn upper(&self) -> char { self.end } #[inline] fn set_lower(&mut self, bound: char) { self.start = bound; } #[inline] fn set_upper(&mut self, bound: char) { self.end = bound; } /// Apply simple case folding to this Unicode scalar value range. /// /// Additional ranges are appended to the given vector. Canonical ordering /// is *not* maintained in the given vector. fn case_fold_simple( &self, ranges: &mut Vec<ClassUnicodeRange>, ) -> Result<(), unicode::CaseFoldError> { let mut folder = unicode::SimpleCaseFolder::new()?; if !folder.overlaps(self.start, self.end) { return Ok(()); } let (start, end) = (u32::from(self.start), u32::from(self.end)); for cp in (start..=end).filter_map(char::from_u32) { for &cp_folded in folder.mapping(cp) { ranges.push(ClassUnicodeRange::new(cp_folded, cp_folded)); } } Ok(()) } } impl ClassUnicodeRange { /// Create a new Unicode scalar value range for a character class. /// /// The returned range is always in a canonical form. 
That is, the range /// returned always satisfies the invariant that `start <= end`. pub fn new(start: char, end: char) -> ClassUnicodeRange { ClassUnicodeRange::create(start, end) } /// Return the start of this range. /// /// The start of a range is always less than or equal to the end of the /// range. pub fn start(&self) -> char { self.start } /// Return the end of this range. /// /// The end of a range is always greater than or equal to the start of the /// range. pub fn end(&self) -> char { self.end } /// Returns the number of codepoints in this range. pub fn len(&self) -> usize { let diff = 1 + u32::from(self.end) - u32::from(self.start); // This is likely to panic in 16-bit targets since a usize can only fit // 2^16. It's not clear what to do here, other than to return an error // when building a Unicode class that contains a range whose length // overflows usize. (Which, to be honest, is probably quite common on // 16-bit targets. For example, this would imply that '.' and '\p{any}' // would be impossible to build.) usize::try_from(diff).expect("char class len fits in usize") } } /// A set of characters represented by arbitrary bytes (where one byte /// corresponds to one character). #[derive(Clone, Debug, Eq, PartialEq)] pub struct ClassBytes { set: IntervalSet<ClassBytesRange>, } impl ClassBytes { /// Create a new class from a sequence of ranges. /// /// The given ranges do not need to be in any specific order, and ranges /// may overlap. Ranges will automatically be sorted into a canonical /// non-overlapping order. pub fn new<I>(ranges: I) -> ClassBytes where I: IntoIterator<Item = ClassBytesRange>, { ClassBytes { set: IntervalSet::new(ranges) } } /// Create a new class with no ranges. /// /// An empty class matches nothing. That is, it is equivalent to /// [`Hir::fail`]. pub fn empty() -> ClassBytes { ClassBytes::new(vec![]) } /// Add a new range to this set. pub fn push(&mut self, range: ClassBytesRange) { self.set.push(range); } /// Return an iterator over all ranges in this class. /// /// The iterator yields ranges in ascending order. pub fn iter(&self) -> ClassBytesIter<'_> { ClassBytesIter(self.set.iter()) } /// Return the underlying ranges as a slice. pub fn ranges(&self) -> &[ClassBytesRange] { self.set.intervals() } /// Expand this character class such that it contains all case folded /// characters. For example, if this class consists of the range `a-z`, /// then applying case folding will result in the class containing both the /// ranges `a-z` and `A-Z`. /// /// Note that this only applies ASCII case folding, which is limited to the /// characters `a-z` and `A-Z`. pub fn case_fold_simple(&mut self) { self.set.case_fold_simple().expect("ASCII case folding never fails"); } /// Negate this byte class. /// /// For all `b` where `b` is a any byte, if `b` was in this set, then it /// will not be in this set after negation. pub fn negate(&mut self) { self.set.negate(); } /// Union this byte class with the given byte class, in place. pub fn union(&mut self, other: &ClassBytes) { self.set.union(&other.set); } /// Intersect this byte class with the given byte class, in place. pub fn intersect(&mut self, other: &ClassBytes) { self.set.intersect(&other.set); } /// Subtract the given byte class from this byte class, in place. pub fn difference(&mut self, other: &ClassBytes) { self.set.difference(&other.set); } /// Compute the symmetric difference of the given byte classes, in place. /// /// This computes the symmetric difference of two byte classes. 
This /// removes all elements in this class that are also in the given class, /// but all adds all elements from the given class that aren't in this /// class. That is, the class will contain all elements in either class, /// but will not contain any elements that are in both classes. pub fn symmetric_difference(&mut self, other: &ClassBytes) { self.set.symmetric_difference(&other.set); } /// Returns true if and only if this character class will either match /// nothing or only ASCII bytes. Stated differently, this returns false /// if and only if this class contains a non-ASCII byte. pub fn is_ascii(&self) -> bool { self.set.intervals().last().map_or(true, |r| r.end <= 0x7F) } /// Returns the length, in bytes, of the smallest string matched by this /// character class. /// /// Returns `None` when the class is empty. pub fn minimum_len(&self) -> Option<usize> { if self.ranges().is_empty() { None } else { Some(1) } } /// Returns the length, in bytes, of the longest string matched by this /// character class. /// /// Returns `None` when the class is empty. pub fn maximum_len(&self) -> Option<usize> { if self.ranges().is_empty() { None } else { Some(1) } } /// If this class consists of exactly one byte, then return it as /// a literal byte string. /// /// If this class is empty or contains more than one byte, then `None` /// is returned. pub fn literal(&self) -> Option<Vec<u8>> { let rs = self.ranges(); if rs.len() == 1 && rs[0].start == rs[0].end { Some(vec![rs[0].start]) } else { None } } /// If this class consists of only ASCII ranges, then return its /// corresponding and equivalent Unicode class. pub fn to_unicode_class(&self) -> Option<ClassUnicode> { if !self.is_ascii() { return None; } Some(ClassUnicode::new(self.ranges().iter().map(|r| { // Since we are guaranteed that our byte range is ASCII, the // 'char::from' calls below are correct and will not erroneously // convert a raw byte value into its corresponding codepoint. ClassUnicodeRange { start: char::from(r.start), end: char::from(r.end), } }))) } } /// An iterator over all ranges in a byte character class. /// /// The lifetime `'a` refers to the lifetime of the underlying class. #[derive(Debug)] pub struct ClassBytesIter<'a>(IntervalSetIter<'a, ClassBytesRange>); impl<'a> Iterator for ClassBytesIter<'a> { type Item = &'a ClassBytesRange; fn next(&mut self) -> Option<&'a ClassBytesRange> { self.0.next() } } /// A single range of characters represented by arbitrary bytes. /// /// The range is closed. That is, the start and end of the range are included /// in the range. #[derive(Clone, Copy, Default, Eq, PartialEq, PartialOrd, Ord)] pub struct ClassBytesRange { start: u8, end: u8, } impl Interval for ClassBytesRange { type Bound = u8; #[inline] fn lower(&self) -> u8 { self.start } #[inline] fn upper(&self) -> u8 { self.end } #[inline] fn set_lower(&mut self, bound: u8) { self.start = bound; } #[inline] fn set_upper(&mut self, bound: u8) { self.end = bound; } /// Apply simple case folding to this byte range. Only ASCII case mappings /// (for a-z) are applied. /// /// Additional ranges are appended to the given vector. Canonical ordering /// is *not* maintained in the given vector. 
fn case_fold_simple( &self, ranges: &mut Vec<ClassBytesRange>, ) -> Result<(), unicode::CaseFoldError> { if !ClassBytesRange::new(b'a', b'z').is_intersection_empty(self) { let lower = cmp::max(self.start, b'a'); let upper = cmp::min(self.end, b'z'); ranges.push(ClassBytesRange::new(lower - 32, upper - 32)); } if !ClassBytesRange::new(b'A', b'Z').is_intersection_empty(self) { let lower = cmp::max(self.start, b'A'); let upper = cmp::min(self.end, b'Z'); ranges.push(ClassBytesRange::new(lower + 32, upper + 32)); } Ok(()) } } impl ClassBytesRange { /// Create a new byte range for a character class. /// /// The returned range is always in a canonical form. That is, the range /// returned always satisfies the invariant that `start <= end`. pub fn new(start: u8, end: u8) -> ClassBytesRange { ClassBytesRange::create(start, end) } /// Return the start of this range. /// /// The start of a range is always less than or equal to the end of the /// range. pub fn start(&self) -> u8 { self.start } /// Return the end of this range. /// /// The end of a range is always greater than or equal to the start of the /// range. pub fn end(&self) -> u8 { self.end } /// Returns the number of bytes in this range. pub fn len(&self) -> usize { usize::from(self.end.checked_sub(self.start).unwrap()) .checked_add(1) .unwrap() } } impl core::fmt::Debug for ClassBytesRange { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_struct("ClassBytesRange") .field("start", &crate::debug::Byte(self.start)) .field("end", &crate::debug::Byte(self.end)) .finish() } } /// The high-level intermediate representation for a look-around assertion. /// /// An assertion match is always zero-length. Also called an "empty match." #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub enum Look { /// Match the beginning of text. Specifically, this matches at the starting /// position of the input. Start = 1 << 0, /// Match the end of text. Specifically, this matches at the ending /// position of the input. End = 1 << 1, /// Match the beginning of a line or the beginning of text. Specifically, /// this matches at the starting position of the input, or at the position /// immediately following a `\n` character. StartLF = 1 << 2, /// Match the end of a line or the end of text. Specifically, this matches /// at the end position of the input, or at the position immediately /// preceding a `\n` character. EndLF = 1 << 3, /// Match the beginning of a line or the beginning of text. Specifically, /// this matches at the starting position of the input, or at the position /// immediately following either a `\r` or `\n` character, but never after /// a `\r` when a `\n` follows. StartCRLF = 1 << 4, /// Match the end of a line or the end of text. Specifically, this matches /// at the end position of the input, or at the position immediately /// preceding a `\r` or `\n` character, but never before a `\n` when a `\r` /// precedes it. EndCRLF = 1 << 5, /// Match an ASCII-only word boundary. That is, this matches a position /// where the left adjacent character and right adjacent character /// correspond to a word and non-word or a non-word and word character. WordAscii = 1 << 6, /// Match an ASCII-only negation of a word boundary. WordAsciiNegate = 1 << 7, /// Match a Unicode-aware word boundary. That is, this matches a position /// where the left adjacent character and right adjacent character /// correspond to a word and non-word or a non-word and word character. 
WordUnicode = 1 << 8, /// Match a Unicode-aware negation of a word boundary. WordUnicodeNegate = 1 << 9, } impl Look { /// Flip the look-around assertion to its equivalent for reverse searches. /// For example, `StartLF` gets translated to `EndLF`. /// /// Some assertions, such as `WordUnicode`, remain the same since they /// match the same positions regardless of the direction of the search. #[inline] pub const fn reversed(self) -> Look { match self { Look::Start => Look::End, Look::End => Look::Start, Look::StartLF => Look::EndLF, Look::EndLF => Look::StartLF, Look::StartCRLF => Look::EndCRLF, Look::EndCRLF => Look::StartCRLF, Look::WordAscii => Look::WordAscii, Look::WordAsciiNegate => Look::WordAsciiNegate, Look::WordUnicode => Look::WordUnicode, Look::WordUnicodeNegate => Look::WordUnicodeNegate, } } /// Return the underlying representation of this look-around enumeration /// as an integer. Giving the return value to the [`Look::from_repr`] /// constructor is guaranteed to return the same look-around variant that /// one started with within a semver compatible release of this crate. #[inline] pub const fn as_repr(self) -> u16 { // AFAIK, 'as' is the only way to zero-cost convert an int enum to an // actual int. self as u16 } /// Given the underlying representation of a `Look` value, return the /// corresponding `Look` value if the representation is valid. Otherwise /// `None` is returned. #[inline] pub const fn from_repr(repr: u16) -> Option<Look> { match repr { 0b00_0000_0001 => Some(Look::Start), 0b00_0000_0010 => Some(Look::End), 0b00_0000_0100 => Some(Look::StartLF), 0b00_0000_1000 => Some(Look::EndLF), 0b00_0001_0000 => Some(Look::StartCRLF), 0b00_0010_0000 => Some(Look::EndCRLF), 0b00_0100_0000 => Some(Look::WordAscii), 0b00_1000_0000 => Some(Look::WordAsciiNegate), 0b01_0000_0000 => Some(Look::WordUnicode), 0b10_0000_0000 => Some(Look::WordUnicodeNegate), _ => None, } } /// Returns a convenient single codepoint representation of this /// look-around assertion. Each assertion is guaranteed to be represented /// by a distinct character. /// /// This is useful for succinctly representing a look-around assertion in /// human friendly but succinct output intended for a programmer working on /// regex internals. #[inline] pub const fn as_char(self) -> char { match self { Look::Start => 'A', Look::End => 'z', Look::StartLF => '^', Look::EndLF => '$', Look::StartCRLF => 'r', Look::EndCRLF => 'R', Look::WordAscii => 'b', Look::WordAsciiNegate => 'B', Look::WordUnicode => '𝛃', Look::WordUnicodeNegate => '𝚩', } } } /// The high-level intermediate representation for a capturing group. /// /// A capturing group always has an index and a child expression. It may /// also have a name associated with it (e.g., `(?P<foo>\w)`), but it's not /// necessary. /// /// Note that there is no explicit representation of a non-capturing group /// in a `Hir`. Instead, non-capturing grouping is handled automatically by /// the recursive structure of the `Hir` itself. #[derive(Clone, Debug, Eq, PartialEq)] pub struct Capture { /// The capture index of the capture. pub index: u32, /// The name of the capture, if it exists. pub name: Option<Box<str>>, /// The expression inside the capturing group, which may be empty. pub sub: Box<Hir>, } /// The high-level intermediate representation of a repetition operator. /// /// A repetition operator permits the repetition of an arbitrary /// sub-expression. #[derive(Clone, Debug, Eq, PartialEq)] pub struct Repetition { /// The minimum range of the repetition. 
/// /// Note that special cases like `?`, `+` and `*` all get translated into /// the ranges `{0,1}`, `{1,}` and `{0,}`, respectively. /// /// When `min` is zero, this expression can match the empty string /// regardless of what its sub-expression is. pub min: u32, /// The maximum range of the repetition. /// /// Note that when `max` is `None`, `min` acts as a lower bound but where /// there is no upper bound. For something like `x{5}` where the min and /// max are equivalent, `min` will be set to `5` and `max` will be set to /// `Some(5)`. pub max: Option<u32>, /// Whether this repetition operator is greedy or not. A greedy operator /// will match as much as it can. A non-greedy operator will match as /// little as it can. /// /// Typically, operators are greedy by default and are only non-greedy when /// a `?` suffix is used, e.g., `(expr)*` is greedy while `(expr)*?` is /// not. However, this can be inverted via the `U` "ungreedy" flag. pub greedy: bool, /// The expression being repeated. pub sub: Box<Hir>, } impl Repetition { /// Returns a new repetition with the same `min`, `max` and `greedy` /// values, but with its sub-expression replaced with the one given. pub fn with(&self, sub: Hir) -> Repetition { Repetition { min: self.min, max: self.max, greedy: self.greedy, sub: Box::new(sub), } } } /// A type describing the different flavors of `.`. /// /// This type is meant to be used with [`Hir::dot`], which is a convenience /// routine for building HIR values derived from the `.` regex. #[non_exhaustive] #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub enum Dot { /// Matches the UTF-8 encoding of any Unicode scalar value. /// /// This is equivalent to `(?su:.)` and also `\p{any}`. AnyChar, /// Matches any byte value. /// /// This is equivalent to `(?s-u:.)` and also `(?-u:[\x00-\xFF])`. AnyByte, /// Matches the UTF-8 encoding of any Unicode scalar value except for the /// `char` given. /// /// This is equivalent to using `(?u-s:.)` with the line terminator set /// to a particular ASCII byte. (Because of peculiarities in the regex /// engines, a line terminator must be a single byte. It follows that when /// UTF-8 mode is enabled, this single byte must also be a Unicode scalar /// value. That is, ti must be ASCII.) /// /// (This and `AnyCharExceptLF` both exist because of legacy reasons. /// `AnyCharExceptLF` will be dropped in the next breaking change release.) AnyCharExcept(char), /// Matches the UTF-8 encoding of any Unicode scalar value except for `\n`. /// /// This is equivalent to `(?u-s:.)` and also `[\p{any}--\n]`. AnyCharExceptLF, /// Matches the UTF-8 encoding of any Unicode scalar value except for `\r` /// and `\n`. /// /// This is equivalent to `(?uR-s:.)` and also `[\p{any}--\r\n]`. AnyCharExceptCRLF, /// Matches any byte value except for the `u8` given. /// /// This is equivalent to using `(?-us:.)` with the line terminator set /// to a particular ASCII byte. (Because of peculiarities in the regex /// engines, a line terminator must be a single byte. It follows that when /// UTF-8 mode is enabled, this single byte must also be a Unicode scalar /// value. That is, ti must be ASCII.) /// /// (This and `AnyByteExceptLF` both exist because of legacy reasons. /// `AnyByteExceptLF` will be dropped in the next breaking change release.) AnyByteExcept(u8), /// Matches any byte value except for `\n`. /// /// This is equivalent to `(?-su:.)` and also `(?-u:[[\x00-\xFF]--\n])`. AnyByteExceptLF, /// Matches any byte value except for `\r` and `\n`. 
/// /// This is equivalent to `(?R-su:.)` and also `(?-u:[[\x00-\xFF]--\r\n])`. AnyByteExceptCRLF, } /// A custom `Drop` impl is used for `HirKind` such that it uses constant stack /// space but heap space proportional to the depth of the total `Hir`. impl Drop for Hir { fn drop(&mut self) { use core::mem; match *self.kind() { HirKind::Empty | HirKind::Literal(_) | HirKind::Class(_) | HirKind::Look(_) => return, HirKind::Capture(ref x) if x.sub.kind.subs().is_empty() => return, HirKind::Repetition(ref x) if x.sub.kind.subs().is_empty() => { return } HirKind::Concat(ref x) if x.is_empty() => return, HirKind::Alternation(ref x) if x.is_empty() => return, _ => {} } let mut stack = vec![mem::replace(self, Hir::empty())]; while let Some(mut expr) = stack.pop() { match expr.kind { HirKind::Empty | HirKind::Literal(_) | HirKind::Class(_) | HirKind::Look(_) => {} HirKind::Capture(ref mut x) => { stack.push(mem::replace(&mut x.sub, Hir::empty())); } HirKind::Repetition(ref mut x) => { stack.push(mem::replace(&mut x.sub, Hir::empty())); } HirKind::Concat(ref mut x) => { stack.extend(x.drain(..)); } HirKind::Alternation(ref mut x) => { stack.extend(x.drain(..)); } } } } } /// A type that collects various properties of an HIR value. /// /// Properties are always scalar values and represent meta data that is /// computed inductively on an HIR value. Properties are defined for all /// HIR values. /// /// All methods on a `Properties` value take constant time and are meant to /// be cheap to call. #[derive(Clone, Debug, Eq, PartialEq)] pub struct Properties(Box<PropertiesI>); /// The property definition. It is split out so that we can box it, and /// there by make `Properties` use less stack size. This is kind-of important /// because every HIR value has a `Properties` attached to it. /// /// This does have the unfortunate consequence that creating any HIR value /// always leads to at least one alloc for properties, but this is generally /// true anyway (for pretty much all HirKinds except for look-arounds). #[derive(Clone, Debug, Eq, PartialEq)] struct PropertiesI { minimum_len: Option<usize>, maximum_len: Option<usize>, look_set: LookSet, look_set_prefix: LookSet, look_set_suffix: LookSet, look_set_prefix_any: LookSet, look_set_suffix_any: LookSet, utf8: bool, explicit_captures_len: usize, static_explicit_captures_len: Option<usize>, literal: bool, alternation_literal: bool, } impl Properties { /// Returns the length (in bytes) of the smallest string matched by this /// HIR. /// /// A return value of `0` is possible and occurs when the HIR can match an /// empty string. /// /// `None` is returned when there is no minimum length. This occurs in /// precisely the cases where the HIR matches nothing. i.e., The language /// the regex matches is empty. An example of such a regex is `\P{any}`. #[inline] pub fn minimum_len(&self) -> Option<usize> { self.0.minimum_len } /// Returns the length (in bytes) of the longest string matched by this /// HIR. /// /// A return value of `0` is possible and occurs when nothing longer than /// the empty string is in the language described by this HIR. /// /// `None` is returned when there is no longest matching string. This /// occurs when the HIR matches nothing or when there is no upper bound on /// the length of matching strings. Example of such regexes are `\P{any}` /// (matches nothing) and `a+` (has no upper bound). 
#[inline] pub fn maximum_len(&self) -> Option<usize> { self.0.maximum_len } /// Returns a set of all look-around assertions that appear at least once /// in this HIR value. #[inline] pub fn look_set(&self) -> LookSet { self.0.look_set } /// Returns a set of all look-around assertions that appear as a prefix for /// this HIR value. That is, the set returned corresponds to the set of /// assertions that must be passed before matching any bytes in a haystack. /// /// For example, `hir.look_set_prefix().contains(Look::Start)` returns true /// if and only if the HIR is fully anchored at the start. #[inline] pub fn look_set_prefix(&self) -> LookSet { self.0.look_set_prefix } /// Returns a set of all look-around assertions that appear as a _possible_ /// prefix for this HIR value. That is, the set returned corresponds to the /// set of assertions that _may_ be passed before matching any bytes in a /// haystack. /// /// For example, `hir.look_set_prefix_any().contains(Look::Start)` returns /// true if and only if it's possible for the regex to match through a /// anchored assertion before consuming any input. #[inline] pub fn look_set_prefix_any(&self) -> LookSet { self.0.look_set_prefix_any } /// Returns a set of all look-around assertions that appear as a suffix for /// this HIR value. That is, the set returned corresponds to the set of /// assertions that must be passed in order to be considered a match after /// all other consuming HIR expressions. /// /// For example, `hir.look_set_suffix().contains(Look::End)` returns true /// if and only if the HIR is fully anchored at the end. #[inline] pub fn look_set_suffix(&self) -> LookSet { self.0.look_set_suffix } /// Returns a set of all look-around assertions that appear as a _possible_ /// suffix for this HIR value. That is, the set returned corresponds to the /// set of assertions that _may_ be passed before matching any bytes in a /// haystack. /// /// For example, `hir.look_set_suffix_any().contains(Look::End)` returns /// true if and only if it's possible for the regex to match through a /// anchored assertion at the end of a match without consuming any input. #[inline] pub fn look_set_suffix_any(&self) -> LookSet { self.0.look_set_suffix_any } /// Return true if and only if the corresponding HIR will always match /// valid UTF-8. /// /// When this returns false, then it is possible for this HIR expression to /// match invalid UTF-8, including by matching between the code units of /// a single UTF-8 encoded codepoint. /// /// Note that this returns true even when the corresponding HIR can match /// the empty string. Since an empty string can technically appear between /// UTF-8 code units, it is possible for a match to be reported that splits /// a codepoint which could in turn be considered matching invalid UTF-8. /// However, it is generally assumed that such empty matches are handled /// specially by the search routine if it is absolutely required that /// matches not split a codepoint. /// /// # Example /// /// This code example shows the UTF-8 property of a variety of patterns. /// /// ``` /// use regex_syntax::{ParserBuilder, parse}; /// /// // Examples of 'is_utf8() == true'. 
/// assert!(parse(r"a")?.properties().is_utf8()); /// assert!(parse(r"[^a]")?.properties().is_utf8()); /// assert!(parse(r".")?.properties().is_utf8()); /// assert!(parse(r"\W")?.properties().is_utf8()); /// assert!(parse(r"\b")?.properties().is_utf8()); /// assert!(parse(r"\B")?.properties().is_utf8()); /// assert!(parse(r"(?-u)\b")?.properties().is_utf8()); /// assert!(parse(r"(?-u)\B")?.properties().is_utf8()); /// // Unicode mode is enabled by default, and in /// // that mode, all \x hex escapes are treated as /// // codepoints. So this actually matches the UTF-8 /// // encoding of U+00FF. /// assert!(parse(r"\xFF")?.properties().is_utf8()); /// /// // Now we show examples of 'is_utf8() == false'. /// // The only way to do this is to force the parser /// // to permit invalid UTF-8, otherwise all of these /// // would fail to parse! /// let parse = |pattern| { /// ParserBuilder::new().utf8(false).build().parse(pattern) /// }; /// assert!(!parse(r"(?-u)[^a]")?.properties().is_utf8()); /// assert!(!parse(r"(?-u).")?.properties().is_utf8()); /// assert!(!parse(r"(?-u)\W")?.properties().is_utf8()); /// // Conversely to the equivalent example above, /// // when Unicode mode is disabled, \x hex escapes /// // are treated as their raw byte values. /// assert!(!parse(r"(?-u)\xFF")?.properties().is_utf8()); /// // Note that just because we disabled UTF-8 in the /// // parser doesn't mean we still can't use Unicode. /// // It is enabled by default, so \xFF is still /// // equivalent to matching the UTF-8 encoding of /// // U+00FF by default. /// assert!(parse(r"\xFF")?.properties().is_utf8()); /// // Even though we use raw bytes that individually /// // are not valid UTF-8, when combined together, the /// // overall expression *does* match valid UTF-8! /// assert!(parse(r"(?-u)\xE2\x98\x83")?.properties().is_utf8()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn is_utf8(&self) -> bool { self.0.utf8 } /// Returns the total number of explicit capturing groups in the /// corresponding HIR. /// /// Note that this does not include the implicit capturing group /// corresponding to the entire match that is typically included by regex /// engines. /// /// # Example /// /// This method will return `0` for `a` and `1` for `(a)`: /// /// ``` /// use regex_syntax::parse; /// /// assert_eq!(0, parse("a")?.properties().explicit_captures_len()); /// assert_eq!(1, parse("(a)")?.properties().explicit_captures_len()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn explicit_captures_len(&self) -> usize { self.0.explicit_captures_len } /// Returns the total number of explicit capturing groups that appear in /// every possible match. /// /// If the number of capture groups can vary depending on the match, then /// this returns `None`. That is, a value is only returned when the number /// of matching groups is invariant or "static." /// /// Note that this does not include the implicit capturing group /// corresponding to the entire match. /// /// # Example /// /// This shows a few cases where a static number of capture groups is /// available and a few cases where it is not. 
/// /// ``` /// use regex_syntax::parse; /// /// let len = |pattern| { /// parse(pattern).map(|h| { /// h.properties().static_explicit_captures_len() /// }) /// }; /// /// assert_eq!(Some(0), len("a")?); /// assert_eq!(Some(1), len("(a)")?); /// assert_eq!(Some(1), len("(a)|(b)")?); /// assert_eq!(Some(2), len("(a)(b)|(c)(d)")?); /// assert_eq!(None, len("(a)|b")?); /// assert_eq!(None, len("a|(b)")?); /// assert_eq!(None, len("(b)*")?); /// assert_eq!(Some(1), len("(b)+")?); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn static_explicit_captures_len(&self) -> Option<usize> { self.0.static_explicit_captures_len } /// Return true if and only if this HIR is a simple literal. This is /// only true when this HIR expression is either itself a `Literal` or a /// concatenation of only `Literal`s. /// /// For example, `f` and `foo` are literals, but `f+`, `(foo)`, `foo()` and /// the empty string are not (even though they contain sub-expressions that /// are literals). #[inline] pub fn is_literal(&self) -> bool { self.0.literal } /// Return true if and only if this HIR is either a simple literal or an /// alternation of simple literals. This is only /// true when this HIR expression is either itself a `Literal` or a /// concatenation of only `Literal`s or an alternation of only `Literal`s. /// /// For example, `f`, `foo`, `a|b|c`, and `foo|bar|baz` are alternation /// literals, but `f+`, `(foo)`, `foo()`, and the empty pattern are not /// (even though that contain sub-expressions that are literals). #[inline] pub fn is_alternation_literal(&self) -> bool { self.0.alternation_literal } /// Returns the total amount of heap memory usage, in bytes, used by this /// `Properties` value. #[inline] pub fn memory_usage(&self) -> usize { core::mem::size_of::<PropertiesI>() } /// Returns a new set of properties that corresponds to the union of the /// iterator of properties given. /// /// This is useful when one has multiple `Hir` expressions and wants /// to combine them into a single alternation without constructing the /// corresponding `Hir`. This routine provides a way of combining the /// properties of each `Hir` expression into one set of properties /// representing the union of those expressions. /// /// # Example: union with HIRs that never match /// /// This example shows that unioning properties together with one that /// represents a regex that never matches will "poison" certain attributes, /// like the minimum and maximum lengths. /// /// ``` /// use regex_syntax::{hir::Properties, parse}; /// /// let hir1 = parse("ab?c?")?; /// assert_eq!(Some(1), hir1.properties().minimum_len()); /// assert_eq!(Some(3), hir1.properties().maximum_len()); /// /// let hir2 = parse(r"[a&&b]")?; /// assert_eq!(None, hir2.properties().minimum_len()); /// assert_eq!(None, hir2.properties().maximum_len()); /// /// let hir3 = parse(r"wxy?z?")?; /// assert_eq!(Some(2), hir3.properties().minimum_len()); /// assert_eq!(Some(4), hir3.properties().maximum_len()); /// /// let unioned = Properties::union([ /// hir1.properties(), /// hir2.properties(), /// hir3.properties(), /// ]); /// assert_eq!(None, unioned.minimum_len()); /// assert_eq!(None, unioned.maximum_len()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// The maximum length can also be "poisoned" by a pattern that has no /// upper bound on the length of a match. 
The minimum length remains /// unaffected: /// /// ``` /// use regex_syntax::{hir::Properties, parse}; /// /// let hir1 = parse("ab?c?")?; /// assert_eq!(Some(1), hir1.properties().minimum_len()); /// assert_eq!(Some(3), hir1.properties().maximum_len()); /// /// let hir2 = parse(r"a+")?; /// assert_eq!(Some(1), hir2.properties().minimum_len()); /// assert_eq!(None, hir2.properties().maximum_len()); /// /// let hir3 = parse(r"wxy?z?")?; /// assert_eq!(Some(2), hir3.properties().minimum_len()); /// assert_eq!(Some(4), hir3.properties().maximum_len()); /// /// let unioned = Properties::union([ /// hir1.properties(), /// hir2.properties(), /// hir3.properties(), /// ]); /// assert_eq!(Some(1), unioned.minimum_len()); /// assert_eq!(None, unioned.maximum_len()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn union<I, P>(props: I) -> Properties where I: IntoIterator<Item = P>, P: core::borrow::Borrow<Properties>, { let mut it = props.into_iter().peekable(); // While empty alternations aren't possible, we still behave as if they // are. When we have an empty alternate, then clearly the look-around // prefix and suffix is empty. Otherwise, it is the intersection of all // prefixes and suffixes (respectively) of the branches. let fix = if it.peek().is_none() { LookSet::empty() } else { LookSet::full() }; // And also, an empty alternate means we have 0 static capture groups, // but we otherwise start with the number corresponding to the first // alternate. If any subsequent alternate has a different number of // static capture groups, then we overall have a variation and not a // static number of groups. let static_explicit_captures_len = it.peek().and_then(|p| p.borrow().static_explicit_captures_len()); // The base case is an empty alternation, which matches nothing. // Note though that empty alternations aren't possible, because the // Hir::alternation smart constructor rewrites those as empty character // classes. let mut props = PropertiesI { minimum_len: None, maximum_len: None, look_set: LookSet::empty(), look_set_prefix: fix, look_set_suffix: fix, look_set_prefix_any: LookSet::empty(), look_set_suffix_any: LookSet::empty(), utf8: true, explicit_captures_len: 0, static_explicit_captures_len, literal: false, alternation_literal: true, }; let (mut min_poisoned, mut max_poisoned) = (false, false); // Handle properties that need to visit every child hir. 
for prop in it { let p = prop.borrow(); props.look_set.set_union(p.look_set()); props.look_set_prefix.set_intersect(p.look_set_prefix()); props.look_set_suffix.set_intersect(p.look_set_suffix()); props.look_set_prefix_any.set_union(p.look_set_prefix_any()); props.look_set_suffix_any.set_union(p.look_set_suffix_any()); props.utf8 = props.utf8 && p.is_utf8(); props.explicit_captures_len = props .explicit_captures_len .saturating_add(p.explicit_captures_len()); if props.static_explicit_captures_len != p.static_explicit_captures_len() { props.static_explicit_captures_len = None; } props.alternation_literal = props.alternation_literal && p.is_literal(); if !min_poisoned { if let Some(xmin) = p.minimum_len() { if props.minimum_len.map_or(true, |pmin| xmin < pmin) { props.minimum_len = Some(xmin); } } else { props.minimum_len = None; min_poisoned = true; } } if !max_poisoned { if let Some(xmax) = p.maximum_len() { if props.maximum_len.map_or(true, |pmax| xmax > pmax) { props.maximum_len = Some(xmax); } } else { props.maximum_len = None; max_poisoned = true; } } } Properties(Box::new(props)) } } impl Properties { /// Create a new set of HIR properties for an empty regex. fn empty() -> Properties { let inner = PropertiesI { minimum_len: Some(0), maximum_len: Some(0), look_set: LookSet::empty(), look_set_prefix: LookSet::empty(), look_set_suffix: LookSet::empty(), look_set_prefix_any: LookSet::empty(), look_set_suffix_any: LookSet::empty(), // It is debatable whether an empty regex always matches at valid // UTF-8 boundaries. Strictly speaking, at a byte oriented view, // it is clearly false. There are, for example, many empty strings // between the bytes encoding a '☃'. // // However, when Unicode mode is enabled, the fundamental atom // of matching is really a codepoint. And in that scenario, an // empty regex is defined to only match at valid UTF-8 boundaries // and to never split a codepoint. It just so happens that this // enforcement is somewhat tricky to do for regexes that match // the empty string inside regex engines themselves. It usually // requires some layer above the regex engine to filter out such // matches. // // In any case, 'true' is really the only coherent option. If it // were false, for example, then 'a*' would also need to be false // since it too can match the empty string. utf8: true, explicit_captures_len: 0, static_explicit_captures_len: Some(0), literal: false, alternation_literal: false, }; Properties(Box::new(inner)) } /// Create a new set of HIR properties for a literal regex. fn literal(lit: &Literal) -> Properties { let inner = PropertiesI { minimum_len: Some(lit.0.len()), maximum_len: Some(lit.0.len()), look_set: LookSet::empty(), look_set_prefix: LookSet::empty(), look_set_suffix: LookSet::empty(), look_set_prefix_any: LookSet::empty(), look_set_suffix_any: LookSet::empty(), utf8: core::str::from_utf8(&lit.0).is_ok(), explicit_captures_len: 0, static_explicit_captures_len: Some(0), literal: true, alternation_literal: true, }; Properties(Box::new(inner)) } /// Create a new set of HIR properties for a character class. 
fn class(class: &Class) -> Properties { let inner = PropertiesI { minimum_len: class.minimum_len(), maximum_len: class.maximum_len(), look_set: LookSet::empty(), look_set_prefix: LookSet::empty(), look_set_suffix: LookSet::empty(), look_set_prefix_any: LookSet::empty(), look_set_suffix_any: LookSet::empty(), utf8: class.is_utf8(), explicit_captures_len: 0, static_explicit_captures_len: Some(0), literal: false, alternation_literal: false, }; Properties(Box::new(inner)) } /// Create a new set of HIR properties for a look-around assertion. fn look(look: Look) -> Properties { let inner = PropertiesI { minimum_len: Some(0), maximum_len: Some(0), look_set: LookSet::singleton(look), look_set_prefix: LookSet::singleton(look), look_set_suffix: LookSet::singleton(look), look_set_prefix_any: LookSet::singleton(look), look_set_suffix_any: LookSet::singleton(look), // This requires a little explanation. Basically, we don't consider // matching an empty string to be equivalent to matching invalid // UTF-8, even though technically matching every empty string will // split the UTF-8 encoding of a single codepoint when treating a // UTF-8 encoded string as a sequence of bytes. Our defense here is // that in such a case, a codepoint should logically be treated as // the fundamental atom for matching, and thus the only valid match // points are between codepoints and not bytes. // // More practically, this is true here because it's also true // for 'Hir::empty()', otherwise something like 'a*' would be // considered to match invalid UTF-8. That in turn makes this // property borderline useless. utf8: true, explicit_captures_len: 0, static_explicit_captures_len: Some(0), literal: false, alternation_literal: false, }; Properties(Box::new(inner)) } /// Create a new set of HIR properties for a repetition. fn repetition(rep: &Repetition) -> Properties { let p = rep.sub.properties(); let minimum_len = p.minimum_len().map(|child_min| { let rep_min = usize::try_from(rep.min).unwrap_or(usize::MAX); child_min.saturating_mul(rep_min) }); let maximum_len = rep.max.and_then(|rep_max| { let rep_max = usize::try_from(rep_max).ok()?; let child_max = p.maximum_len()?; child_max.checked_mul(rep_max) }); let mut inner = PropertiesI { minimum_len, maximum_len, look_set: p.look_set(), look_set_prefix: LookSet::empty(), look_set_suffix: LookSet::empty(), look_set_prefix_any: p.look_set_prefix_any(), look_set_suffix_any: p.look_set_suffix_any(), utf8: p.is_utf8(), explicit_captures_len: p.explicit_captures_len(), static_explicit_captures_len: p.static_explicit_captures_len(), literal: false, alternation_literal: false, }; // If the repetition operator can match the empty string, then its // lookset prefix and suffixes themselves remain empty since they are // no longer required to match. if rep.min > 0 { inner.look_set_prefix = p.look_set_prefix(); inner.look_set_suffix = p.look_set_suffix(); } // If the static captures len of the sub-expression is not known or // is greater than zero, then it automatically propagates to the // repetition, regardless of the repetition. Otherwise, it might // change, but only when the repetition can match 0 times. if rep.min == 0 && inner.static_explicit_captures_len.map_or(false, |len| len > 0) { // If we require a match 0 times, then our captures len is // guaranteed to be zero. Otherwise, if we *can* match the empty // string, then it's impossible to know how many captures will be // in the resulting match. 
if rep.max == Some(0) { inner.static_explicit_captures_len = Some(0); } else { inner.static_explicit_captures_len = None; } } Properties(Box::new(inner)) } /// Create a new set of HIR properties for a capture. fn capture(capture: &Capture) -> Properties { let p = capture.sub.properties(); Properties(Box::new(PropertiesI { explicit_captures_len: p.explicit_captures_len().saturating_add(1), static_explicit_captures_len: p .static_explicit_captures_len() .map(|len| len.saturating_add(1)), literal: false, alternation_literal: false, ..*p.0.clone() })) } /// Create a new set of HIR properties for a concatenation. fn concat(concat: &[Hir]) -> Properties { // The base case is an empty concatenation, which matches the empty // string. Note though that empty concatenations aren't possible, // because the Hir::concat smart constructor rewrites those as // Hir::empty. let mut props = PropertiesI { minimum_len: Some(0), maximum_len: Some(0), look_set: LookSet::empty(), look_set_prefix: LookSet::empty(), look_set_suffix: LookSet::empty(), look_set_prefix_any: LookSet::empty(), look_set_suffix_any: LookSet::empty(), utf8: true, explicit_captures_len: 0, static_explicit_captures_len: Some(0), literal: true, alternation_literal: true, }; // Handle properties that need to visit every child hir. for x in concat.iter() { let p = x.properties(); props.look_set.set_union(p.look_set()); props.utf8 = props.utf8 && p.is_utf8(); props.explicit_captures_len = props .explicit_captures_len .saturating_add(p.explicit_captures_len()); props.static_explicit_captures_len = p .static_explicit_captures_len() .and_then(|len1| { Some((len1, props.static_explicit_captures_len?)) }) .and_then(|(len1, len2)| Some(len1.saturating_add(len2))); props.literal = props.literal && p.is_literal(); props.alternation_literal = props.alternation_literal && p.is_alternation_literal(); if let Some(minimum_len) = props.minimum_len { match p.minimum_len() { None => props.minimum_len = None, Some(len) => { // We use saturating arithmetic here because the // minimum is just a lower bound. We can't go any // higher than what our number types permit. props.minimum_len = Some(minimum_len.saturating_add(len)); } } } if let Some(maximum_len) = props.maximum_len { match p.maximum_len() { None => props.maximum_len = None, Some(len) => { props.maximum_len = maximum_len.checked_add(len) } } } } // Handle the prefix properties, which only requires visiting // child exprs until one matches more than the empty string. let mut it = concat.iter(); while let Some(x) = it.next() { props.look_set_prefix.set_union(x.properties().look_set_prefix()); props .look_set_prefix_any .set_union(x.properties().look_set_prefix_any()); if x.properties().maximum_len().map_or(true, |x| x > 0) { break; } } // Same thing for the suffix properties, but in reverse. let mut it = concat.iter().rev(); while let Some(x) = it.next() { props.look_set_suffix.set_union(x.properties().look_set_suffix()); props .look_set_suffix_any .set_union(x.properties().look_set_suffix_any()); if x.properties().maximum_len().map_or(true, |x| x > 0) { break; } } Properties(Box::new(props)) } /// Create a new set of HIR properties for a concatenation. fn alternation(alts: &[Hir]) -> Properties { Properties::union(alts.iter().map(|hir| hir.properties())) } } /// A set of look-around assertions. /// /// This is useful for efficiently tracking look-around assertions. For /// example, an [`Hir`] provides properties that return `LookSet`s. 
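///
/// The example below is a minimal usage sketch, using only the
/// constructors and queries defined on `LookSet` itself:
///
/// ```
/// use regex_syntax::hir::{Look, LookSet};
///
/// let set = LookSet::empty()
///     .insert(Look::StartLF)
///     .insert(Look::WordUnicode);
/// assert!(set.contains(Look::StartLF));
/// assert!(set.contains(Look::WordUnicode));
/// assert!(!set.contains(Look::End));
/// assert_eq!(2, set.iter().count());
/// ```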
#[derive(Clone, Copy, Default, Eq, PartialEq)] pub struct LookSet { /// The underlying representation of this set is exposed to make it possible /// to store it somewhere efficiently. The representation is that /// of a bitset, where each assertion occupies bit `i` where `i = /// Look::as_repr()`. /// /// Note that users of this internal representation must permit the full /// range of `u16` values to be represented. For example, even if the /// current implementation only makes use of the 10 least significant bits, /// it may use more bits in a future semver compatible release. pub bits: u16, } impl LookSet {
/// Create an empty set of look-around assertions. #[inline] pub fn empty() -> LookSet { LookSet { bits: 0 } }
/// Create a full set of look-around assertions. /// /// This set contains all possible look-around assertions. #[inline] pub fn full() -> LookSet { LookSet { bits: !0 } }
/// Create a look-around set containing the look-around assertion given. /// /// This is a convenience routine for creating an empty set and inserting /// one look-around assertion. #[inline] pub fn singleton(look: Look) -> LookSet { LookSet::empty().insert(look) }
/// Returns the total number of look-around assertions in this set. #[inline] pub fn len(self) -> usize { // OK because max value always fits in a u8, which in turn always // fits in a usize, regardless of target. usize::try_from(self.bits.count_ones()).unwrap() }
/// Returns true if and only if this set is empty. #[inline] pub fn is_empty(self) -> bool { self.len() == 0 }
/// Returns true if and only if the given look-around assertion is in this /// set. #[inline] pub fn contains(self, look: Look) -> bool { self.bits & look.as_repr() != 0 }
/// Returns true if and only if this set contains any anchor assertions. /// This includes both "start/end of haystack" and "start/end of line." #[inline] pub fn contains_anchor(&self) -> bool { self.contains_anchor_haystack() || self.contains_anchor_line() }
/// Returns true if and only if this set contains any "start/end of /// haystack" anchors. This doesn't include "start/end of line" anchors. #[inline] pub fn contains_anchor_haystack(&self) -> bool { self.contains(Look::Start) || self.contains(Look::End) }
/// Returns true if and only if this set contains any "start/end of line" /// anchors. This doesn't include "start/end of haystack" anchors. This /// includes both `\n` line anchors and CRLF (`\r\n`) aware line anchors. #[inline] pub fn contains_anchor_line(&self) -> bool { self.contains(Look::StartLF) || self.contains(Look::EndLF) || self.contains(Look::StartCRLF) || self.contains(Look::EndCRLF) }
/// Returns true if and only if this set contains any "start/end of line" /// anchors that only treat `\n` as line terminators. This does not include /// haystack anchors or CRLF aware line anchors. #[inline] pub fn contains_anchor_lf(&self) -> bool { self.contains(Look::StartLF) || self.contains(Look::EndLF) }
/// Returns true if and only if this set contains any "start/end of line" /// anchors that are CRLF-aware. This doesn't include "start/end of /// haystack" or "start/end of line-feed" anchors. #[inline] pub fn contains_anchor_crlf(&self) -> bool { self.contains(Look::StartCRLF) || self.contains(Look::EndCRLF) }
/// Returns true if and only if this set contains any word boundary or /// negated word boundary assertions. This includes both Unicode and ASCII /// word boundaries.
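///
/// A minimal sketch of how this relates to the more specific word-boundary
/// queries below (illustrative only):
///
/// ```
/// use regex_syntax::hir::{Look, LookSet};
///
/// let set = LookSet::singleton(Look::WordAscii);
/// assert!(set.contains_word());
/// assert!(set.contains_word_ascii());
/// assert!(!set.contains_word_unicode());
/// ```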
#[inline] pub fn contains_word(self) -> bool { self.contains_word_unicode() || self.contains_word_ascii() } /// Returns true if and only if this set contains any Unicode word boundary /// or negated Unicode word boundary assertions. #[inline] pub fn contains_word_unicode(self) -> bool { self.contains(Look::WordUnicode) || self.contains(Look::WordUnicodeNegate) } /// Returns true if and only if this set contains any ASCII word boundary /// or negated ASCII word boundary assertions. #[inline] pub fn contains_word_ascii(self) -> bool { self.contains(Look::WordAscii) || self.contains(Look::WordAsciiNegate) } /// Returns an iterator over all of the look-around assertions in this set. #[inline] pub fn iter(self) -> LookSetIter { LookSetIter { set: self } } /// Return a new set that is equivalent to the original, but with the given /// assertion added to it. If the assertion is already in the set, then the /// returned set is equivalent to the original. #[inline] pub fn insert(self, look: Look) -> LookSet { LookSet { bits: self.bits | look.as_repr() } } /// Updates this set in place with the result of inserting the given /// assertion into this set. #[inline] pub fn set_insert(&mut self, look: Look) { *self = self.insert(look); } /// Return a new set that is equivalent to the original, but with the given /// assertion removed from it. If the assertion is not in the set, then the /// returned set is equivalent to the original. #[inline] pub fn remove(self, look: Look) -> LookSet { LookSet { bits: self.bits & !look.as_repr() } } /// Updates this set in place with the result of removing the given /// assertion from this set. #[inline] pub fn set_remove(&mut self, look: Look) { *self = self.remove(look); } /// Returns a new set that is the result of subtracting the given set from /// this set. #[inline] pub fn subtract(self, other: LookSet) -> LookSet { LookSet { bits: self.bits & !other.bits } } /// Updates this set in place with the result of subtracting the given set /// from this set. #[inline] pub fn set_subtract(&mut self, other: LookSet) { *self = self.subtract(other); } /// Returns a new set that is the union of this and the one given. #[inline] pub fn union(self, other: LookSet) -> LookSet { LookSet { bits: self.bits | other.bits } } /// Updates this set in place with the result of unioning it with the one /// given. #[inline] pub fn set_union(&mut self, other: LookSet) { *self = self.union(other); } /// Returns a new set that is the intersection of this and the one given. #[inline] pub fn intersect(self, other: LookSet) -> LookSet { LookSet { bits: self.bits & other.bits } } /// Updates this set in place with the result of intersecting it with the /// one given. #[inline] pub fn set_intersect(&mut self, other: LookSet) { *self = self.intersect(other); } /// Return a `LookSet` from the slice given as a native endian 16-bit /// integer. /// /// # Panics /// /// This panics if `slice.len() < 2`. #[inline] pub fn read_repr(slice: &[u8]) -> LookSet { let bits = u16::from_ne_bytes(slice[..2].try_into().unwrap()); LookSet { bits } } /// Write a `LookSet` as a native endian 16-bit integer to the beginning /// of the slice given. /// /// # Panics /// /// This panics if `slice.len() < 2`. 
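///
/// # Example
///
/// A round-trip sketch with [`LookSet::read_repr`], assuming a buffer of at
/// least two bytes:
///
/// ```
/// use regex_syntax::hir::{Look, LookSet};
///
/// let set = LookSet::singleton(Look::StartCRLF);
/// let mut buf = [0u8; 2];
/// set.write_repr(&mut buf);
/// assert_eq!(set, LookSet::read_repr(&buf));
/// ```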
#[inline] pub fn write_repr(self, slice: &mut [u8]) { let raw = self.bits.to_ne_bytes(); slice[0] = raw[0]; slice[1] = raw[1]; } } impl core::fmt::Debug for LookSet { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { if self.is_empty() { return write!(f, "∅"); } for look in self.iter() { write!(f, "{}", look.as_char())?; } Ok(()) } } /// An iterator over all look-around assertions in a [`LookSet`]. /// /// This iterator is created by [`LookSet::iter`]. #[derive(Clone, Debug)] pub struct LookSetIter { set: LookSet, } impl Iterator for LookSetIter { type Item = Look; #[inline] fn next(&mut self) -> Option<Look> { if self.set.is_empty() { return None; } // We'll never have more than u8::MAX distinct look-around assertions, // so 'repr' will always fit into a u16. let repr = u16::try_from(self.set.bits.trailing_zeros()).unwrap(); let look = Look::from_repr(1 << repr)?; self.set = self.set.remove(look); Some(look) } } /// Given a sequence of HIR values where each value corresponds to a Unicode /// class (or an all-ASCII byte class), return a single Unicode class /// corresponding to the union of the classes found. fn class_chars(hirs: &[Hir]) -> Option<Class> { let mut cls = ClassUnicode::new(vec![]); for hir in hirs.iter() { match *hir.kind() { HirKind::Class(Class::Unicode(ref cls2)) => { cls.union(cls2); } HirKind::Class(Class::Bytes(ref cls2)) => { cls.union(&cls2.to_unicode_class()?); } _ => return None, }; } Some(Class::Unicode(cls)) } /// Given a sequence of HIR values where each value corresponds to a byte class /// (or an all-ASCII Unicode class), return a single byte class corresponding /// to the union of the classes found. fn class_bytes(hirs: &[Hir]) -> Option<Class> { let mut cls = ClassBytes::new(vec![]); for hir in hirs.iter() { match *hir.kind() { HirKind::Class(Class::Unicode(ref cls2)) => { cls.union(&cls2.to_byte_class()?); } HirKind::Class(Class::Bytes(ref cls2)) => { cls.union(cls2); } _ => return None, }; } Some(Class::Bytes(cls)) } /// Given a sequence of HIR values where each value corresponds to a literal /// that is a single `char`, return that sequence of `char`s. Otherwise return /// None. No deduplication is done. fn singleton_chars(hirs: &[Hir]) -> Option<Vec<char>> { let mut singletons = vec![]; for hir in hirs.iter() { let literal = match *hir.kind() { HirKind::Literal(Literal(ref bytes)) => bytes, _ => return None, }; let ch = match crate::debug::utf8_decode(literal) { None => return None, Some(Err(_)) => return None, Some(Ok(ch)) => ch, }; if literal.len() != ch.len_utf8() { return None; } singletons.push(ch); } Some(singletons) } /// Given a sequence of HIR values where each value corresponds to a literal /// that is a single byte, return that sequence of bytes. Otherwise return /// None. No deduplication is done. fn singleton_bytes(hirs: &[Hir]) -> Option<Vec<u8>> { let mut singletons = vec![]; for hir in hirs.iter() { let literal = match *hir.kind() { HirKind::Literal(Literal(ref bytes)) => bytes, _ => return None, }; if literal.len() != 1 { return None; } singletons.push(literal[0]); } Some(singletons) } /// Looks for a common prefix in the list of alternation branches given. If one /// is found, then an equivalent but (hopefully) simplified Hir is returned. /// Otherwise, the original given list of branches is returned unmodified. /// /// This is not quite as good as it could be. Right now, it requires that /// all branches are 'Concat' expressions. It also doesn't do well with /// literals. 
For example, given 'foofoo|foobar', it will not refactor it to /// 'foo(?:foo|bar)' because literals are flattened into their own special /// concatenation. (One wonders if perhaps 'Literal' should be a single atom /// instead of a string of bytes because of this. Otherwise, handling the /// current representation in this routine will be pretty gnarly. Sigh.) fn lift_common_prefix(hirs: Vec<Hir>) -> Result<Hir, Vec<Hir>> { if hirs.len() <= 1 { return Err(hirs); } let mut prefix = match hirs[0].kind() { HirKind::Concat(ref xs) => &**xs, _ => return Err(hirs), }; if prefix.is_empty() { return Err(hirs); } for h in hirs.iter().skip(1) { let concat = match h.kind() { HirKind::Concat(ref xs) => xs, _ => return Err(hirs), }; let common_len = prefix .iter() .zip(concat.iter()) .take_while(|(x, y)| x == y) .count(); prefix = &prefix[..common_len]; if prefix.is_empty() { return Err(hirs); } } let len = prefix.len(); assert_ne!(0, len); let mut prefix_concat = vec![]; let mut suffix_alts = vec![]; for h in hirs { let mut concat = match h.into_kind() { HirKind::Concat(xs) => xs, // We required all sub-expressions to be // concats above, so we're only here if we // have a concat. _ => unreachable!(), }; suffix_alts.push(Hir::concat(concat.split_off(len))); if prefix_concat.is_empty() { prefix_concat = concat; } } let mut concat = prefix_concat; concat.push(Hir::alternation(suffix_alts)); Ok(Hir::concat(concat)) } #[cfg(test)] mod tests { use super::*; fn uclass(ranges: &[(char, char)]) -> ClassUnicode { let ranges: Vec<ClassUnicodeRange> = ranges .iter() .map(|&(s, e)| ClassUnicodeRange::new(s, e)) .collect(); ClassUnicode::new(ranges) } fn bclass(ranges: &[(u8, u8)]) -> ClassBytes { let ranges: Vec<ClassBytesRange> = ranges.iter().map(|&(s, e)| ClassBytesRange::new(s, e)).collect(); ClassBytes::new(ranges) } fn uranges(cls: &ClassUnicode) -> Vec<(char, char)> { cls.iter().map(|x| (x.start(), x.end())).collect() } #[cfg(feature = "unicode-case")] fn ucasefold(cls: &ClassUnicode) -> ClassUnicode { let mut cls_ = cls.clone(); cls_.case_fold_simple(); cls_ } fn uunion(cls1: &ClassUnicode, cls2: &ClassUnicode) -> ClassUnicode { let mut cls_ = cls1.clone(); cls_.union(cls2); cls_ } fn uintersect(cls1: &ClassUnicode, cls2: &ClassUnicode) -> ClassUnicode { let mut cls_ = cls1.clone(); cls_.intersect(cls2); cls_ } fn udifference(cls1: &ClassUnicode, cls2: &ClassUnicode) -> ClassUnicode { let mut cls_ = cls1.clone(); cls_.difference(cls2); cls_ } fn usymdifference( cls1: &ClassUnicode, cls2: &ClassUnicode, ) -> ClassUnicode { let mut cls_ = cls1.clone(); cls_.symmetric_difference(cls2); cls_ } fn unegate(cls: &ClassUnicode) -> ClassUnicode { let mut cls_ = cls.clone(); cls_.negate(); cls_ } fn branges(cls: &ClassBytes) -> Vec<(u8, u8)> { cls.iter().map(|x| (x.start(), x.end())).collect() } fn bcasefold(cls: &ClassBytes) -> ClassBytes { let mut cls_ = cls.clone(); cls_.case_fold_simple(); cls_ } fn bunion(cls1: &ClassBytes, cls2: &ClassBytes) -> ClassBytes { let mut cls_ = cls1.clone(); cls_.union(cls2); cls_ } fn bintersect(cls1: &ClassBytes, cls2: &ClassBytes) -> ClassBytes { let mut cls_ = cls1.clone(); cls_.intersect(cls2); cls_ } fn bdifference(cls1: &ClassBytes, cls2: &ClassBytes) -> ClassBytes { let mut cls_ = cls1.clone(); cls_.difference(cls2); cls_ } fn bsymdifference(cls1: &ClassBytes, cls2: &ClassBytes) -> ClassBytes { let mut cls_ = cls1.clone(); cls_.symmetric_difference(cls2); cls_ } fn bnegate(cls: &ClassBytes) -> ClassBytes { let mut cls_ = cls.clone(); cls_.negate(); cls_ } #[test] fn 
class_range_canonical_unicode() { let range = ClassUnicodeRange::new('\u{00FF}', '\0'); assert_eq!('\0', range.start()); assert_eq!('\u{00FF}', range.end()); } #[test] fn class_range_canonical_bytes() { let range = ClassBytesRange::new(b'\xFF', b'\0'); assert_eq!(b'\0', range.start()); assert_eq!(b'\xFF', range.end()); } #[test] fn class_canonicalize_unicode() { let cls = uclass(&[('a', 'c'), ('x', 'z')]); let expected = vec![('a', 'c'), ('x', 'z')]; assert_eq!(expected, uranges(&cls)); let cls = uclass(&[('x', 'z'), ('a', 'c')]); let expected = vec![('a', 'c'), ('x', 'z')]; assert_eq!(expected, uranges(&cls)); let cls = uclass(&[('x', 'z'), ('w', 'y')]); let expected = vec![('w', 'z')]; assert_eq!(expected, uranges(&cls)); let cls = uclass(&[ ('c', 'f'), ('a', 'g'), ('d', 'j'), ('a', 'c'), ('m', 'p'), ('l', 's'), ]); let expected = vec![('a', 'j'), ('l', 's')]; assert_eq!(expected, uranges(&cls)); let cls = uclass(&[('x', 'z'), ('u', 'w')]); let expected = vec![('u', 'z')]; assert_eq!(expected, uranges(&cls)); let cls = uclass(&[('\x00', '\u{10FFFF}'), ('\x00', '\u{10FFFF}')]); let expected = vec![('\x00', '\u{10FFFF}')]; assert_eq!(expected, uranges(&cls)); let cls = uclass(&[('a', 'a'), ('b', 'b')]); let expected = vec![('a', 'b')]; assert_eq!(expected, uranges(&cls)); } #[test] fn class_canonicalize_bytes() { let cls = bclass(&[(b'a', b'c'), (b'x', b'z')]); let expected = vec![(b'a', b'c'), (b'x', b'z')]; assert_eq!(expected, branges(&cls)); let cls = bclass(&[(b'x', b'z'), (b'a', b'c')]); let expected = vec![(b'a', b'c'), (b'x', b'z')]; assert_eq!(expected, branges(&cls)); let cls = bclass(&[(b'x', b'z'), (b'w', b'y')]); let expected = vec![(b'w', b'z')]; assert_eq!(expected, branges(&cls)); let cls = bclass(&[ (b'c', b'f'), (b'a', b'g'), (b'd', b'j'), (b'a', b'c'), (b'm', b'p'), (b'l', b's'), ]); let expected = vec![(b'a', b'j'), (b'l', b's')]; assert_eq!(expected, branges(&cls)); let cls = bclass(&[(b'x', b'z'), (b'u', b'w')]); let expected = vec![(b'u', b'z')]; assert_eq!(expected, branges(&cls)); let cls = bclass(&[(b'\x00', b'\xFF'), (b'\x00', b'\xFF')]); let expected = vec![(b'\x00', b'\xFF')]; assert_eq!(expected, branges(&cls)); let cls = bclass(&[(b'a', b'a'), (b'b', b'b')]); let expected = vec![(b'a', b'b')]; assert_eq!(expected, branges(&cls)); } #[test] #[cfg(feature = "unicode-case")] fn class_case_fold_unicode() { let cls = uclass(&[ ('C', 'F'), ('A', 'G'), ('D', 'J'), ('A', 'C'), ('M', 'P'), ('L', 'S'), ('c', 'f'), ]); let expected = uclass(&[ ('A', 'J'), ('L', 'S'), ('a', 'j'), ('l', 's'), ('\u{17F}', '\u{17F}'), ]); assert_eq!(expected, ucasefold(&cls)); let cls = uclass(&[('A', 'Z')]); let expected = uclass(&[ ('A', 'Z'), ('a', 'z'), ('\u{17F}', '\u{17F}'), ('\u{212A}', '\u{212A}'), ]); assert_eq!(expected, ucasefold(&cls)); let cls = uclass(&[('a', 'z')]); let expected = uclass(&[ ('A', 'Z'), ('a', 'z'), ('\u{17F}', '\u{17F}'), ('\u{212A}', '\u{212A}'), ]); assert_eq!(expected, ucasefold(&cls)); let cls = uclass(&[('A', 'A'), ('_', '_')]); let expected = uclass(&[('A', 'A'), ('_', '_'), ('a', 'a')]); assert_eq!(expected, ucasefold(&cls)); let cls = uclass(&[('A', 'A'), ('=', '=')]); let expected = uclass(&[('=', '='), ('A', 'A'), ('a', 'a')]); assert_eq!(expected, ucasefold(&cls)); let cls = uclass(&[('\x00', '\x10')]); assert_eq!(cls, ucasefold(&cls)); let cls = uclass(&[('k', 'k')]); let expected = uclass(&[('K', 'K'), ('k', 'k'), ('\u{212A}', '\u{212A}')]); assert_eq!(expected, ucasefold(&cls)); let cls = uclass(&[('@', '@')]); assert_eq!(cls, ucasefold(&cls)); } 
#[test] #[cfg(not(feature = "unicode-case"))] fn class_case_fold_unicode_disabled() { let mut cls = uclass(&[ ('C', 'F'), ('A', 'G'), ('D', 'J'), ('A', 'C'), ('M', 'P'), ('L', 'S'), ('c', 'f'), ]); assert!(cls.try_case_fold_simple().is_err()); } #[test] #[should_panic] #[cfg(not(feature = "unicode-case"))] fn class_case_fold_unicode_disabled_panics() { let mut cls = uclass(&[ ('C', 'F'), ('A', 'G'), ('D', 'J'), ('A', 'C'), ('M', 'P'), ('L', 'S'), ('c', 'f'), ]); cls.case_fold_simple(); } #[test] fn class_case_fold_bytes() { let cls = bclass(&[ (b'C', b'F'), (b'A', b'G'), (b'D', b'J'), (b'A', b'C'), (b'M', b'P'), (b'L', b'S'), (b'c', b'f'), ]); let expected = bclass(&[(b'A', b'J'), (b'L', b'S'), (b'a', b'j'), (b'l', b's')]); assert_eq!(expected, bcasefold(&cls)); let cls = bclass(&[(b'A', b'Z')]); let expected = bclass(&[(b'A', b'Z'), (b'a', b'z')]); assert_eq!(expected, bcasefold(&cls)); let cls = bclass(&[(b'a', b'z')]); let expected = bclass(&[(b'A', b'Z'), (b'a', b'z')]); assert_eq!(expected, bcasefold(&cls)); let cls = bclass(&[(b'A', b'A'), (b'_', b'_')]); let expected = bclass(&[(b'A', b'A'), (b'_', b'_'), (b'a', b'a')]); assert_eq!(expected, bcasefold(&cls)); let cls = bclass(&[(b'A', b'A'), (b'=', b'=')]); let expected = bclass(&[(b'=', b'='), (b'A', b'A'), (b'a', b'a')]); assert_eq!(expected, bcasefold(&cls)); let cls = bclass(&[(b'\x00', b'\x10')]); assert_eq!(cls, bcasefold(&cls)); let cls = bclass(&[(b'k', b'k')]); let expected = bclass(&[(b'K', b'K'), (b'k', b'k')]); assert_eq!(expected, bcasefold(&cls)); let cls = bclass(&[(b'@', b'@')]); assert_eq!(cls, bcasefold(&cls)); } #[test] fn class_negate_unicode() { let cls = uclass(&[('a', 'a')]); let expected = uclass(&[('\x00', '\x60'), ('\x62', '\u{10FFFF}')]); assert_eq!(expected, unegate(&cls)); let cls = uclass(&[('a', 'a'), ('b', 'b')]); let expected = uclass(&[('\x00', '\x60'), ('\x63', '\u{10FFFF}')]); assert_eq!(expected, unegate(&cls)); let cls = uclass(&[('a', 'c'), ('x', 'z')]); let expected = uclass(&[ ('\x00', '\x60'), ('\x64', '\x77'), ('\x7B', '\u{10FFFF}'), ]); assert_eq!(expected, unegate(&cls)); let cls = uclass(&[('\x00', 'a')]); let expected = uclass(&[('\x62', '\u{10FFFF}')]); assert_eq!(expected, unegate(&cls)); let cls = uclass(&[('a', '\u{10FFFF}')]); let expected = uclass(&[('\x00', '\x60')]); assert_eq!(expected, unegate(&cls)); let cls = uclass(&[('\x00', '\u{10FFFF}')]); let expected = uclass(&[]); assert_eq!(expected, unegate(&cls)); let cls = uclass(&[]); let expected = uclass(&[('\x00', '\u{10FFFF}')]); assert_eq!(expected, unegate(&cls)); let cls = uclass(&[('\x00', '\u{10FFFD}'), ('\u{10FFFF}', '\u{10FFFF}')]); let expected = uclass(&[('\u{10FFFE}', '\u{10FFFE}')]); assert_eq!(expected, unegate(&cls)); let cls = uclass(&[('\x00', '\u{D7FF}')]); let expected = uclass(&[('\u{E000}', '\u{10FFFF}')]); assert_eq!(expected, unegate(&cls)); let cls = uclass(&[('\x00', '\u{D7FE}')]); let expected = uclass(&[('\u{D7FF}', '\u{10FFFF}')]); assert_eq!(expected, unegate(&cls)); let cls = uclass(&[('\u{E000}', '\u{10FFFF}')]); let expected = uclass(&[('\x00', '\u{D7FF}')]); assert_eq!(expected, unegate(&cls)); let cls = uclass(&[('\u{E001}', '\u{10FFFF}')]); let expected = uclass(&[('\x00', '\u{E000}')]); assert_eq!(expected, unegate(&cls)); } #[test] fn class_negate_bytes() { let cls = bclass(&[(b'a', b'a')]); let expected = bclass(&[(b'\x00', b'\x60'), (b'\x62', b'\xFF')]); assert_eq!(expected, bnegate(&cls)); let cls = bclass(&[(b'a', b'a'), (b'b', b'b')]); let expected = bclass(&[(b'\x00', b'\x60'), 
(b'\x63', b'\xFF')]); assert_eq!(expected, bnegate(&cls)); let cls = bclass(&[(b'a', b'c'), (b'x', b'z')]); let expected = bclass(&[ (b'\x00', b'\x60'), (b'\x64', b'\x77'), (b'\x7B', b'\xFF'), ]); assert_eq!(expected, bnegate(&cls)); let cls = bclass(&[(b'\x00', b'a')]); let expected = bclass(&[(b'\x62', b'\xFF')]); assert_eq!(expected, bnegate(&cls)); let cls = bclass(&[(b'a', b'\xFF')]); let expected = bclass(&[(b'\x00', b'\x60')]); assert_eq!(expected, bnegate(&cls)); let cls = bclass(&[(b'\x00', b'\xFF')]); let expected = bclass(&[]); assert_eq!(expected, bnegate(&cls)); let cls = bclass(&[]); let expected = bclass(&[(b'\x00', b'\xFF')]); assert_eq!(expected, bnegate(&cls)); let cls = bclass(&[(b'\x00', b'\xFD'), (b'\xFF', b'\xFF')]); let expected = bclass(&[(b'\xFE', b'\xFE')]); assert_eq!(expected, bnegate(&cls)); } #[test] fn class_union_unicode() { let cls1 = uclass(&[('a', 'g'), ('m', 't'), ('A', 'C')]); let cls2 = uclass(&[('a', 'z')]); let expected = uclass(&[('a', 'z'), ('A', 'C')]); assert_eq!(expected, uunion(&cls1, &cls2)); } #[test] fn class_union_bytes() { let cls1 = bclass(&[(b'a', b'g'), (b'm', b't'), (b'A', b'C')]); let cls2 = bclass(&[(b'a', b'z')]); let expected = bclass(&[(b'a', b'z'), (b'A', b'C')]); assert_eq!(expected, bunion(&cls1, &cls2)); } #[test] fn class_intersect_unicode() { let cls1 = uclass(&[]); let cls2 = uclass(&[('a', 'a')]); let expected = uclass(&[]); assert_eq!(expected, uintersect(&cls1, &cls2)); let cls1 = uclass(&[('a', 'a')]); let cls2 = uclass(&[('a', 'a')]); let expected = uclass(&[('a', 'a')]); assert_eq!(expected, uintersect(&cls1, &cls2)); let cls1 = uclass(&[('a', 'a')]); let cls2 = uclass(&[('b', 'b')]); let expected = uclass(&[]); assert_eq!(expected, uintersect(&cls1, &cls2)); let cls1 = uclass(&[('a', 'a')]); let cls2 = uclass(&[('a', 'c')]); let expected = uclass(&[('a', 'a')]); assert_eq!(expected, uintersect(&cls1, &cls2)); let cls1 = uclass(&[('a', 'b')]); let cls2 = uclass(&[('a', 'c')]); let expected = uclass(&[('a', 'b')]); assert_eq!(expected, uintersect(&cls1, &cls2)); let cls1 = uclass(&[('a', 'b')]); let cls2 = uclass(&[('b', 'c')]); let expected = uclass(&[('b', 'b')]); assert_eq!(expected, uintersect(&cls1, &cls2)); let cls1 = uclass(&[('a', 'b')]); let cls2 = uclass(&[('c', 'd')]); let expected = uclass(&[]); assert_eq!(expected, uintersect(&cls1, &cls2)); let cls1 = uclass(&[('b', 'c')]); let cls2 = uclass(&[('a', 'd')]); let expected = uclass(&[('b', 'c')]); assert_eq!(expected, uintersect(&cls1, &cls2)); let cls1 = uclass(&[('a', 'b'), ('d', 'e'), ('g', 'h')]); let cls2 = uclass(&[('a', 'h')]); let expected = uclass(&[('a', 'b'), ('d', 'e'), ('g', 'h')]); assert_eq!(expected, uintersect(&cls1, &cls2)); let cls1 = uclass(&[('a', 'b'), ('d', 'e'), ('g', 'h')]); let cls2 = uclass(&[('a', 'b'), ('d', 'e'), ('g', 'h')]); let expected = uclass(&[('a', 'b'), ('d', 'e'), ('g', 'h')]); assert_eq!(expected, uintersect(&cls1, &cls2)); let cls1 = uclass(&[('a', 'b'), ('g', 'h')]); let cls2 = uclass(&[('d', 'e'), ('k', 'l')]); let expected = uclass(&[]); assert_eq!(expected, uintersect(&cls1, &cls2)); let cls1 = uclass(&[('a', 'b'), ('d', 'e'), ('g', 'h')]); let cls2 = uclass(&[('h', 'h')]); let expected = uclass(&[('h', 'h')]); assert_eq!(expected, uintersect(&cls1, &cls2)); let cls1 = uclass(&[('a', 'b'), ('e', 'f'), ('i', 'j')]); let cls2 = uclass(&[('c', 'd'), ('g', 'h'), ('k', 'l')]); let expected = uclass(&[]); assert_eq!(expected, uintersect(&cls1, &cls2)); let cls1 = uclass(&[('a', 'b'), ('c', 'd'), ('e', 'f')]); let cls2 
= uclass(&[('b', 'c'), ('d', 'e'), ('f', 'g')]); let expected = uclass(&[('b', 'f')]); assert_eq!(expected, uintersect(&cls1, &cls2)); } #[test] fn class_intersect_bytes() { let cls1 = bclass(&[]); let cls2 = bclass(&[(b'a', b'a')]); let expected = bclass(&[]); assert_eq!(expected, bintersect(&cls1, &cls2)); let cls1 = bclass(&[(b'a', b'a')]); let cls2 = bclass(&[(b'a', b'a')]); let expected = bclass(&[(b'a', b'a')]); assert_eq!(expected, bintersect(&cls1, &cls2)); let cls1 = bclass(&[(b'a', b'a')]); let cls2 = bclass(&[(b'b', b'b')]); let expected = bclass(&[]); assert_eq!(expected, bintersect(&cls1, &cls2)); let cls1 = bclass(&[(b'a', b'a')]); let cls2 = bclass(&[(b'a', b'c')]); let expected = bclass(&[(b'a', b'a')]); assert_eq!(expected, bintersect(&cls1, &cls2)); let cls1 = bclass(&[(b'a', b'b')]); let cls2 = bclass(&[(b'a', b'c')]); let expected = bclass(&[(b'a', b'b')]); assert_eq!(expected, bintersect(&cls1, &cls2)); let cls1 = bclass(&[(b'a', b'b')]); let cls2 = bclass(&[(b'b', b'c')]); let expected = bclass(&[(b'b', b'b')]); assert_eq!(expected, bintersect(&cls1, &cls2)); let cls1 = bclass(&[(b'a', b'b')]); let cls2 = bclass(&[(b'c', b'd')]); let expected = bclass(&[]); assert_eq!(expected, bintersect(&cls1, &cls2)); let cls1 = bclass(&[(b'b', b'c')]); let cls2 = bclass(&[(b'a', b'd')]); let expected = bclass(&[(b'b', b'c')]); assert_eq!(expected, bintersect(&cls1, &cls2)); let cls1 = bclass(&[(b'a', b'b'), (b'd', b'e'), (b'g', b'h')]); let cls2 = bclass(&[(b'a', b'h')]); let expected = bclass(&[(b'a', b'b'), (b'd', b'e'), (b'g', b'h')]); assert_eq!(expected, bintersect(&cls1, &cls2)); let cls1 = bclass(&[(b'a', b'b'), (b'd', b'e'), (b'g', b'h')]); let cls2 = bclass(&[(b'a', b'b'), (b'd', b'e'), (b'g', b'h')]); let expected = bclass(&[(b'a', b'b'), (b'd', b'e'), (b'g', b'h')]); assert_eq!(expected, bintersect(&cls1, &cls2)); let cls1 = bclass(&[(b'a', b'b'), (b'g', b'h')]); let cls2 = bclass(&[(b'd', b'e'), (b'k', b'l')]); let expected = bclass(&[]); assert_eq!(expected, bintersect(&cls1, &cls2)); let cls1 = bclass(&[(b'a', b'b'), (b'd', b'e'), (b'g', b'h')]); let cls2 = bclass(&[(b'h', b'h')]); let expected = bclass(&[(b'h', b'h')]); assert_eq!(expected, bintersect(&cls1, &cls2)); let cls1 = bclass(&[(b'a', b'b'), (b'e', b'f'), (b'i', b'j')]); let cls2 = bclass(&[(b'c', b'd'), (b'g', b'h'), (b'k', b'l')]); let expected = bclass(&[]); assert_eq!(expected, bintersect(&cls1, &cls2)); let cls1 = bclass(&[(b'a', b'b'), (b'c', b'd'), (b'e', b'f')]); let cls2 = bclass(&[(b'b', b'c'), (b'd', b'e'), (b'f', b'g')]); let expected = bclass(&[(b'b', b'f')]); assert_eq!(expected, bintersect(&cls1, &cls2)); } #[test] fn class_difference_unicode() { let cls1 = uclass(&[('a', 'a')]); let cls2 = uclass(&[('a', 'a')]); let expected = uclass(&[]); assert_eq!(expected, udifference(&cls1, &cls2)); let cls1 = uclass(&[('a', 'a')]); let cls2 = uclass(&[]); let expected = uclass(&[('a', 'a')]); assert_eq!(expected, udifference(&cls1, &cls2)); let cls1 = uclass(&[]); let cls2 = uclass(&[('a', 'a')]); let expected = uclass(&[]); assert_eq!(expected, udifference(&cls1, &cls2)); let cls1 = uclass(&[('a', 'z')]); let cls2 = uclass(&[('a', 'a')]); let expected = uclass(&[('b', 'z')]); assert_eq!(expected, udifference(&cls1, &cls2)); let cls1 = uclass(&[('a', 'z')]); let cls2 = uclass(&[('z', 'z')]); let expected = uclass(&[('a', 'y')]); assert_eq!(expected, udifference(&cls1, &cls2)); let cls1 = uclass(&[('a', 'z')]); let cls2 = uclass(&[('m', 'm')]); let expected = uclass(&[('a', 'l'), ('n', 'z')]); 
assert_eq!(expected, udifference(&cls1, &cls2)); let cls1 = uclass(&[('a', 'c'), ('g', 'i'), ('r', 't')]); let cls2 = uclass(&[('a', 'z')]); let expected = uclass(&[]); assert_eq!(expected, udifference(&cls1, &cls2)); let cls1 = uclass(&[('a', 'c'), ('g', 'i'), ('r', 't')]); let cls2 = uclass(&[('d', 'v')]); let expected = uclass(&[('a', 'c')]); assert_eq!(expected, udifference(&cls1, &cls2)); let cls1 = uclass(&[('a', 'c'), ('g', 'i'), ('r', 't')]); let cls2 = uclass(&[('b', 'g'), ('s', 'u')]); let expected = uclass(&[('a', 'a'), ('h', 'i'), ('r', 'r')]); assert_eq!(expected, udifference(&cls1, &cls2)); let cls1 = uclass(&[('a', 'c'), ('g', 'i'), ('r', 't')]); let cls2 = uclass(&[('b', 'd'), ('e', 'g'), ('s', 'u')]); let expected = uclass(&[('a', 'a'), ('h', 'i'), ('r', 'r')]); assert_eq!(expected, udifference(&cls1, &cls2)); let cls1 = uclass(&[('x', 'z')]); let cls2 = uclass(&[('a', 'c'), ('e', 'g'), ('s', 'u')]); let expected = uclass(&[('x', 'z')]); assert_eq!(expected, udifference(&cls1, &cls2)); let cls1 = uclass(&[('a', 'z')]); let cls2 = uclass(&[('a', 'c'), ('e', 'g'), ('s', 'u')]); let expected = uclass(&[('d', 'd'), ('h', 'r'), ('v', 'z')]); assert_eq!(expected, udifference(&cls1, &cls2)); } #[test] fn class_difference_bytes() { let cls1 = bclass(&[(b'a', b'a')]); let cls2 = bclass(&[(b'a', b'a')]); let expected = bclass(&[]); assert_eq!(expected, bdifference(&cls1, &cls2)); let cls1 = bclass(&[(b'a', b'a')]); let cls2 = bclass(&[]); let expected = bclass(&[(b'a', b'a')]); assert_eq!(expected, bdifference(&cls1, &cls2)); let cls1 = bclass(&[]); let cls2 = bclass(&[(b'a', b'a')]); let expected = bclass(&[]); assert_eq!(expected, bdifference(&cls1, &cls2)); let cls1 = bclass(&[(b'a', b'z')]); let cls2 = bclass(&[(b'a', b'a')]); let expected = bclass(&[(b'b', b'z')]); assert_eq!(expected, bdifference(&cls1, &cls2)); let cls1 = bclass(&[(b'a', b'z')]); let cls2 = bclass(&[(b'z', b'z')]); let expected = bclass(&[(b'a', b'y')]); assert_eq!(expected, bdifference(&cls1, &cls2)); let cls1 = bclass(&[(b'a', b'z')]); let cls2 = bclass(&[(b'm', b'm')]); let expected = bclass(&[(b'a', b'l'), (b'n', b'z')]); assert_eq!(expected, bdifference(&cls1, &cls2)); let cls1 = bclass(&[(b'a', b'c'), (b'g', b'i'), (b'r', b't')]); let cls2 = bclass(&[(b'a', b'z')]); let expected = bclass(&[]); assert_eq!(expected, bdifference(&cls1, &cls2)); let cls1 = bclass(&[(b'a', b'c'), (b'g', b'i'), (b'r', b't')]); let cls2 = bclass(&[(b'd', b'v')]); let expected = bclass(&[(b'a', b'c')]); assert_eq!(expected, bdifference(&cls1, &cls2)); let cls1 = bclass(&[(b'a', b'c'), (b'g', b'i'), (b'r', b't')]); let cls2 = bclass(&[(b'b', b'g'), (b's', b'u')]); let expected = bclass(&[(b'a', b'a'), (b'h', b'i'), (b'r', b'r')]); assert_eq!(expected, bdifference(&cls1, &cls2)); let cls1 = bclass(&[(b'a', b'c'), (b'g', b'i'), (b'r', b't')]); let cls2 = bclass(&[(b'b', b'd'), (b'e', b'g'), (b's', b'u')]); let expected = bclass(&[(b'a', b'a'), (b'h', b'i'), (b'r', b'r')]); assert_eq!(expected, bdifference(&cls1, &cls2)); let cls1 = bclass(&[(b'x', b'z')]); let cls2 = bclass(&[(b'a', b'c'), (b'e', b'g'), (b's', b'u')]); let expected = bclass(&[(b'x', b'z')]); assert_eq!(expected, bdifference(&cls1, &cls2)); let cls1 = bclass(&[(b'a', b'z')]); let cls2 = bclass(&[(b'a', b'c'), (b'e', b'g'), (b's', b'u')]); let expected = bclass(&[(b'd', b'd'), (b'h', b'r'), (b'v', b'z')]); assert_eq!(expected, bdifference(&cls1, &cls2)); } #[test] fn class_symmetric_difference_unicode() { let cls1 = uclass(&[('a', 'm')]); let cls2 = uclass(&[('g', 
't')]); let expected = uclass(&[('a', 'f'), ('n', 't')]); assert_eq!(expected, usymdifference(&cls1, &cls2)); } #[test] fn class_symmetric_difference_bytes() { let cls1 = bclass(&[(b'a', b'm')]); let cls2 = bclass(&[(b'g', b't')]); let expected = bclass(&[(b'a', b'f'), (b'n', b't')]); assert_eq!(expected, bsymdifference(&cls1, &cls2)); } // We use a thread with an explicit stack size to test that our destructor // for Hir can handle arbitrarily sized expressions in constant stack // space. In case we run on a platform without threads (WASM?), we limit // this test to Windows/Unix. #[test] #[cfg(any(unix, windows))] fn no_stack_overflow_on_drop() { use std::thread; let run = || { let mut expr = Hir::empty(); for _ in 0..100 { expr = Hir::capture(Capture { index: 1, name: None, sub: Box::new(expr), }); expr = Hir::repetition(Repetition { min: 0, max: Some(1), greedy: true, sub: Box::new(expr), }); expr = Hir { kind: HirKind::Concat(vec![expr]), props: Properties::empty(), }; expr = Hir { kind: HirKind::Alternation(vec![expr]), props: Properties::empty(), }; } assert!(!matches!(*expr.kind(), HirKind::Empty)); }; // We run our test on a thread with a small stack size so we can // force the issue more easily. // // NOTE(2023-03-21): See the corresponding test in 'crate::ast::tests' // for context on the specific stack size chosen here. thread::Builder::new() .stack_size(16 << 10) .spawn(run) .unwrap() .join() .unwrap(); } #[test] fn look_set_iter() { let set = LookSet::empty(); assert_eq!(0, set.iter().count()); let set = LookSet::full(); assert_eq!(10, set.iter().count()); let set = LookSet::empty().insert(Look::StartLF).insert(Look::WordUnicode); assert_eq!(2, set.iter().count()); let set = LookSet::empty().insert(Look::StartLF); assert_eq!(1, set.iter().count()); let set = LookSet::empty().insert(Look::WordAsciiNegate); assert_eq!(1, set.iter().count()); } #[test] fn look_set_debug() { let res = format!("{:?}", LookSet::empty()); assert_eq!("∅", res); let res = format!("{:?}", LookSet::full()); assert_eq!("Az^$rRbB𝛃𝚩", res); } } <file_sep>/regex-lite/src/hir/parse.rs use core::cell::{Cell, RefCell}; use alloc::{ boxed::Box, string::{String, ToString}, vec, vec::Vec, }; use crate::{ error::Error, hir::{self, Config, Flags, Hir, HirKind}, }; // These are all of the errors that can occur while parsing a regex. Unlike // regex-syntax, our errors are not particularly great. They are just enough // to get a general sense of what went wrong. But in exchange, the error // reporting mechanism is *much* simpler than what's in regex-syntax. // // By convention, we use each of these messages in exactly one place. That // way, every branch that leads to an error has a unique message. This in turn // means that given a message, one can precisely identify which part of the // parser reported it. // // Finally, we give names to each message so that we can reference them in // tests. 
const ERR_TOO_MUCH_NESTING: &str = "pattern has too much nesting"; const ERR_TOO_MANY_CAPTURES: &str = "too many capture groups"; const ERR_DUPLICATE_CAPTURE_NAME: &str = "duplicate capture group name"; const ERR_UNCLOSED_GROUP: &str = "found open group without closing ')'"; const ERR_UNCLOSED_GROUP_QUESTION: &str = "expected closing ')', but got end of pattern"; const ERR_UNOPENED_GROUP: &str = "found closing ')' without matching '('"; const ERR_LOOK_UNSUPPORTED: &str = "look-around is not supported"; const ERR_EMPTY_FLAGS: &str = "empty flag directive '(?)' is not allowed"; const ERR_MISSING_GROUP_NAME: &str = "expected capture group name, but got end of pattern"; const ERR_INVALID_GROUP_NAME: &str = "invalid group name"; const ERR_UNCLOSED_GROUP_NAME: &str = "expected end of capture group name, but got end of pattern"; const ERR_EMPTY_GROUP_NAME: &str = "empty capture group names are not allowed"; const ERR_FLAG_UNRECOGNIZED: &str = "unrecognized inline flag"; const ERR_FLAG_REPEATED_NEGATION: &str = "inline flag negation cannot be repeated"; const ERR_FLAG_DUPLICATE: &str = "duplicate inline flag is not allowed"; const ERR_FLAG_UNEXPECTED_EOF: &str = "expected ':' or ')' to end inline flags, but got end of pattern"; const ERR_FLAG_DANGLING_NEGATION: &str = "inline flags cannot end with negation directive"; const ERR_DECIMAL_NO_DIGITS: &str = "expected decimal number, but found no digits"; const ERR_DECIMAL_INVALID: &str = "got invalid decimal number"; const ERR_HEX_BRACE_INVALID_DIGIT: &str = "expected hexadecimal number in braces, but got non-hex digit"; const ERR_HEX_BRACE_UNEXPECTED_EOF: &str = "expected hexadecimal number, but saw end of pattern before closing brace"; const ERR_HEX_BRACE_EMPTY: &str = "expected hexadecimal number in braces, but got no digits"; const ERR_HEX_BRACE_INVALID: &str = "got invalid hexadecimal number in braces"; const ERR_HEX_FIXED_UNEXPECTED_EOF: &str = "expected fixed length hexadecimal number, but saw end of pattern first"; const ERR_HEX_FIXED_INVALID_DIGIT: &str = "expected fixed length hexadecimal number, but got non-hex digit"; const ERR_HEX_FIXED_INVALID: &str = "got invalid fixed length hexadecimal number"; const ERR_HEX_UNEXPECTED_EOF: &str = "expected hexadecimal number, but saw end of pattern first"; const ERR_ESCAPE_UNEXPECTED_EOF: &str = "saw start of escape sequence, but saw end of pattern before it finished"; const ERR_BACKREF_UNSUPPORTED: &str = "backreferences are not supported"; const ERR_UNICODE_CLASS_UNSUPPORTED: &str = "Unicode character classes are not supported"; const ERR_ESCAPE_UNRECOGNIZED: &str = "unrecognized escape sequence"; const ERR_POSIX_CLASS_UNRECOGNIZED: &str = "unrecognized POSIX character class"; const ERR_UNCOUNTED_REP_SUB_MISSING: &str = "uncounted repetition operator must be applied to a sub-expression"; const ERR_COUNTED_REP_SUB_MISSING: &str = "counted repetition operator must be applied to a sub-expression"; const ERR_COUNTED_REP_UNCLOSED: &str = "found unclosed counted repetition operator"; const ERR_COUNTED_REP_MIN_UNCLOSED: &str = "found incomplete and unclosed counted repetition operator"; const ERR_COUNTED_REP_COMMA_UNCLOSED: &str = "found counted repetition operator with a comma that is unclosed"; const ERR_COUNTED_REP_MIN_MAX_UNCLOSED: &str = "found counted repetition with min and max that is unclosed"; const ERR_COUNTED_REP_INVALID: &str = "expected closing brace for counted repetition, but got something else"; const ERR_COUNTED_REP_INVALID_RANGE: &str = "found counted repetition with a min bigger than 
its max"; const ERR_CLASS_UNCLOSED_AFTER_ITEM: &str = "non-empty character class has no closing bracket"; const ERR_CLASS_INVALID_RANGE_ITEM: &str = "character class ranges must start and end with a single character"; const ERR_CLASS_INVALID_ITEM: &str = "invalid escape sequence in character class"; const ERR_CLASS_UNCLOSED_AFTER_DASH: &str = "non-empty character class has no closing bracket after dash"; const ERR_CLASS_UNCLOSED_AFTER_NEGATION: &str = "negated character class has no closing bracket"; const ERR_CLASS_UNCLOSED_AFTER_CLOSING: &str = "character class begins with literal ']' but has no closing bracket"; const ERR_CLASS_INVALID_RANGE: &str = "invalid range in character class"; const ERR_CLASS_UNCLOSED: &str = "found unclosed character class"; const ERR_CLASS_NEST_UNSUPPORTED: &str = "nested character classes are not supported"; const ERR_CLASS_INTERSECTION_UNSUPPORTED: &str = "character class intersection is not supported"; const ERR_CLASS_DIFFERENCE_UNSUPPORTED: &str = "character class difference is not supported"; const ERR_CLASS_SYMDIFFERENCE_UNSUPPORTED: &str = "character class symmetric difference is not supported"; /// A regular expression parser. /// /// This parses a string representation of a regular expression into an /// abstract syntax tree. The size of the tree is proportional to the length /// of the regular expression pattern. /// /// A `Parser` can be configured in more detail via a [`ParserBuilder`]. #[derive(Clone, Debug)] pub(super) struct Parser<'a> { /// The configuration of the parser as given by the caller. config: Config, /// The pattern we're parsing as given by the caller. pattern: &'a str, /// The call depth of the parser. This is incremented for each /// sub-expression parsed. Its peak value is the maximum nesting of the /// pattern. depth: Cell<u32>, /// The current position of the parser. pos: Cell<usize>, /// The current codepoint of the parser. The codepoint corresponds to the /// codepoint encoded in `pattern` beginning at `pos`. /// /// This is `None` if and only if `pos == pattern.len()`. char: Cell<Option<char>>, /// The current capture index. capture_index: Cell<u32>, /// The flags that are currently set. flags: RefCell<Flags>, /// A sorted sequence of capture names. This is used to detect duplicate /// capture names and report an error if one is detected. capture_names: RefCell<Vec<String>>, } /// The constructor and a variety of helper routines. impl<'a> Parser<'a> { /// Build a parser from this configuration with the given pattern. pub(super) fn new(config: Config, pattern: &'a str) -> Parser<'a> { Parser { config, pattern, depth: Cell::new(0), pos: Cell::new(0), char: Cell::new(pattern.chars().next()), capture_index: Cell::new(0), flags: RefCell::new(config.flags), capture_names: RefCell::new(vec![]), } } /// Returns the full pattern string that we're parsing. fn pattern(&self) -> &str { self.pattern } /// Return the current byte offset of the parser. /// /// The offset starts at `0` from the beginning of the regular expression /// pattern string. fn pos(&self) -> usize { self.pos.get() } /// Increments the call depth of the parser. /// /// If the call depth would exceed the configured nest limit, then this /// returns an error. /// /// This returns the old depth. fn increment_depth(&self) -> Result<u32, Error> { let old = self.depth.get(); if old > self.config.nest_limit { return Err(Error::new(ERR_TOO_MUCH_NESTING)); } // OK because our depth starts at 0, and we return an error if it // ever reaches the limit. 
So the call depth can never exceed u32::MAX. let new = old.checked_add(1).unwrap(); self.depth.set(new); Ok(old) } /// Decrements the call depth of the parser. /// /// This panics if the current depth is 0. fn decrement_depth(&self) { let old = self.depth.get(); // If this fails then the caller has a bug in how they're incrementing // and decrementing the depth of the parser's call stack. let new = old.checked_sub(1).unwrap(); self.depth.set(new); } /// Return the codepoint at the current position of the parser. /// /// This panics if the parser is positioned at the end of the pattern. fn char(&self) -> char { self.char.get().expect("codepoint, but parser is done") } /// Returns true if the next call to `bump` would return false. fn is_done(&self) -> bool { self.pos() == self.pattern.len() } /// Returns the flags that are current set for this regex. fn flags(&self) -> Flags { *self.flags.borrow() } /// Bump the parser to the next Unicode scalar value. /// /// If the end of the input has been reached, then `false` is returned. fn bump(&self) -> bool { if self.is_done() { return false; } self.pos.set(self.pos() + self.char().len_utf8()); self.char.set(self.pattern()[self.pos()..].chars().next()); self.char.get().is_some() } /// If the substring starting at the current position of the parser has /// the given prefix, then bump the parser to the character immediately /// following the prefix and return true. Otherwise, don't bump the parser /// and return false. fn bump_if(&self, prefix: &str) -> bool { if self.pattern()[self.pos()..].starts_with(prefix) { for _ in 0..prefix.chars().count() { self.bump(); } true } else { false } } /// Bump the parser, and if the `x` flag is enabled, bump through any /// subsequent spaces. Return true if and only if the parser is not done. fn bump_and_bump_space(&self) -> bool { if !self.bump() { return false; } self.bump_space(); !self.is_done() } /// If the `x` flag is enabled (i.e., whitespace insensitivity with /// comments), then this will advance the parser through all whitespace /// and comments to the next non-whitespace non-comment byte. /// /// If the `x` flag is disabled, then this is a no-op. /// /// This should be used selectively throughout the parser where /// arbitrary whitespace is permitted when the `x` flag is enabled. For /// example, `{ 5 , 6}` is equivalent to `{5,6}`. fn bump_space(&self) { if !self.flags().ignore_whitespace { return; } while !self.is_done() { if self.char().is_whitespace() { self.bump(); } else if self.char() == '#' { self.bump(); while !self.is_done() { let c = self.char(); self.bump(); if c == '\n' { break; } } } else { break; } } } /// Peek at the next character in the input without advancing the parser. /// /// If the input has been exhausted, then this returns `None`. fn peek(&self) -> Option<char> { if self.is_done() { return None; } self.pattern()[self.pos() + self.char().len_utf8()..].chars().next() } /// Peeks at the next character in the pattern from the current offset, and /// will ignore spaces when the parser is in whitespace insensitive mode. 
fn peek_space(&self) -> Option<char> { if !self.flags().ignore_whitespace { return self.peek(); } if self.is_done() { return None; } let mut start = self.pos() + self.char().len_utf8(); let mut in_comment = false; for (i, ch) in self.pattern()[start..].char_indices() { if ch.is_whitespace() { continue; } else if !in_comment && ch == '#' { in_comment = true; } else if in_comment && ch == '\n' { in_comment = false; } else { start += i; break; } } self.pattern()[start..].chars().next() } /// Return the next capturing index. Each subsequent call increments the /// internal index. Since the way capture indices are computed is a public /// API guarantee, use of this routine depends on the parser being depth /// first and left-to-right. /// /// If the capture limit is exceeded, then an error is returned. fn next_capture_index(&self) -> Result<u32, Error> { let current = self.capture_index.get(); let next = current .checked_add(1) .ok_or_else(|| Error::new(ERR_TOO_MANY_CAPTURES))?; self.capture_index.set(next); Ok(next) } /// Adds the given capture name to this parser. If this capture name has /// already been used, then an error is returned. fn add_capture_name(&self, name: &str) -> Result<(), Error> { let mut names = self.capture_names.borrow_mut(); match names.binary_search_by(|n| name.cmp(n)) { Ok(_) => Err(Error::new(ERR_DUPLICATE_CAPTURE_NAME)), Err(i) => { names.insert(i, name.to_string()); Ok(()) } } } /// Returns true if and only if the parser is positioned at a look-around /// prefix. The conditions under which this returns true must always /// correspond to a regular expression that would otherwise be consider /// invalid. /// /// This should only be called immediately after parsing the opening of /// a group or a set of flags. fn is_lookaround_prefix(&self) -> bool { self.bump_if("?=") || self.bump_if("?!") || self.bump_if("?<=") || self.bump_if("?<!") } } /// The actual parser. We try to break out each kind of regex syntax into its /// own routine. impl<'a> Parser<'a> { pub(super) fn parse(&self) -> Result<Hir, Error> { let depth = self.increment_depth()?; let mut alternates = vec![]; let mut concat = vec![]; loop { self.bump_space(); if self.is_done() { break; } match self.char() { '(' => { // Save the old flags and reset them only when we close // the group. let oldflags = *self.flags.borrow(); if let Some(sub) = self.parse_group()? { concat.push(sub); // We only reset them here because if 'parse_group' // returns None, then that means it handled a flag // directive, e.g., '(?ism)'. And the whole point is // that those flags remain active until either disabled // or the end of the pattern or current group. *self.flags.borrow_mut() = oldflags; } if self.char.get() != Some(')') { return Err(Error::new(ERR_UNCLOSED_GROUP)); } self.bump(); } ')' => { if depth == 0 { return Err(Error::new(ERR_UNOPENED_GROUP)); } break; } '|' => { alternates.push(Hir::concat(core::mem::take(&mut concat))); self.bump(); } '[' => concat.push(self.parse_class()?), '?' | '*' | '+' => { concat = self.parse_uncounted_repetition(concat)?; } '{' => { concat = self.parse_counted_repetition(concat)?; } _ => concat.push(self.parse_primitive()?), } } self.decrement_depth(); alternates.push(Hir::concat(concat)); // N.B. This strips off the "alternation" if there's only one branch. Ok(Hir::alternation(alternates)) } /// Parses a "primitive" pattern. A primitive is any expression that does /// not contain any sub-expressions. /// /// This assumes the parser is pointing at the beginning of the primitive. 
fn parse_primitive(&self) -> Result<Hir, Error> { let ch = self.char(); self.bump(); match ch { '\\' => self.parse_escape(), '.' => Ok(self.hir_dot()), '^' => Ok(self.hir_anchor_start()), '$' => Ok(self.hir_anchor_end()), ch => Ok(self.hir_char(ch)), } } /// Parse an escape sequence. This always results in a "primitive" HIR, /// that is, an HIR with no sub-expressions. /// /// This assumes the parser is positioned at the start of the sequence, /// immediately *after* the `\`. It advances the parser to the first /// position immediately following the escape sequence. fn parse_escape(&self) -> Result<Hir, Error> { if self.is_done() { return Err(Error::new(ERR_ESCAPE_UNEXPECTED_EOF)); } let ch = self.char(); // Put some of the more complicated routines into helpers. match ch { '0'..='9' => return Err(Error::new(ERR_BACKREF_UNSUPPORTED)), 'p' | 'P' => { return Err(Error::new(ERR_UNICODE_CLASS_UNSUPPORTED)) } 'x' | 'u' | 'U' => return self.parse_hex(), 'd' | 's' | 'w' | 'D' | 'S' | 'W' => { return Ok(self.parse_perl_class()); } _ => {} } // Handle all of the one letter sequences inline. self.bump(); if hir::is_meta_character(ch) || hir::is_escapeable_character(ch) { return Ok(self.hir_char(ch)); } let special = |ch| Ok(self.hir_char(ch)); match ch { 'a' => special('\x07'), 'f' => special('\x0C'), 't' => special('\t'), 'n' => special('\n'), 'r' => special('\r'), 'v' => special('\x0B'), 'A' => Ok(Hir::look(hir::Look::Start)), 'z' => Ok(Hir::look(hir::Look::End)), 'b' => Ok(Hir::look(hir::Look::Word)), 'B' => Ok(Hir::look(hir::Look::WordNegate)), _ => Err(Error::new(ERR_ESCAPE_UNRECOGNIZED)), } } /// Parse a hex representation of a Unicode codepoint. This handles both /// hex notations, i.e., `\xFF` and `\x{FFFF}`. This expects the parser to /// be positioned at the `x`, `u` or `U` prefix. The parser is advanced to /// the first character immediately following the hexadecimal literal. fn parse_hex(&self) -> Result<Hir, Error> { let digit_len = match self.char() { 'x' => 2, 'u' => 4, 'U' => 8, unk => unreachable!( "invalid start of fixed length hexadecimal number {}", unk ), }; if !self.bump_and_bump_space() { return Err(Error::new(ERR_HEX_UNEXPECTED_EOF)); } if self.char() == '{' { self.parse_hex_brace() } else { self.parse_hex_digits(digit_len) } } /// Parse an N-digit hex representation of a Unicode codepoint. This /// expects the parser to be positioned at the first digit and will advance /// the parser to the first character immediately following the escape /// sequence. /// /// The number of digits given must be 2 (for `\xNN`), 4 (for `\uNNNN`) /// or 8 (for `\UNNNNNNNN`). fn parse_hex_digits(&self, digit_len: usize) -> Result<Hir, Error> { let mut scratch = String::new(); for i in 0..digit_len { if i > 0 && !self.bump_and_bump_space() { return Err(Error::new(ERR_HEX_FIXED_UNEXPECTED_EOF)); } if !is_hex(self.char()) { return Err(Error::new(ERR_HEX_FIXED_INVALID_DIGIT)); } scratch.push(self.char()); } // The final bump just moves the parser past the literal, which may // be EOF. self.bump_and_bump_space(); match u32::from_str_radix(&scratch, 16).ok().and_then(char::from_u32) { None => Err(Error::new(ERR_HEX_FIXED_INVALID)), Some(ch) => Ok(self.hir_char(ch)), } } /// Parse a hex representation of any Unicode scalar value. This expects /// the parser to be positioned at the opening brace `{` and will advance /// the parser to the first character following the closing brace `}`. 
fn parse_hex_brace(&self) -> Result<Hir, Error> { let mut scratch = String::new(); while self.bump_and_bump_space() && self.char() != '}' { if !is_hex(self.char()) { return Err(Error::new(ERR_HEX_BRACE_INVALID_DIGIT)); } scratch.push(self.char()); } if self.is_done() { return Err(Error::new(ERR_HEX_BRACE_UNEXPECTED_EOF)); } assert_eq!(self.char(), '}'); self.bump_and_bump_space(); if scratch.is_empty() { return Err(Error::new(ERR_HEX_BRACE_EMPTY)); } match u32::from_str_radix(&scratch, 16).ok().and_then(char::from_u32) { None => Err(Error::new(ERR_HEX_BRACE_INVALID)), Some(ch) => Ok(self.hir_char(ch)), } } /// Parse a decimal number into a u32 while trimming leading and trailing /// whitespace. /// /// This expects the parser to be positioned at the first position where /// a decimal digit could occur. This will advance the parser to the byte /// immediately following the last contiguous decimal digit. /// /// If no decimal digit could be found or if there was a problem parsing /// the complete set of digits into a u32, then an error is returned. fn parse_decimal(&self) -> Result<u32, Error> { let mut scratch = String::new(); while !self.is_done() && self.char().is_whitespace() { self.bump(); } while !self.is_done() && '0' <= self.char() && self.char() <= '9' { scratch.push(self.char()); self.bump_and_bump_space(); } while !self.is_done() && self.char().is_whitespace() { self.bump_and_bump_space(); } let digits = scratch.as_str(); if digits.is_empty() { return Err(Error::new(ERR_DECIMAL_NO_DIGITS)); } match u32::from_str_radix(digits, 10).ok() { Some(n) => Ok(n), None => Err(Error::new(ERR_DECIMAL_INVALID)), } } /// Parses an uncounted repetition operator. An uncounted repetition /// operator includes `?`, `*` and `+`, but does not include the `{m,n}` /// syntax. The current character should be one of `?`, `*` or `+`. Any /// other character will result in a panic. /// /// This assumes that the parser is currently positioned at the repetition /// operator and advances the parser to the first character after the /// operator. (Note that the operator may include a single additional `?`, /// which makes the operator ungreedy.) /// /// The caller should include the concatenation that is being built. The /// concatenation returned includes the repetition operator applied to the /// last expression in the given concatenation. /// /// If the concatenation is empty, then this returns an error. fn parse_uncounted_repetition( &self, mut concat: Vec<Hir>, ) -> Result<Vec<Hir>, Error> { let sub = match concat.pop() { Some(hir) => Box::new(hir), None => { return Err(Error::new(ERR_UNCOUNTED_REP_SUB_MISSING)); } }; let (min, max) = match self.char() { '?' => (0, Some(1)), '*' => (0, None), '+' => (1, None), unk => unreachable!("unrecognized repetition operator '{}'", unk), }; let mut greedy = true; if self.bump() && self.char() == '?' { greedy = false; self.bump(); } if self.flags().swap_greed { greedy = !greedy; } concat.push(Hir::repetition(hir::Repetition { min, max, greedy, sub, })); Ok(concat) } /// Parses a counted repetition operation. A counted repetition operator /// corresponds to the `{m,n}` syntax, and does not include the `?`, `*` or /// `+` operators. /// /// This assumes that the parser is currently at the opening `{` and /// advances the parser to the first character after the operator. (Note /// that the operator may include a single additional `?`, which makes the /// operator ungreedy.) /// /// The caller should include the concatenation that is being built. 
The /// concatenation returned includes the repetition operator applied to the /// last expression in the given concatenation. /// /// If the concatenation is empty, then this returns an error. fn parse_counted_repetition( &self, mut concat: Vec<Hir>, ) -> Result<Vec<Hir>, Error> { assert_eq!(self.char(), '{', "expected opening brace"); let sub = match concat.pop() { Some(hir) => Box::new(hir), None => { return Err(Error::new(ERR_COUNTED_REP_SUB_MISSING)); } }; if !self.bump_and_bump_space() { return Err(Error::new(ERR_COUNTED_REP_UNCLOSED)); } let min = self.parse_decimal()?; let mut max = Some(min); if self.is_done() { return Err(Error::new(ERR_COUNTED_REP_MIN_UNCLOSED)); } if self.char() == ',' { if !self.bump_and_bump_space() { return Err(Error::new(ERR_COUNTED_REP_COMMA_UNCLOSED)); } if self.char() != '}' { max = Some(self.parse_decimal()?); } else { max = None; } if self.is_done() { return Err(Error::new(ERR_COUNTED_REP_MIN_MAX_UNCLOSED)); } } if self.char() != '}' { return Err(Error::new(ERR_COUNTED_REP_INVALID)); } let mut greedy = true; if self.bump_and_bump_space() && self.char() == '?' { greedy = false; self.bump(); } if self.flags().swap_greed { greedy = !greedy; } if max.map_or(false, |max| min > max) { return Err(Error::new(ERR_COUNTED_REP_INVALID_RANGE)); } concat.push(Hir::repetition(hir::Repetition { min, max, greedy, sub, })); Ok(concat) } /// Parses the part of a pattern that starts with a `(`. This is usually /// a group sub-expression, but might just be a directive that enables /// (or disables) certain flags. /// /// This assumes the parser is pointing at the opening `(`. fn parse_group(&self) -> Result<Option<Hir>, Error> { assert_eq!(self.char(), '('); self.bump_and_bump_space(); if self.is_lookaround_prefix() { return Err(Error::new(ERR_LOOK_UNSUPPORTED)); } if self.bump_if("?P<") || self.bump_if("?<") { let index = self.next_capture_index()?; let name = Some(Box::from(self.parse_capture_name()?)); let sub = Box::new(self.parse()?); let cap = hir::Capture { index, name, sub }; Ok(Some(Hir::capture(cap))) } else if self.bump_if("?") { if self.is_done() { return Err(Error::new(ERR_UNCLOSED_GROUP_QUESTION)); } let start = self.pos(); // The flags get reset in the top-level 'parse' routine. *self.flags.borrow_mut() = self.parse_flags()?; let consumed = self.pos() - start; if self.char() == ')' { // We don't allow empty flags, e.g., `(?)`. if consumed == 0 { return Err(Error::new(ERR_EMPTY_FLAGS)); } Ok(None) } else { assert_eq!(':', self.char()); self.bump(); self.parse().map(Some) } } else { let index = self.next_capture_index()?; let sub = Box::new(self.parse()?); let cap = hir::Capture { index, name: None, sub }; Ok(Some(Hir::capture(cap))) } } /// Parses a capture group name. Assumes that the parser is positioned at /// the first character in the name following the opening `<` (and may /// possibly be EOF). This advances the parser to the first character /// following the closing `>`. 
fn parse_capture_name(&self) -> Result<&str, Error> { if self.is_done() { return Err(Error::new(ERR_MISSING_GROUP_NAME)); } let start = self.pos(); loop { if self.char() == '>' { break; } if !is_capture_char(self.char(), self.pos() == start) { return Err(Error::new(ERR_INVALID_GROUP_NAME)); } if !self.bump() { break; } } let end = self.pos(); if self.is_done() { return Err(Error::new(ERR_UNCLOSED_GROUP_NAME)); } assert_eq!(self.char(), '>'); self.bump(); let name = &self.pattern()[start..end]; if name.is_empty() { return Err(Error::new(ERR_EMPTY_GROUP_NAME)); } self.add_capture_name(name)?; Ok(name) } /// Parse a sequence of flags starting at the current character. /// /// This advances the parser to the character immediately following the /// flags, which is guaranteed to be either `:` or `)`. /// /// # Errors /// /// If any flags are duplicated, then an error is returned. /// /// If the negation operator is used more than once, then an error is /// returned. /// /// If no flags could be found or if the negation operation is not followed /// by any flags, then an error is returned. fn parse_flags(&self) -> Result<Flags, Error> { let mut flags = *self.flags.borrow(); let mut negate = false; // Keeps track of whether the previous flag item was a '-'. We use this // to detect whether there is a dangling '-', which is invalid. let mut last_was_negation = false; // A set to keep track of the flags we've seen. Since all flags are // ASCII, we only need 128 bytes. let mut seen = [false; 128]; while self.char() != ':' && self.char() != ')' { if self.char() == '-' { last_was_negation = true; if negate { return Err(Error::new(ERR_FLAG_REPEATED_NEGATION)); } negate = true; } else { last_was_negation = false; self.parse_flag(&mut flags, negate)?; // OK because every valid flag is ASCII, and we're only here if // the flag is valid. let flag_byte = u8::try_from(self.char()).unwrap(); if seen[usize::from(flag_byte)] { return Err(Error::new(ERR_FLAG_DUPLICATE)); } seen[usize::from(flag_byte)] = true; } if !self.bump() { return Err(Error::new(ERR_FLAG_UNEXPECTED_EOF)); } } if last_was_negation { return Err(Error::new(ERR_FLAG_DANGLING_NEGATION)); } Ok(flags) } /// Parse the current character as a flag. Do not advance the parser. /// /// This sets the appropriate boolean value in place on the set of flags /// given. The boolean is inverted when `negate` is true. /// /// # Errors /// /// If the flag is not recognized, then an error is returned. fn parse_flag( &self, flags: &mut Flags, negate: bool, ) -> Result<(), Error> { let enabled = !negate; match self.char() { 'i' => flags.case_insensitive = enabled, 'm' => flags.multi_line = enabled, 's' => flags.dot_matches_new_line = enabled, 'U' => flags.swap_greed = enabled, 'R' => flags.crlf = enabled, 'x' => flags.ignore_whitespace = enabled, // We make a special exception for this flag where we let it // through as a recognized flag, but treat it as a no-op. This in // practice retains some compatibility with the regex crate. It is // a little suspect to do this, but for example, '(?-u:\b).+' in // the regex crate is equivalent to '\b.+' in regex-lite. 'u' => {} _ => return Err(Error::new(ERR_FLAG_UNRECOGNIZED)), } Ok(()) } /// Parse a standard character class consisting primarily of characters or /// character ranges. /// /// This assumes the parser is positioned at the opening `[`. If parsing /// is successful, then the parser is advanced to the position immediately /// following the closing `]`. 
fn parse_class(&self) -> Result<Hir, Error> { assert_eq!(self.char(), '['); let mut union = vec![]; if !self.bump_and_bump_space() { return Err(Error::new(ERR_CLASS_UNCLOSED)); } // Determine whether the class is negated or not. let negate = if self.char() != '^' { false } else { if !self.bump_and_bump_space() { return Err(Error::new(ERR_CLASS_UNCLOSED_AFTER_NEGATION)); } true }; // Accept any number of `-` as literal `-`. while self.char() == '-' { union.push(hir::ClassRange { start: '-', end: '-' }); if !self.bump_and_bump_space() { return Err(Error::new(ERR_CLASS_UNCLOSED_AFTER_DASH)); } } // If `]` is the *first* char in a set, then interpret it as a literal // `]`. That is, an empty class is impossible to write. if union.is_empty() && self.char() == ']' { union.push(hir::ClassRange { start: ']', end: ']' }); if !self.bump_and_bump_space() { return Err(Error::new(ERR_CLASS_UNCLOSED_AFTER_CLOSING)); } } loop { self.bump_space(); if self.is_done() { return Err(Error::new(ERR_CLASS_UNCLOSED)); } match self.char() { '[' => { // Attempt to treat this as the beginning of a POSIX class. // If POSIX class parsing fails, then the parser backs up // to `[`. if let Some(class) = self.maybe_parse_posix_class() { union.extend_from_slice(&class.ranges); continue; } // ... otherwise we don't support nested classes. return Err(Error::new(ERR_CLASS_NEST_UNSUPPORTED)); } ']' => { self.bump(); let mut class = hir::Class::new(union); // Note that we must apply case folding before negation! // Consider `(?i)[^x]`. If we applied negation first, then // the result would be the character class that matched any // Unicode scalar value. if self.flags().case_insensitive { class.ascii_case_fold(); } if negate { class.negate(); } return Ok(Hir::class(class)); } '&' if self.peek() == Some('&') => { return Err(Error::new( ERR_CLASS_INTERSECTION_UNSUPPORTED, )); } '-' if self.peek() == Some('-') => { return Err(Error::new(ERR_CLASS_DIFFERENCE_UNSUPPORTED)); } '~' if self.peek() == Some('~') => { return Err(Error::new( ERR_CLASS_SYMDIFFERENCE_UNSUPPORTED, )); } _ => self.parse_class_range(&mut union)?, } } } /// Parse a single primitive item in a character class set. The item to /// be parsed can either be one of a simple literal character, a range /// between two simple literal characters or a "primitive" character /// class like `\w`. /// /// If an invalid escape is found, or if a character class is found where /// a simple literal is expected (e.g., in a range), then an error is /// returned. /// /// Otherwise, the range (or ranges) are appended to the given union of /// ranges. fn parse_class_range( &self, union: &mut Vec<hir::ClassRange>, ) -> Result<(), Error> { let prim1 = self.parse_class_item()?; self.bump_space(); if self.is_done() { return Err(Error::new(ERR_CLASS_UNCLOSED_AFTER_ITEM)); } // If the next char isn't a `-`, then we don't have a range. // There are two exceptions. If the char after a `-` is a `]`, then // `-` is interpreted as a literal `-`. Alternatively, if the char // after a `-` is a `-`, then `--` corresponds to a "difference" // operation. (Which we don't support in regex-lite, but error about // specifically in an effort to be loud about differences between the // main regex crate where possible.) if self.char() != '-' || self.peek_space() == Some(']') || self.peek_space() == Some('-') { union.extend_from_slice(&into_class_item_ranges(prim1)?); return Ok(()); } // OK, now we're parsing a range, so bump past the `-` and parse the // second half of the range. 
if !self.bump_and_bump_space() { return Err(Error::new(ERR_CLASS_UNCLOSED_AFTER_DASH)); } let prim2 = self.parse_class_item()?; let range = hir::ClassRange { start: into_class_item_range(prim1)?, end: into_class_item_range(prim2)?, }; if range.start > range.end { return Err(Error::new(ERR_CLASS_INVALID_RANGE)); } union.push(range); Ok(()) } /// Parse a single item in a character class as a primitive, where the /// primitive either consists of a verbatim literal or a single escape /// sequence. /// /// This assumes the parser is positioned at the beginning of a primitive, /// and advances the parser to the first position after the primitive if /// successful. /// /// Note that it is the caller's responsibility to report an error if an /// illegal primitive was parsed. fn parse_class_item(&self) -> Result<Hir, Error> { let ch = self.char(); self.bump(); if ch == '\\' { self.parse_escape() } else { Ok(Hir::char(ch)) } } /// Attempt to parse a POSIX character class, e.g., `[:alnum:]`. /// /// This assumes the parser is positioned at the opening `[`. /// /// If no valid POSIX character class could be found, then this does not /// advance the parser and `None` is returned. Otherwise, the parser is /// advanced to the first byte following the closing `]` and the /// corresponding POSIX class is returned. fn maybe_parse_posix_class(&self) -> Option<hir::Class> { // POSIX character classes are interesting from a parsing perspective // because parsing cannot fail with any interesting error. For example, // in order to use an POSIX character class, it must be enclosed in // double brackets, e.g., `[[:alnum:]]`. Alternatively, you might think // of it as "POSIX character classes have the syntax `[:NAME:]` which // can only appear within character brackets." This means that things // like `[[:lower:]A]` are legal constructs. // // However, if one types an incorrect POSIX character class, e.g., // `[[:loower:]]`, then we treat that as if it were normal nested // character class containing the characters `:elorw`. (Which isn't // supported and results in an error in regex-lite.) One might argue // that we should return an error instead since the repeated colons // give away the intent to write an POSIX class. But what if the user // typed `[[:lower]]` instead? How can we tell that was intended to be // a POSXI class and not just a normal nested class? // // Reasonable people can probably disagree over this, but for better // or worse, we implement semantics that never fails at the expense of // better failure modes. assert_eq!(self.char(), '['); // If parsing fails, then we back up the parser to this starting point. let start_pos = self.pos(); let start_char = self.char.get(); let reset = || { self.pos.set(start_pos); self.char.set(start_char); }; let mut negated = false; if !self.bump() || self.char() != ':' { reset(); return None; } if !self.bump() { reset(); return None; } if self.char() == '^' { negated = true; if !self.bump() { reset(); return None; } } let name_start = self.pos(); while self.char() != ':' && self.bump() {} if self.is_done() { reset(); return None; } let name = &self.pattern()[name_start..self.pos()]; if !self.bump_if(":]") { reset(); return None; } if let Ok(ranges) = posix_class(name) { let mut class = hir::Class::new(ranges); if negated { class.negate(); } return Some(class); } reset(); None } /// Parse a Perl character class, e.g., `\d` or `\W`. 
This assumes the /// parser is currently at a valid character class name and will be /// advanced to the character immediately following the class. fn parse_perl_class(&self) -> Hir { let ch = self.char(); self.bump(); let mut class = hir::Class::new(match ch { 'd' | 'D' => posix_class("digit").unwrap(), 's' | 'S' => posix_class("space").unwrap(), 'w' | 'W' => posix_class("word").unwrap(), unk => unreachable!("invalid Perl class \\{}", unk), }); if ch.is_ascii_uppercase() { class.negate(); } Hir::class(class) } fn hir_dot(&self) -> Hir { if self.flags().dot_matches_new_line { Hir::class(hir::Class::new([hir::ClassRange { start: '\x00', end: '\u{10FFFF}', }])) } else if self.flags().crlf { Hir::class(hir::Class::new([ hir::ClassRange { start: '\x00', end: '\x09' }, hir::ClassRange { start: '\x0B', end: '\x0C' }, hir::ClassRange { start: '\x0E', end: '\u{10FFFF}' }, ])) } else { Hir::class(hir::Class::new([ hir::ClassRange { start: '\x00', end: '\x09' }, hir::ClassRange { start: '\x0B', end: '\u{10FFFF}' }, ])) } } fn hir_anchor_start(&self) -> Hir { let look = if self.flags().multi_line { if self.flags().crlf { hir::Look::StartCRLF } else { hir::Look::StartLF } } else { hir::Look::Start }; Hir::look(look) } fn hir_anchor_end(&self) -> Hir { let look = if self.flags().multi_line { if self.flags().crlf { hir::Look::EndCRLF } else { hir::Look::EndLF } } else { hir::Look::End }; Hir::look(look) } fn hir_char(&self, ch: char) -> Hir { if self.flags().case_insensitive { let this = hir::ClassRange { start: ch, end: ch }; if let Some(folded) = this.ascii_case_fold() { return Hir::class(hir::Class::new([this, folded])); } } Hir::char(ch) } } /// Converts the given Hir to a literal char if the Hir is just a single /// character. Otherwise this returns an error. /// /// This is useful in contexts where you can only accept a single character, /// but where it is convenient to parse something more general. For example, /// parsing a single part of a character class range. It's useful to reuse /// the literal parsing code, but that code can itself return entire classes /// which can't be used as the start/end of a class range. fn into_class_item_range(hir: Hir) -> Result<char, Error> { match hir.kind { HirKind::Char(ch) => Ok(ch), _ => Err(Error::new(ERR_CLASS_INVALID_RANGE_ITEM)), } } fn into_class_item_ranges(hir: Hir) -> Result<Vec<hir::ClassRange>, Error> { match hir.kind { HirKind::Char(ch) => Ok(vec![hir::ClassRange { start: ch, end: ch }]), HirKind::Class(hir::Class { ranges }) => Ok(ranges), _ => Err(Error::new(ERR_CLASS_INVALID_ITEM)), } } /// Returns an iterator of character class ranges for the given named POSIX /// character class. If no such character class exists for the name given, then /// an error is returned. 
fn posix_class( kind: &str, ) -> Result<impl Iterator<Item = hir::ClassRange>, Error> { let slice: &'static [(u8, u8)] = match kind { "alnum" => &[(b'0', b'9'), (b'A', b'Z'), (b'a', b'z')], "alpha" => &[(b'A', b'Z'), (b'a', b'z')], "ascii" => &[(b'\x00', b'\x7F')], "blank" => &[(b'\t', b'\t'), (b' ', b' ')], "cntrl" => &[(b'\x00', b'\x1F'), (b'\x7F', b'\x7F')], "digit" => &[(b'0', b'9')], "graph" => &[(b'!', b'~')], "lower" => &[(b'a', b'z')], "print" => &[(b' ', b'~')], "punct" => &[(b'!', b'/'), (b':', b'@'), (b'[', b'`'), (b'{', b'~')], "space" => &[ (b'\t', b'\t'), (b'\n', b'\n'), (b'\x0B', b'\x0B'), (b'\x0C', b'\x0C'), (b'\r', b'\r'), (b' ', b' '), ], "upper" => &[(b'A', b'Z')], "word" => &[(b'0', b'9'), (b'A', b'Z'), (b'_', b'_'), (b'a', b'z')], "xdigit" => &[(b'0', b'9'), (b'A', b'F'), (b'a', b'f')], _ => return Err(Error::new(ERR_POSIX_CLASS_UNRECOGNIZED)), }; Ok(slice.iter().map(|&(start, end)| hir::ClassRange { start: char::from(start), end: char::from(end), })) } /// Returns true if the given character is a hexadecimal digit. fn is_hex(c: char) -> bool { ('0' <= c && c <= '9') || ('a' <= c && c <= 'f') || ('A' <= c && c <= 'F') } /// Returns true if the given character is a valid in a capture group name. /// /// If `first` is true, then `c` is treated as the first character in the /// group name (which must be alphabetic or underscore). fn is_capture_char(c: char, first: bool) -> bool { if first { c == '_' || c.is_alphabetic() } else { c == '_' || c == '.' || c == '[' || c == ']' || c.is_alphanumeric() } } #[cfg(test)] mod tests { use super::*; fn p(pattern: &str) -> Hir { Parser::new(Config::default(), pattern).parse().unwrap() } fn perr(pattern: &str) -> String { Parser::new(Config::default(), pattern) .parse() .unwrap_err() .to_string() } fn class<I: IntoIterator<Item = (char, char)>>(it: I) -> Hir { Hir::class(hir::Class::new( it.into_iter().map(|(start, end)| hir::ClassRange { start, end }), )) } fn singles<I: IntoIterator<Item = char>>(it: I) -> Hir { Hir::class(hir::Class::new( it.into_iter().map(|ch| hir::ClassRange { start: ch, end: ch }), )) } fn posix(name: &str) -> Hir { Hir::class(hir::Class::new(posix_class(name).unwrap())) } fn cap(index: u32, sub: Hir) -> Hir { Hir::capture(hir::Capture { index, name: None, sub: Box::new(sub) }) } fn named_cap(index: u32, name: &str, sub: Hir) -> Hir { Hir::capture(hir::Capture { index, name: Some(Box::from(name)), sub: Box::new(sub), }) } #[test] fn ok_literal() { assert_eq!(p("a"), Hir::char('a')); assert_eq!(p("ab"), Hir::concat(vec![Hir::char('a'), Hir::char('b')])); assert_eq!(p("💩"), Hir::char('💩')); } #[test] fn ok_meta_escapes() { assert_eq!(p(r"\*"), Hir::char('*')); assert_eq!(p(r"\+"), Hir::char('+')); assert_eq!(p(r"\?"), Hir::char('?')); assert_eq!(p(r"\|"), Hir::char('|')); assert_eq!(p(r"\("), Hir::char('(')); assert_eq!(p(r"\)"), Hir::char(')')); assert_eq!(p(r"\^"), Hir::char('^')); assert_eq!(p(r"\$"), Hir::char('$')); assert_eq!(p(r"\["), Hir::char('[')); assert_eq!(p(r"\]"), Hir::char(']')); } #[test] fn ok_special_escapes() { assert_eq!(p(r"\a"), Hir::char('\x07')); assert_eq!(p(r"\f"), Hir::char('\x0C')); assert_eq!(p(r"\t"), Hir::char('\t')); assert_eq!(p(r"\n"), Hir::char('\n')); assert_eq!(p(r"\r"), Hir::char('\r')); assert_eq!(p(r"\v"), Hir::char('\x0B')); assert_eq!(p(r"\A"), Hir::look(hir::Look::Start)); assert_eq!(p(r"\z"), Hir::look(hir::Look::End)); assert_eq!(p(r"\b"), Hir::look(hir::Look::Word)); assert_eq!(p(r"\B"), Hir::look(hir::Look::WordNegate)); } #[test] fn ok_hex() { // fixed length 
assert_eq!(p(r"\x41"), Hir::char('A')); assert_eq!(p(r"\u2603"), Hir::char('☃')); assert_eq!(p(r"\U0001F4A9"), Hir::char('💩')); // braces assert_eq!(p(r"\x{1F4A9}"), Hir::char('💩')); assert_eq!(p(r"\u{1F4A9}"), Hir::char('💩')); assert_eq!(p(r"\U{1F4A9}"), Hir::char('💩')); } #[test] fn ok_perl() { assert_eq!(p(r"\d"), posix("digit")); assert_eq!(p(r"\s"), posix("space")); assert_eq!(p(r"\w"), posix("word")); let negated = |name| { let mut class = hir::Class::new(posix_class(name).unwrap()); class.negate(); Hir::class(class) }; assert_eq!(p(r"\D"), negated("digit")); assert_eq!(p(r"\S"), negated("space")); assert_eq!(p(r"\W"), negated("word")); } #[test] fn ok_flags_and_primitives() { assert_eq!(p(r"a"), Hir::char('a')); assert_eq!(p(r"(?i:a)"), singles(['A', 'a'])); assert_eq!(p(r"^"), Hir::look(hir::Look::Start)); assert_eq!(p(r"(?m:^)"), Hir::look(hir::Look::StartLF)); assert_eq!(p(r"(?mR:^)"), Hir::look(hir::Look::StartCRLF)); assert_eq!(p(r"$"), Hir::look(hir::Look::End)); assert_eq!(p(r"(?m:$)"), Hir::look(hir::Look::EndLF)); assert_eq!(p(r"(?mR:$)"), Hir::look(hir::Look::EndCRLF)); assert_eq!(p(r"."), class([('\x00', '\x09'), ('\x0B', '\u{10FFFF}')])); assert_eq!( p(r"(?R:.)"), class([ ('\x00', '\x09'), ('\x0B', '\x0C'), ('\x0E', '\u{10FFFF}'), ]) ); assert_eq!(p(r"(?s:.)"), class([('\x00', '\u{10FFFF}')])); assert_eq!(p(r"(?sR:.)"), class([('\x00', '\u{10FFFF}')])); } #[test] fn ok_alternate() { assert_eq!( p(r"a|b"), Hir::alternation(vec![Hir::char('a'), Hir::char('b')]) ); assert_eq!( p(r"(?:a|b)"), Hir::alternation(vec![Hir::char('a'), Hir::char('b')]) ); assert_eq!( p(r"(a|b)"), cap(1, Hir::alternation(vec![Hir::char('a'), Hir::char('b')])) ); assert_eq!( p(r"(?<foo>a|b)"), named_cap( 1, "foo", Hir::alternation(vec![Hir::char('a'), Hir::char('b')]) ) ); assert_eq!( p(r"a|b|c"), Hir::alternation(vec![ Hir::char('a'), Hir::char('b'), Hir::char('c') ]) ); assert_eq!( p(r"ax|by|cz"), Hir::alternation(vec![ Hir::concat(vec![Hir::char('a'), Hir::char('x')]), Hir::concat(vec![Hir::char('b'), Hir::char('y')]), Hir::concat(vec![Hir::char('c'), Hir::char('z')]), ]) ); assert_eq!( p(r"(ax|(by|(cz)))"), cap( 1, Hir::alternation(vec![ Hir::concat(vec![Hir::char('a'), Hir::char('x')]), cap( 2, Hir::alternation(vec![ Hir::concat(vec![Hir::char('b'), Hir::char('y')]), cap( 3, Hir::concat(vec![ Hir::char('c'), Hir::char('z') ]) ), ]) ), ]) ) ); assert_eq!( p(r"|"), Hir::alternation(vec![Hir::empty(), Hir::empty()]) ); assert_eq!( p(r"||"), Hir::alternation(vec![Hir::empty(), Hir::empty(), Hir::empty()]) ); assert_eq!( p(r"a|"), Hir::alternation(vec![Hir::char('a'), Hir::empty()]) ); assert_eq!( p(r"|a"), Hir::alternation(vec![Hir::empty(), Hir::char('a')]) ); assert_eq!( p(r"(|)"), cap(1, Hir::alternation(vec![Hir::empty(), Hir::empty()])) ); assert_eq!( p(r"(a|)"), cap(1, Hir::alternation(vec![Hir::char('a'), Hir::empty()])) ); assert_eq!( p(r"(|a)"), cap(1, Hir::alternation(vec![Hir::empty(), Hir::char('a')])) ); } #[test] fn ok_flag_group() { assert_eq!( p("a(?i:b)"), Hir::concat(vec![Hir::char('a'), singles(['B', 'b'])]) ); } #[test] fn ok_flag_directive() { assert_eq!(p("(?i)a"), singles(['A', 'a'])); assert_eq!(p("a(?i)"), Hir::char('a')); assert_eq!( p("a(?i)b"), Hir::concat(vec![Hir::char('a'), singles(['B', 'b'])]) ); assert_eq!( p("a(?i)a(?-i)a"), Hir::concat(vec![ Hir::char('a'), singles(['A', 'a']), Hir::char('a'), ]) ); assert_eq!( p("a(?:(?i)a)a"), Hir::concat(vec![ Hir::char('a'), singles(['A', 'a']), Hir::char('a'), ]) ); assert_eq!( p("a((?i)a)a"), Hir::concat(vec![ 
Hir::char('a'), cap(1, singles(['A', 'a'])), Hir::char('a'), ]) ); } #[test] fn ok_uncounted_repetition() { assert_eq!( p(r"a?"), Hir::repetition(hir::Repetition { min: 0, max: Some(1), greedy: true, sub: Box::new(Hir::char('a')), }), ); assert_eq!( p(r"a*"), Hir::repetition(hir::Repetition { min: 0, max: None, greedy: true, sub: Box::new(Hir::char('a')), }), ); assert_eq!( p(r"a+"), Hir::repetition(hir::Repetition { min: 1, max: None, greedy: true, sub: Box::new(Hir::char('a')), }), ); assert_eq!( p(r"a??"), Hir::repetition(hir::Repetition { min: 0, max: Some(1), greedy: false, sub: Box::new(Hir::char('a')), }), ); assert_eq!( p(r"a*?"), Hir::repetition(hir::Repetition { min: 0, max: None, greedy: false, sub: Box::new(Hir::char('a')), }), ); assert_eq!( p(r"a+?"), Hir::repetition(hir::Repetition { min: 1, max: None, greedy: false, sub: Box::new(Hir::char('a')), }), ); assert_eq!( p(r"a?b"), Hir::concat(vec![ Hir::repetition(hir::Repetition { min: 0, max: Some(1), greedy: true, sub: Box::new(Hir::char('a')), }), Hir::char('b'), ]), ); assert_eq!( p(r"ab?"), Hir::concat(vec![ Hir::char('a'), Hir::repetition(hir::Repetition { min: 0, max: Some(1), greedy: true, sub: Box::new(Hir::char('b')), }), ]), ); assert_eq!( p(r"(?:ab)?"), Hir::repetition(hir::Repetition { min: 0, max: Some(1), greedy: true, sub: Box::new(Hir::concat(vec![ Hir::char('a'), Hir::char('b') ])), }), ); assert_eq!( p(r"(ab)?"), Hir::repetition(hir::Repetition { min: 0, max: Some(1), greedy: true, sub: Box::new(cap( 1, Hir::concat(vec![Hir::char('a'), Hir::char('b')]) )), }), ); assert_eq!( p(r"|a?"), Hir::alternation(vec![ Hir::empty(), Hir::repetition(hir::Repetition { min: 0, max: Some(1), greedy: true, sub: Box::new(Hir::char('a')), }) ]), ); } #[test] fn ok_counted_repetition() { assert_eq!( p(r"a{5}"), Hir::repetition(hir::Repetition { min: 5, max: Some(5), greedy: true, sub: Box::new(Hir::char('a')), }), ); assert_eq!( p(r"a{5}?"), Hir::repetition(hir::Repetition { min: 5, max: Some(5), greedy: false, sub: Box::new(Hir::char('a')), }), ); assert_eq!( p(r"a{5,}"), Hir::repetition(hir::Repetition { min: 5, max: None, greedy: true, sub: Box::new(Hir::char('a')), }), ); assert_eq!( p(r"a{5,9}"), Hir::repetition(hir::Repetition { min: 5, max: Some(9), greedy: true, sub: Box::new(Hir::char('a')), }), ); assert_eq!( p(r"ab{5}c"), Hir::concat(vec![ Hir::char('a'), Hir::repetition(hir::Repetition { min: 5, max: Some(5), greedy: true, sub: Box::new(Hir::char('b')), }), Hir::char('c'), ]), ); assert_eq!( p(r"a{ 5 }"), Hir::repetition(hir::Repetition { min: 5, max: Some(5), greedy: true, sub: Box::new(Hir::char('a')), }), ); assert_eq!( p(r"a{ 5 , 9 }"), Hir::repetition(hir::Repetition { min: 5, max: Some(9), greedy: true, sub: Box::new(Hir::char('a')), }), ); } #[test] fn ok_group_unnamed() { assert_eq!(p("(a)"), cap(1, Hir::char('a'))); assert_eq!( p("(ab)"), cap(1, Hir::concat(vec![Hir::char('a'), Hir::char('b')])) ); } #[test] fn ok_group_named() { assert_eq!(p("(?P<foo>a)"), named_cap(1, "foo", Hir::char('a'))); assert_eq!(p("(?<foo>a)"), named_cap(1, "foo", Hir::char('a'))); assert_eq!( p("(?P<foo>ab)"), named_cap( 1, "foo", Hir::concat(vec![Hir::char('a'), Hir::char('b')]) ) ); assert_eq!( p("(?<foo>ab)"), named_cap( 1, "foo", Hir::concat(vec![Hir::char('a'), Hir::char('b')]) ) ); assert_eq!(p(r"(?<a>z)"), named_cap(1, "a", Hir::char('z'))); assert_eq!(p(r"(?P<a>z)"), named_cap(1, "a", Hir::char('z'))); assert_eq!(p(r"(?<a_1>z)"), named_cap(1, "a_1", Hir::char('z'))); assert_eq!(p(r"(?P<a_1>z)"), named_cap(1, "a_1", 
Hir::char('z'))); assert_eq!(p(r"(?<a.1>z)"), named_cap(1, "a.1", Hir::char('z'))); assert_eq!(p(r"(?P<a.1>z)"), named_cap(1, "a.1", Hir::char('z'))); assert_eq!(p(r"(?<a[1]>z)"), named_cap(1, "a[1]", Hir::char('z'))); assert_eq!(p(r"(?P<a[1]>z)"), named_cap(1, "a[1]", Hir::char('z'))); assert_eq!(p(r"(?<a¾>z)"), named_cap(1, "a¾", Hir::char('z'))); assert_eq!(p(r"(?P<a¾>z)"), named_cap(1, "a¾", Hir::char('z'))); assert_eq!(p(r"(?<名字>z)"), named_cap(1, "名字", Hir::char('z'))); assert_eq!(p(r"(?P<名字>z)"), named_cap(1, "名字", Hir::char('z'))); } #[test] fn ok_class() { assert_eq!(p(r"[a]"), singles(['a'])); assert_eq!(p(r"[a\]]"), singles(['a', ']'])); assert_eq!(p(r"[a\-z]"), singles(['a', '-', 'z'])); assert_eq!(p(r"[ab]"), class([('a', 'b')])); assert_eq!(p(r"[a-]"), singles(['a', '-'])); assert_eq!(p(r"[-a]"), singles(['a', '-'])); assert_eq!(p(r"[--a]"), singles(['a', '-'])); assert_eq!(p(r"[---a]"), singles(['a', '-'])); assert_eq!(p(r"[[:alnum:]]"), posix("alnum")); assert_eq!(p(r"[\w]"), posix("word")); assert_eq!(p(r"[a\wz]"), posix("word")); assert_eq!(p(r"[\s\S]"), class([('\x00', '\u{10FFFF}')])); assert_eq!(p(r"[^\s\S]"), Hir::fail()); assert_eq!(p(r"[a-cx-z]"), class([('a', 'c'), ('x', 'z')])); assert_eq!(p(r"[☃-⛄]"), class([('☃', '⛄')])); assert_eq!(p(r"[]]"), singles([']'])); assert_eq!(p(r"[]a]"), singles([']', 'a'])); assert_eq!(p(r"[]\[]"), singles(['[', ']'])); assert_eq!(p(r"[\[]"), singles(['['])); assert_eq!(p(r"(?i)[a]"), singles(['A', 'a'])); assert_eq!(p(r"(?i)[A]"), singles(['A', 'a'])); assert_eq!(p(r"(?i)[k]"), singles(['K', 'k'])); assert_eq!(p(r"(?i)[s]"), singles(['S', 's'])); assert_eq!(p(r"(?i)[β]"), singles(['β'])); assert_eq!(p(r"[^^]"), class([('\x00', ']'), ('_', '\u{10FFFF}')])); assert_eq!( p(r"[^-a]"), class([('\x00', ','), ('.', '`'), ('b', '\u{10FFFF}')]) ); assert_eq!( p(r"[-]a]"), Hir::concat(vec![singles(['-']), Hir::char('a'), Hir::char(']')]) ); } #[test] fn ok_verbatim() { assert_eq!( p(r"(?x)a{5,9} ?"), Hir::repetition(hir::Repetition { min: 5, max: Some(9), greedy: false, sub: Box::new(Hir::char('a')), }) ); assert_eq!(p(r"(?x)[ a]"), singles(['a'])); assert_eq!( p(r"(?x)[ ^ a]"), class([('\x00', '`'), ('b', '\u{10FFFF}')]) ); assert_eq!(p(r"(?x)[ - a]"), singles(['a', '-'])); assert_eq!(p(r"(?x)[ ] a]"), singles([']', 'a'])); assert_eq!( p(r"(?x)a b"), Hir::concat(vec![Hir::char('a'), Hir::char('b')]) ); assert_eq!( p(r"(?x)a b(?-x)a b"), Hir::concat(vec![ Hir::char('a'), Hir::char('b'), Hir::char('a'), Hir::char(' '), Hir::char('b'), ]) ); assert_eq!( p(r"a (?x:a )a "), Hir::concat(vec![ Hir::char('a'), Hir::char(' '), Hir::char('a'), Hir::char('a'), Hir::char(' '), ]) ); assert_eq!( p(r"(?x)( ?P<foo> a )"), named_cap(1, "foo", Hir::char('a')), ); assert_eq!(p(r"(?x)( a )"), cap(1, Hir::char('a'))); assert_eq!(p(r"(?x)( ?: a )"), Hir::char('a')); assert_eq!(p(r"(?x)\x { 53 }"), Hir::char('\x53')); assert_eq!(p(r"(?x)\ "), Hir::char(' ')); } #[test] fn ok_comments() { let pat = "(?x) # This is comment 1. foo # This is comment 2. # This is comment 3. bar # This is comment 4."; assert_eq!( p(pat), Hir::concat(vec![ Hir::char('f'), Hir::char('o'), Hir::char('o'), Hir::char('b'), Hir::char('a'), Hir::char('r'), ]) ); } #[test] fn err_standard() { assert_eq!( ERR_TOO_MUCH_NESTING, perr("(((((((((((((((((((((((((((((((((((((((((((((((((((a)))))))))))))))))))))))))))))))))))))))))))))))))))"), ); // This one is tricky, because the only way it can happen is if the // number of captures overflows u32. Perhaps we should allow setting a // lower limit? 
// assert_eq!(ERR_TOO_MANY_CAPTURES, perr("")); assert_eq!(ERR_DUPLICATE_CAPTURE_NAME, perr(r"(?P<a>y)(?P<a>z)")); assert_eq!(ERR_UNCLOSED_GROUP, perr("(")); assert_eq!(ERR_UNCLOSED_GROUP_QUESTION, perr("(?")); assert_eq!(ERR_UNOPENED_GROUP, perr(")")); assert_eq!(ERR_LOOK_UNSUPPORTED, perr(r"(?=a)")); assert_eq!(ERR_LOOK_UNSUPPORTED, perr(r"(?!a)")); assert_eq!(ERR_LOOK_UNSUPPORTED, perr(r"(?<=a)")); assert_eq!(ERR_LOOK_UNSUPPORTED, perr(r"(?<!a)")); assert_eq!(ERR_EMPTY_FLAGS, perr(r"(?)")); assert_eq!(ERR_MISSING_GROUP_NAME, perr(r"(?P<")); assert_eq!(ERR_MISSING_GROUP_NAME, perr(r"(?<")); assert_eq!(ERR_INVALID_GROUP_NAME, perr(r"(?P<1abc>z)")); assert_eq!(ERR_INVALID_GROUP_NAME, perr(r"(?<1abc>z)")); assert_eq!(ERR_INVALID_GROUP_NAME, perr(r"(?<¾>z)")); assert_eq!(ERR_INVALID_GROUP_NAME, perr(r"(?<¾a>z)")); assert_eq!(ERR_INVALID_GROUP_NAME, perr(r"(?<☃>z)")); assert_eq!(ERR_INVALID_GROUP_NAME, perr(r"(?<a☃>z)")); assert_eq!(ERR_UNCLOSED_GROUP_NAME, perr(r"(?P<foo")); assert_eq!(ERR_UNCLOSED_GROUP_NAME, perr(r"(?<foo")); assert_eq!(ERR_EMPTY_GROUP_NAME, perr(r"(?P<>z)")); assert_eq!(ERR_EMPTY_GROUP_NAME, perr(r"(?<>z)")); assert_eq!(ERR_FLAG_UNRECOGNIZED, perr(r"(?z:foo)")); assert_eq!(ERR_FLAG_REPEATED_NEGATION, perr(r"(?s-i-R)")); assert_eq!(ERR_FLAG_DUPLICATE, perr(r"(?isi)")); assert_eq!(ERR_FLAG_DUPLICATE, perr(r"(?is-i)")); assert_eq!(ERR_FLAG_UNEXPECTED_EOF, perr(r"(?is")); assert_eq!(ERR_FLAG_DANGLING_NEGATION, perr(r"(?is-:foo)")); assert_eq!(ERR_HEX_BRACE_INVALID_DIGIT, perr(r"\x{Z}")); assert_eq!(ERR_HEX_BRACE_UNEXPECTED_EOF, perr(r"\x{")); assert_eq!(ERR_HEX_BRACE_UNEXPECTED_EOF, perr(r"\x{A")); assert_eq!(ERR_HEX_BRACE_EMPTY, perr(r"\x{}")); assert_eq!(ERR_HEX_BRACE_INVALID, perr(r"\x{FFFFFFFFFFFFFFFFF}")); assert_eq!(ERR_HEX_FIXED_UNEXPECTED_EOF, perr(r"\xA")); assert_eq!(ERR_HEX_FIXED_INVALID_DIGIT, perr(r"\xZ")); assert_eq!(ERR_HEX_FIXED_INVALID_DIGIT, perr(r"\xZA")); assert_eq!(ERR_HEX_FIXED_INVALID_DIGIT, perr(r"\xAZ")); assert_eq!(ERR_HEX_FIXED_INVALID, perr(r"\uD800")); assert_eq!(ERR_HEX_FIXED_INVALID, perr(r"\UFFFFFFFF")); assert_eq!(ERR_HEX_UNEXPECTED_EOF, perr(r"\x")); assert_eq!(ERR_ESCAPE_UNEXPECTED_EOF, perr(r"\")); assert_eq!(ERR_BACKREF_UNSUPPORTED, perr(r"\0")); assert_eq!(ERR_BACKREF_UNSUPPORTED, perr(r"\1")); assert_eq!(ERR_BACKREF_UNSUPPORTED, perr(r"\8")); assert_eq!(ERR_UNICODE_CLASS_UNSUPPORTED, perr(r"\pL")); assert_eq!(ERR_UNICODE_CLASS_UNSUPPORTED, perr(r"\p{L}")); assert_eq!(ERR_ESCAPE_UNRECOGNIZED, perr(r"\i")); assert_eq!(ERR_ESCAPE_UNRECOGNIZED, perr(r"\<")); assert_eq!(ERR_ESCAPE_UNRECOGNIZED, perr(r"\>")); assert_eq!(ERR_UNCOUNTED_REP_SUB_MISSING, perr(r"?")); assert_eq!(ERR_UNCOUNTED_REP_SUB_MISSING, perr(r"*")); assert_eq!(ERR_UNCOUNTED_REP_SUB_MISSING, perr(r"+")); assert_eq!(ERR_UNCOUNTED_REP_SUB_MISSING, perr(r"(+)")); assert_eq!(ERR_UNCOUNTED_REP_SUB_MISSING, perr(r"|?")); assert_eq!(ERR_UNCOUNTED_REP_SUB_MISSING, perr(r"(?i)?")); assert_eq!(ERR_COUNTED_REP_SUB_MISSING, perr(r"{5}")); assert_eq!(ERR_COUNTED_REP_SUB_MISSING, perr(r"({5})")); assert_eq!(ERR_COUNTED_REP_SUB_MISSING, perr(r"(?i){5}")); assert_eq!(ERR_COUNTED_REP_UNCLOSED, perr(r"a{")); assert_eq!(ERR_COUNTED_REP_MIN_UNCLOSED, perr(r"a{5")); assert_eq!(ERR_COUNTED_REP_COMMA_UNCLOSED, perr(r"a{5,")); assert_eq!(ERR_COUNTED_REP_MIN_MAX_UNCLOSED, perr(r"a{5,6")); assert_eq!(ERR_COUNTED_REP_INVALID, perr(r"a{5,6Z")); assert_eq!(ERR_COUNTED_REP_INVALID_RANGE, perr(r"a{6,5}")); assert_eq!(ERR_DECIMAL_NO_DIGITS, perr(r"a{}")); assert_eq!(ERR_DECIMAL_NO_DIGITS, perr(r"a{]}")); 
assert_eq!(ERR_DECIMAL_INVALID, perr(r"a{999999999999999}")); assert_eq!(ERR_CLASS_UNCLOSED_AFTER_ITEM, perr(r"[a")); assert_eq!(ERR_CLASS_INVALID_RANGE_ITEM, perr(r"[\w-a]")); assert_eq!(ERR_CLASS_INVALID_RANGE_ITEM, perr(r"[a-\w]")); assert_eq!(ERR_CLASS_INVALID_ITEM, perr(r"[\b]")); assert_eq!(ERR_CLASS_UNCLOSED_AFTER_DASH, perr(r"[a-")); assert_eq!(ERR_CLASS_UNCLOSED_AFTER_NEGATION, perr(r"[^")); assert_eq!(ERR_CLASS_UNCLOSED_AFTER_CLOSING, perr(r"[]")); assert_eq!(ERR_CLASS_INVALID_RANGE, perr(r"[z-a]")); assert_eq!(ERR_CLASS_UNCLOSED, perr(r"[")); assert_eq!(ERR_CLASS_UNCLOSED, perr(r"[a-z")); assert_eq!(ERR_CLASS_NEST_UNSUPPORTED, perr(r"[a-z[A-Z]]")); assert_eq!(ERR_CLASS_NEST_UNSUPPORTED, perr(r"[[:alnum]]")); assert_eq!(ERR_CLASS_INTERSECTION_UNSUPPORTED, perr(r"[a&&b]")); assert_eq!(ERR_CLASS_DIFFERENCE_UNSUPPORTED, perr(r"[a--b]")); assert_eq!(ERR_CLASS_SYMDIFFERENCE_UNSUPPORTED, perr(r"[a~~b]")); } #[test] fn err_verbatim() { // See: https://github.com/rust-lang/regex/issues/792 assert_eq!(ERR_CLASS_UNCLOSED_AFTER_DASH, perr(r"(?x)[-#]")); assert_eq!(ERR_CLASS_UNCLOSED_AFTER_ITEM, perr(r"(?x)[a ")); assert_eq!(ERR_CLASS_UNCLOSED_AFTER_DASH, perr(r"(?x)[a- ")); assert_eq!(ERR_CLASS_UNCLOSED, perr(r"(?x)[ ")); } // This tests a bug fix where the nest limit checker wasn't decrementing // its depth during post-traversal, which causes long regexes to trip // the default limit too aggressively. #[test] fn regression_454_nest_too_big() { let pattern = r#" 2(?: [45]\d{3}| 7(?: 1[0-267]| 2[0-289]| 3[0-29]| 4[01]| 5[1-3]| 6[013]| 7[0178]| 91 )| 8(?: 0[125]| [139][1-6]| 2[0157-9]| 41| 6[1-35]| 7[1-5]| 8[1-8]| 90 )| 9(?: 0[0-2]| 1[0-4]| 2[568]| 3[3-6]| 5[5-7]| 6[0167]| 7[15]| 8[0146-9] ) )\d{4} "#; p(pattern); } // This tests that we treat a trailing `-` in a character class as a // literal `-` even when whitespace mode is enabled and there is whitespace // after the trailing `-`. #[test] fn regression_455_trailing_dash_ignore_whitespace() { p("(?x)[ / - ]"); p("(?x)[ a - ]"); p("(?x)[ a - ] "); p("(?x)[ a # wat - ] "); perr("(?x)[ / -"); perr("(?x)[ / - "); perr( "(?x)[ / - ", ); perr( "(?x)[ / - # wat ", ); } #[test] fn regression_capture_indices() { let got = p(r"(a|ab|c|bcd){4,10}(d*)"); assert_eq!( got, Hir::concat(vec![ Hir::repetition(hir::Repetition { min: 4, max: Some(10), greedy: true, sub: Box::new(cap( 1, Hir::alternation(vec![ Hir::char('a'), Hir::concat(vec![Hir::char('a'), Hir::char('b')]), Hir::char('c'), Hir::concat(vec![ Hir::char('b'), Hir::char('c'), Hir::char('d') ]), ]) )) }), cap( 2, Hir::repetition(hir::Repetition { min: 0, max: None, greedy: true, sub: Box::new(Hir::char('d')), }) ), ]) ); } } <file_sep>/UNICODE.md # Unicode conformance This document describes the regex crate's conformance to Unicode's [UTS#18](https://unicode.org/reports/tr18/) report, which lays out 3 levels of support: Basic, Extended and Tailored. Full support for Level 1 ("Basic Unicode Support") is provided with two exceptions: 1. Line boundaries are not Unicode aware. Namely, only the `\n` (`END OF LINE`) character is recognized as a line boundary by default. One can opt into `\r\n|\r|\n` being a line boundary via CRLF mode. 2. The compatibility properties specified by [RL1.2a](https://unicode.org/reports/tr18/#RL1.2a) are ASCII-only definitions. Little to no support is provided for either Level 2 or Level 3. For the most part, this is because the features are either complex/hard to implement, or at the very least, very difficult to implement without sacrificing performance. 
For example, tackling canonical equivalence such that matching worked as one
would expect regardless of normalization form would be a significant
undertaking. This is at least partially a result of the fact that this regex
engine is based on finite automata, which admits less flexibility normally
associated with backtracking implementations.


## RL1.1 Hex Notation

[UTS#18 RL1.1](https://unicode.org/reports/tr18/#Hex_notation)

Hex Notation refers to the ability to specify a Unicode code point in a
regular expression via its hexadecimal code point representation. This is
useful in environments that have poor Unicode font rendering or if you need
to express a code point that is not normally displayable. All forms of
hexadecimal notation are supported:

    \x7F        hex character code (exactly two digits)
    \x{10FFFF}  any hex character code corresponding to a Unicode code point
    \u007F      hex character code (exactly four digits)
    \u{7F}      any hex character code corresponding to a Unicode code point
    \U0000007F  hex character code (exactly eight digits)
    \U{7F}      any hex character code corresponding to a Unicode code point

Briefly, the `\x{...}`, `\u{...}` and `\U{...}` are all exactly equivalent
ways of expressing hexadecimal code points. Any number of digits can be
written within the brackets. In contrast, `\xNN`, `\uNNNN`, `\UNNNNNNNN` are
all fixed-width variants of the same idea.

Note that when Unicode mode is disabled, any non-ASCII Unicode codepoint is
banned. Additionally, the `\xNN` syntax represents arbitrary bytes when
Unicode mode is disabled. That is, the regex `\xFF` matches the Unicode
codepoint U+00FF (encoded as `\xC3\xBF` in UTF-8) while the regex `(?-u)\xFF`
matches the literal byte `\xFF`.


## RL1.2 Properties

[UTS#18 RL1.2](https://unicode.org/reports/tr18/#Categories)

Full support for Unicode property syntax is provided. Unicode properties
provide a convenient way to construct character classes of groups of code
points specified by Unicode. The regex crate does not provide exhaustive
support, but covers a useful subset. In particular:

* [General categories](https://unicode.org/reports/tr18/#General_Category_Property)
* [Scripts and Script Extensions](https://unicode.org/reports/tr18/#Script_Property)
* [Age](https://unicode.org/reports/tr18/#Age)
* A smattering of boolean properties, including all of those specified by
  [RL1.2](https://unicode.org/reports/tr18/#RL1.2) explicitly.

In all cases, property name and value abbreviations are supported, and all
names/values are matched loosely without regard for case, whitespace or
underscores. Property name aliases can be found in Unicode's
[`PropertyAliases.txt`](https://www.unicode.org/Public/UCD/latest/ucd/PropertyAliases.txt)
file, while property value aliases can be found in Unicode's
[`PropertyValueAliases.txt`](https://www.unicode.org/Public/UCD/latest/ucd/PropertyValueAliases.txt)
file.

The syntax supported is also consistent with the UTS#18 recommendation (a
short example follows the list below):

* `\p{Greek}` selects the `Greek` script. Equivalent expressions follow:
  `\p{sc:Greek}`, `\p{Script:Greek}`, `\p{Sc=Greek}`, `\p{script=Greek}`,
  `\P{sc!=Greek}`. Similarly for `General_Category` (or `gc` for short) and
  `Script_Extensions` (or `scx` for short).
* `\p{age:3.2}` selects all code points in Unicode 3.2.
* `\p{Alphabetic}` selects the "alphabetic" property and can be abbreviated
  via `\p{alpha}` (for example).
* Single letter variants for properties with single letter abbreviations.
  For example, `\p{Letter}` can be equivalently written as `\pL`.

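As a rough illustration of the syntax above, the following sketch shows a few
equivalent property escapes in use with the `regex` crate. (The specific
patterns and haystacks here are invented for illustration; they are not taken
from UTS#18.)

```rust
use regex::Regex;

fn main() {
    // The Script property, written in three loosely-equivalent ways. Names
    // and values are matched without regard for case or underscores.
    for pat in [r"\p{Greek}+", r"\p{sc:Greek}+", r"\p{script=greek}+"] {
        let re = Regex::new(pat).unwrap();
        assert!(re.is_match("αβγ"));
    }

    // The single letter form: `\pL` is equivalent to `\p{Letter}`.
    let re = Regex::new(r"\pL").unwrap();
    assert!(re.is_match("δ"));
}
```
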
The following is a list of all properties supported by the regex crate
(starred properties correspond to properties required by RL1.2):

* `General_Category` \* (including `Any`, `ASCII` and `Assigned`)
* `Script` \*
* `Script_Extensions` \*
* `Age`
* `ASCII_Hex_Digit`
* `Alphabetic` \*
* `Bidi_Control`
* `Case_Ignorable`
* `Cased`
* `Changes_When_Casefolded`
* `Changes_When_Casemapped`
* `Changes_When_Lowercased`
* `Changes_When_Titlecased`
* `Changes_When_Uppercased`
* `Dash`
* `Default_Ignorable_Code_Point` \*
* `Deprecated`
* `Diacritic`
* `Emoji`
* `Emoji_Presentation`
* `Emoji_Modifier`
* `Emoji_Modifier_Base`
* `Emoji_Component`
* `Extended_Pictographic`
* `Extender`
* `Grapheme_Base`
* `Grapheme_Cluster_Break`
* `Grapheme_Extend`
* `Hex_Digit`
* `IDS_Binary_Operator`
* `IDS_Trinary_Operator`
* `ID_Continue`
* `ID_Start`
* `Join_Control`
* `Logical_Order_Exception`
* `Lowercase` \*
* `Math`
* `Noncharacter_Code_Point` \*
* `Pattern_Syntax`
* `Pattern_White_Space`
* `Prepended_Concatenation_Mark`
* `Quotation_Mark`
* `Radical`
* `Regional_Indicator`
* `Sentence_Break`
* `Sentence_Terminal`
* `Soft_Dotted`
* `Terminal_Punctuation`
* `Unified_Ideograph`
* `Uppercase` \*
* `Variation_Selector`
* `White_Space` \*
* `Word_Break`
* `XID_Continue`
* `XID_Start`


## RL1.2a Compatibility Properties

[UTS#18 RL1.2a](https://unicode.org/reports/tr18/#RL1.2a)

The regex crate only provides ASCII definitions of the
[compatibility properties documented in UTS#18 Annex C](https://unicode.org/reports/tr18/#Compatibility_Properties)
(sans the `\X` class, for matching grapheme clusters, which isn't provided
at all). This is because it seems to be consistent with most other regular
expression engines, and in particular, because these are often referred to
as "ASCII" or "POSIX" character classes.

Note that the `\w`, `\s` and `\d` character classes **are** Unicode aware.
Their traditional ASCII definition can be used by disabling Unicode. That is,
`[[:word:]]` and `(?-u)\w` are equivalent.


## RL1.3 Subtraction and Intersection

[UTS#18 RL1.3](https://unicode.org/reports/tr18/#Subtraction_and_Intersection)

The regex crate provides full support for nested character classes, along
with union, intersection (`&&`), difference (`--`) and symmetric difference
(`~~`) operations on arbitrary character classes. For example, to match all
non-ASCII letters, you could use either `[\p{Letter}--\p{Ascii}]`
(difference) or `[\p{Letter}&&[^\p{Ascii}]]` (intersecting the negation).


## RL1.4 Simple Word Boundaries

[UTS#18 RL1.4](https://unicode.org/reports/tr18/#Simple_Word_Boundaries)

The regex crate provides basic Unicode aware word boundary assertions. A word
boundary assertion can be written as `\b`, or `\B` as its negation. A word
boundary assertion corresponds to a zero-width match, where its adjacent
characters correspond to word and non-word, or non-word and word characters.

Conformance in this case chooses to define word character in the same way
that the `\w` character class is defined: a code point that is a member of
one of the following classes:

* `\p{Alphabetic}`
* `\p{Join_Control}`
* `\p{gc:Mark}`
* `\p{gc:Decimal_Number}`
* `\p{gc:Connector_Punctuation}`

In particular, this differs slightly from the
[prescription given in RL1.4](https://unicode.org/reports/tr18/#Simple_Word_Boundaries)
but is permissible according to
[UTS#18 Annex C](https://unicode.org/reports/tr18/#Compatibility_Properties).
Namely, it is convenient and simpler to have `\w` and `\b` be in sync with
one another.

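To make that word character definition concrete, here is a small sketch (the
patterns and haystacks are invented for illustration) showing that `\b` and
`\w` agree on a non-ASCII letter:

```rust
use regex::Regex;

fn main() {
    // `δ` is Alphabetic, hence a word character, so `\b` sees a boundary
    // between the ASCII space and `δ`...
    let re = Regex::new(r"\bδ+\b").unwrap();
    assert!(re.is_match(" δδ "));

    // ...and `\w` agrees, since the two definitions are kept in sync.
    let re = Regex::new(r"\w+").unwrap();
    assert_eq!(re.find("δδ").unwrap().as_str(), "δδ");
}
```
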
Finally, Unicode word boundaries can be disabled, which will cause ASCII word boundaries to be used instead. That is, `\b` is a Unicode word boundary while `(?-u)\b` is an ASCII-only word boundary. This can occasionally be beneficial if performance is important, since the implementation of Unicode word boundaries is currently sub-optimal on non-ASCII text.

## RL1.5 Simple Loose Matches

[UTS#18 RL1.5](https://unicode.org/reports/tr18/#Simple_Loose_Matches)

The regex crate provides full support for case insensitive matching in accordance with RL1.5. That is, it uses the "simple" case folding mapping. The "simple" mapping was chosen because of a key convenient property: every "simple" mapping is a mapping from exactly one code point to exactly one other code point. This makes case insensitive matching of character classes, for example, straight-forward to implement.

When case insensitive mode is enabled (e.g., `(?i)[a]` is equivalent to `a|A`), then all character classes are case folded as well.

## RL1.6 Line Boundaries

[UTS#18 RL1.6](https://unicode.org/reports/tr18/#Line_Boundaries)

The regex crate only provides support for recognizing the `\n` (`END OF LINE`) character as a line boundary by default. One can also opt into treating `\r\n|\r|\n` as a line boundary via CRLF mode. This choice was made mostly for implementation convenience, and to avoid performance cliffs that Unicode word boundaries are subject to.

## RL1.7 Code Points

[UTS#18 RL1.7](https://unicode.org/reports/tr18/#Supplementary_Characters)

The regex crate provides full support for Unicode code point matching. Namely, the fundamental atom of any match is always a single code point.

Given Rust's strong ties to UTF-8, the following guarantees are also provided:

* All matches are reported on valid UTF-8 code unit boundaries. That is, any match range returned by the public regex API is guaranteed to successfully slice the string that was searched.
* By consequence of the above, it is impossible to match surrogate code points. No support for UTF-16 is provided, so this is never necessary.

Note that when Unicode mode is disabled, the fundamental atom of matching is no longer a code point but a single byte. When Unicode mode is disabled, many Unicode features are disabled as well. For example, `(?-u)\pL` is not a valid regex but `\pL(?-u)\xFF` (matches any Unicode `Letter` followed by the literal byte `\xFF`) is.
<file_sep>/regex-automata/src/lib.rs
/*!
This crate exposes a variety of regex engines used by the `regex` crate. It provides a vast, sprawling and "expert" level API to each regex engine. The regex engines provided by this crate focus heavily on finite automata implementations and specifically guarantee worst case `O(m * n)` time complexity for all searches. (Where `m ~ len(regex)` and `n ~ len(haystack)`.)

The primary goal of this crate is to serve as an implementation detail for the `regex` crate. A secondary goal is to make its internals available for use by others.

# Table of contents

* [Should I be using this crate?](#should-i-be-using-this-crate) gives some reasons for and against using this crate.
* [Examples](#examples) provides a small selection of things you can do with this crate.
* [Available regex engines](#available-regex-engines) provides a hyperlinked list of all regex engines in this crate.
* [API themes](#api-themes) discusses common elements used throughout this crate.
* [Crate features](#crate-features) documents the extensive list of Cargo features available.
# Should I be using this crate?

If you find yourself here because you just want to use regexes, then you should first check out whether the [`regex` crate](https://docs.rs/regex) meets your needs. It provides a streamlined and difficult-to-misuse API for regex searching.

If you're here because there is something specific you want to do that can't be easily done with the `regex` crate, then you are perhaps in the right place. It's most likely that the first stop you'll want to make is to explore the [`meta` regex APIs](meta). Namely, the `regex` crate is just a light wrapper over a [`meta::Regex`], so its API will probably be the easiest to transition to. In contrast to the `regex` crate, the `meta::Regex` API supports more search parameters and does multi-pattern searches. However, it isn't quite as ergonomic.

Otherwise, the following is an inexhaustive list of reasons to use this crate:

* You want to analyze or use a [Thompson `NFA`](nfa::thompson::NFA) directly.
* You want more powerful multi-pattern search than what is provided by `RegexSet` in the `regex` crate. All regex engines in this crate support multi-pattern searches.
* You want to use one of the `regex` crate's internal engines directly because of some interesting configuration that isn't possible via the `regex` crate. For example, a [lazy DFA's configuration](hybrid::dfa::Config) exposes a dizzying number of options for controlling its execution.
* You want to use the lower level search APIs. For example, both the [lazy DFA](hybrid::dfa) and [fully compiled DFAs](dfa) support searching by exploring the automaton one state at a time. This might be useful, for example, for stream searches or searches of strings stored in non-contiguous memory.
* You want to build a fully compiled DFA and then [use zero-copy deserialization](dfa::dense::DFA::from_bytes) to load it into memory and use it for searching. This use case is supported in core-only no-std/no-alloc environments.
* You want to run [anchored searches](Input::anchored) without using the `^` anchor in your regex pattern.
* You need to work around contention issues with sharing a regex across multiple threads. The [`meta::Regex::search_with`](meta::Regex::search_with) API permits bypassing any kind of synchronization at all by requiring the caller to provide the mutable scratch space needed during a search.
* You want to build your own regex engine on top of the `regex` crate's infrastructure.

# Examples

This section tries to identify a few interesting things you can do with this crate and demonstrates them.

### Multi-pattern searches with capture groups

One of the more frustrating limitations of `RegexSet` in the `regex` crate (at the time of writing) is that it doesn't report match positions. With this crate, multi-pattern support was intentionally designed in from the beginning, which means it works in all regex engines and even for capture groups as well.

This example shows how to search for matches of multiple regexes, where each regex uses the same capture group names to parse different key-value formats.
``` use regex_automata::{meta::Regex, PatternID}; let re = Regex::new_many(&[ r#"(?m)^(?<key>[[:word:]]+)=(?<val>[[:word:]]+)$"#, r#"(?m)^(?<key>[[:word:]]+)="(?<val>[^"]+)"$"#, r#"(?m)^(?<key>[[:word:]]+)='(?<val>[^']+)'$"#, r#"(?m)^(?<key>[[:word:]]+):\s*(?<val>[[:word:]]+)$"#, ])?; let hay = r#" best_album="Blow Your Face Out" best_quote='"then as it was, then again it will be"' best_year=1973 best_simpsons_episode: HOMR "#; let mut kvs = vec![]; for caps in re.captures_iter(hay) { // N.B. One could use capture indices '1' and '2' here // as well. Capture indices are local to each pattern. // (Just like names are.) let key = &hay[caps.get_group_by_name("key").unwrap()]; let val = &hay[caps.get_group_by_name("val").unwrap()]; kvs.push((key, val)); } assert_eq!(kvs, vec![ ("best_album", "Blow Your Face Out"), ("best_quote", "\"then as it was, then again it will be\""), ("best_year", "1973"), ("best_simpsons_episode", "HOMR"), ]); # Ok::<(), Box<dyn std::error::Error>>(()) ``` ### Build a full DFA and walk it manually One of the regex engines in this crate is a fully compiled DFA. It takes worst case exponential time to build, but once built, it can be easily explored and used for searches. Here's a simple example that uses its lower level APIs to implement a simple anchored search by hand. ``` use regex_automata::{dfa::{Automaton, dense}, Input}; let dfa = dense::DFA::new(r"(?-u)\b[A-Z]\w+z\b")?; let haystack = "Quartz"; // The start state is determined by inspecting the position and the // initial bytes of the haystack. let mut state = dfa.start_state_forward(&Input::new(haystack))?; // Walk all the bytes in the haystack. for &b in haystack.as_bytes().iter() { state = dfa.next_state(state, b); } // DFAs in this crate require an explicit // end-of-input transition if a search reaches // the end of a haystack. state = dfa.next_eoi_state(state); assert!(dfa.is_match_state(state)); # Ok::<(), Box<dyn std::error::Error>>(()) ``` Or do the same with a lazy DFA that avoids exponential worst case compile time, but requires mutable scratch space to lazily build the DFA during the search. ``` use regex_automata::{hybrid::dfa::DFA, Input}; let dfa = DFA::new(r"(?-u)\b[A-Z]\w+z\b")?; let mut cache = dfa.create_cache(); let hay = "Quartz"; // The start state is determined by inspecting the position and the // initial bytes of the haystack. let mut state = dfa.start_state_forward(&mut cache, &Input::new(hay))?; // Walk all the bytes in the haystack. for &b in hay.as_bytes().iter() { state = dfa.next_state(&mut cache, state, b)?; } // DFAs in this crate require an explicit // end-of-input transition if a search reaches // the end of a haystack. state = dfa.next_eoi_state(&mut cache, state)?; assert!(state.is_match()); # Ok::<(), Box<dyn std::error::Error>>(()) ``` ### Find all overlapping matches This example shows how to build a DFA and use it to find all possible matches, including overlapping matches. A similar example will work with a lazy DFA as well. This also works with multiple patterns and will report all matches at the same position where multiple patterns match. 
``` use regex_automata::{ dfa::{dense, Automaton, OverlappingState}, Input, MatchKind, }; let dfa = dense::DFA::builder() .configure(dense::DFA::config().match_kind(MatchKind::All)) .build(r"(?-u)\w{3,}")?; let input = Input::new("homer marge bart lisa maggie"); let mut state = OverlappingState::start(); let mut matches = vec![]; while let Some(hm) = { dfa.try_search_overlapping_fwd(&input, &mut state)?; state.get_match() } { matches.push(hm.offset()); } assert_eq!(matches, vec![ 3, 4, 5, // hom, home, homer 9, 10, 11, // mar, marg, marge 15, 16, // bar, bart 20, 21, // lis, lisa 25, 26, 27, 28, // mag, magg, maggi, maggie ]); # Ok::<(), Box<dyn std::error::Error>>(()) ``` # Available regex engines The following is a complete list of all regex engines provided by this crate, along with a very brief description of it and why you might want to use it. * [`dfa::regex::Regex`] is a regex engine that works on top of either [dense](dfa::dense) or [sparse](dfa::sparse) fully compiled DFAs. You might use a DFA if you need the fastest possible regex engine in this crate and can afford the exorbitant memory usage usually required by DFAs. Low level APIs on fully compiled DFAs are provided by the [`Automaton` trait](dfa::Automaton). Fully compiled dense DFAs can handle all regexes except for searching a regex with a Unicode word boundary on non-ASCII haystacks. A fully compiled DFA based regex can only report the start and end of each match. * [`hybrid::regex::Regex`] is a regex engine that works on top of a lazily built DFA. Its performance profile is very similar to that of fully compiled DFAs, but can be slower in some pathological cases. Fully compiled DFAs are also amenable to more optimizations, such as state acceleration, that aren't available in a lazy DFA. You might use this lazy DFA if you can't abide the worst case exponential compile time of a full DFA, but still want the DFA search performance in the vast majority of cases. A lazy DFA based regex can only report the start and end of each match. * [`dfa::onepass::DFA`] is a regex engine that is implemented as a DFA, but can report the matches of each capture group in addition to the start and end of each match. The catch is that it only works on a somewhat small subset of regexes known as "one-pass." You'll want to use this for cases when you need capture group matches and the regex is one-pass since it is likely to be faster than any alternative. A one-pass DFA can handle all types of regexes, but does have some reasonable limits on the number of capture groups it can handle. * [`nfa::thompson::backtrack::BoundedBacktracker`] is a regex engine that uses backtracking, but keeps track of the work it has done to avoid catastrophic backtracking. Like the one-pass DFA, it provides the matches of each capture group. It retains the `O(m * n)` worst case time bound. This tends to be slower than the one-pass DFA regex engine, but faster than the PikeVM. It can handle all types of regexes, but usually only works well with small haystacks and small regexes due to the memory required to avoid redoing work. * [`nfa::thompson::pikevm::PikeVM`] is a regex engine that can handle all regexes, of all sizes and provides capture group matches. It tends to be a tool of last resort because it is also usually the slowest regex engine. * [`meta::Regex`] is the meta regex engine that combines *all* of the above engines into one. 
The reason for this is that each of the engines above have their own caveats such as, "only handles a subset of regexes" or "is generally slow." The meta regex engine accounts for all of these caveats and composes the engines in a way that attempts to mitigate each engine's weaknesses while emphasizing its strengths. For example, it will attempt to run a lazy DFA even if it might fail. In which case, it will restart the search with a likely slower but more capable regex engine. The meta regex engine is what you should default to. Use one of the above engines directly only if you have a specific reason to. # API themes While each regex engine has its own APIs and configuration options, there are some general themes followed by all of them. ### The `Input` abstraction Most search routines in this crate accept anything that implements `Into<Input>`. Both `&str` and `&[u8]` haystacks satisfy this constraint, which means that things like `engine.search("foo")` will work as you would expect. By virtue of accepting an `Into<Input>` though, callers can provide more than just a haystack. Indeed, the [`Input`] type has more details, but briefly, callers can use it to configure various aspects of the search: * The span of the haystack to search via [`Input::span`] or [`Input::range`], which might be a substring of the haystack. * Whether to run an anchored search or not via [`Input::anchored`]. This permits one to require matches to start at the same offset that the search started. * Whether to ask the regex engine to stop as soon as a match is seen via [`Input::earliest`]. This can be used to find the offset of a match as soon as it is known without waiting for the full leftmost-first match to be found. This can also be used to avoid the worst case `O(m * n^2)` time complexity of iteration. Some lower level search routines accept an `&Input` for performance reasons. In which case, `&Input::new("haystack")` can be used for a simple search. ### Error reporting Most, but not all, regex engines in this crate can fail to execute a search. When a search fails, callers cannot determine whether or not a match exists. That is, the result is indeterminate. Search failure, in all cases in this crate, is represented by a [`MatchError`]. Routines that can fail start with the `try_` prefix in their name. For example, [`hybrid::regex::Regex::try_search`] can fail for a number of reasons. Conversely, routines that either can't fail or can panic on failure lack the `try_` prefix. For example, [`hybrid::regex::Regex::find`] will panic in cases where [`hybrid::regex::Regex::try_search`] would return an error, and [`meta::Regex::find`] will never panic. Therefore, callers need to pay close attention to the panicking conditions in the documentation. In most cases, the reasons that a search fails are either predictable or configurable, albeit at some additional cost. An example of predictable failure is [`BoundedBacktracker::try_search`](nfa::thompson::backtrack::BoundedBacktracker::try_search). Namely, it fails whenever the multiplication of the haystack, the regex and some constant exceeds the [configured visited capacity](nfa::thompson::backtrack::Config::visited_capacity). Callers can predict the failure in terms of haystack length via the [`BoundedBacktracker::max_haystack_len`](nfa::thompson::backtrack::BoundedBacktracker::max_haystack_len) method. 
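For instance, a caller might guard a search as in the following minimal sketch, where the pattern and haystack are arbitrary examples:

```
use regex_automata::nfa::thompson::backtrack::BoundedBacktracker;

let re = BoundedBacktracker::new(r"(?m)^\w+$")?;
let mut cache = re.create_cache();

let hay = "crossword";
// Only run the search when the haystack fits within the configured
// visited capacity; otherwise the search would return a MatchError.
if hay.len() <= re.max_haystack_len() {
    let m = re.try_find(&mut cache, hay)?;
    assert!(m.is_some());
}
# Ok::<(), Box<dyn std::error::Error>>(())
```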
While this form of failure is technically avoidable by increasing the visited capacity, it isn't practical to do so for all inputs because the memory usage required for larger haystacks becomes impractically large. So in practice, if one is using the bounded backtracker, you really do have to deal with the failure.

An example of configurable failure happens when one enables heuristic support for Unicode word boundaries in a DFA. Namely, since the DFAs in this crate (except for the one-pass DFA) do not support Unicode word boundaries on non-ASCII haystacks, building a DFA from an NFA that contains a Unicode word boundary will itself fail. However, one can configure DFAs to still be built in this case by [configuring heuristic support for Unicode word boundaries](hybrid::dfa::Config::unicode_word_boundary). If the NFA the DFA is built from contains a Unicode word boundary, then the DFA will still be built, but special transitions will be added to every state that cause the DFA to fail if any non-ASCII byte is seen. This failure happens at search time and it requires the caller to opt into this.

There are other ways for regex engines to fail in this crate, but the above two should represent the general theme of failures one can find. Dealing with these failures is, in part, one of the responsibilities of the [meta regex engine](meta). Notice, for example, that the meta regex engine exposes an API that never returns an error nor panics. It carefully manages all of the ways in which the regex engines can fail and either avoids the predictable ones entirely (e.g., the bounded backtracker) or reacts to configured failures by falling back to a different engine (e.g., the lazy DFA quitting because it saw a non-ASCII byte).

### Configuration and Builders

Most of the regex engines in this crate come with two types to facilitate building the regex engine: a `Config` and a `Builder`.

A `Config` is usually specific to that particular regex engine, but other objects such as parsing and NFA compilation have `Config` types too. A `Builder` is the thing responsible for taking inputs (either pattern strings or already-parsed patterns or even NFAs directly) and turning them into an actual regex engine that can be used for searching.

The main reason why building a regex engine is a bit complicated is because of the desire to permit composition with de-coupled components. For example, you might want to [manually construct a Thompson NFA](nfa::thompson::Builder) and then build a regex engine from it without ever using a regex parser at all. On the other hand, you might also want to build a regex engine directly from the concrete syntax. This demonstrates why regex engine construction is so flexible: it needs to support not just convenient construction, but also construction from parts built elsewhere.

This is also in turn why there are many different `Config` structs in this crate. Let's look more closely at an example: [`hybrid::regex::Builder`]. It accepts three different `Config` types for configuring construction of a lazy DFA regex:

* [`hybrid::regex::Builder::syntax`] accepts a [`util::syntax::Config`] for configuring the options found in the [`regex-syntax`](regex_syntax) crate. For example, whether to match case insensitively.
* [`hybrid::regex::Builder::thompson`] accepts a [`nfa::thompson::Config`] for configuring construction of a [Thompson NFA](nfa::thompson::NFA). For example, whether to build an NFA that matches the reverse language described by the regex.
* [`hybrid::regex::Builder::dfa`] accepts a [`hybrid::dfa::Config`] for configuring construction of the pair of underlying lazy DFAs that make up the lazy DFA regex engine. For example, changing the capacity of the cache used to store the transition table.

The lazy DFA regex engine uses all three of those configuration objects for methods like [`hybrid::regex::Builder::build`], which accepts a pattern string containing the concrete syntax of your regex. It uses the syntax configuration to parse it into an AST and translate it into an HIR. Then the NFA configuration when compiling the HIR into an NFA. And then finally the DFA configuration when lazily determinizing the NFA into a DFA.

Notice though that the builder also has a [`hybrid::regex::Builder::build_from_dfas`] constructor. This permits callers to build the underlying pair of lazy DFAs themselves (one for the forward searching to find the end of a match and one for the reverse searching to find the start of a match), and then build the regex engine from them. The lazy DFAs, in turn, have their own builder that permits [construction directly from a Thompson NFA](hybrid::dfa::Builder::build_from_nfa). Continuing down the rabbit hole, a Thompson NFA has its own compiler that permits [construction directly from an HIR](nfa::thompson::Compiler::build_from_hir). The lazy DFA regex engine builder lets you follow this rabbit hole all the way down, but also provides convenience routines that do it for you when you don't need precise control over every component.

The [meta regex engine](meta) is a good example of something that utilizes the full flexibility of these builders. It often needs not only precise control over each component, but also shares them across multiple regex engines. (Most sharing is done by internal reference accounting. For example, an [`NFA`](nfa::thompson::NFA) is reference counted internally which makes cloning cheap.)

### Size limits

Unlike the `regex` crate, the `regex-automata` crate specifically does not enable any size limits by default. That means users of this crate need to be quite careful when using untrusted patterns. Namely, because bounded repetitions can grow exponentially by stacking them, it is possible to build a very large internal regex object from just a small pattern string. For example, the NFA built from the pattern `a{10}{10}{10}{10}{10}{10}{10}` is over 240MB.

There are multiple size limit options in this crate. If one or more size limits are relevant for the object you're building, they will be configurable via methods on a corresponding `Config` type (a brief sketch appears below, just before the list of feature groups).

# Crate features

This crate has a dizzying number of features. The main idea is to be able to control how much stuff you pull in for your specific use case, since the full crate is quite large and can dramatically increase compile times and binary size.

The most barebones but useful configuration is to disable all default features and enable only `dfa-search`. This will bring in just the DFA deserialization and search routines without any dependency on `std` or `alloc`. This does require generating and serializing a DFA, and then storing it somewhere, but it permits regex searches in freestanding or embedded environments.

Because there are so many features, they are split into a few groups. The default set of features is: `std`, `syntax`, `perf`, `unicode`, `meta`, `nfa`, `dfa` and `hybrid`. Basically, the default is to enable everything except for development related features like `logging`.
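Before describing each feature group, here is a brief sketch of the size limits mentioned in the previous section. It assumes the `syntax` and `nfa-thompson` features are enabled and uses the Thompson NFA compiler's `nfa_size_limit` option as one example of such a `Config` method:

```
use regex_automata::nfa::thompson::NFA;

// With a roughly 1MB cap on the compiled NFA, the exponentially nested
// repetition shown earlier is rejected at build time instead of producing
// an enormous NFA.
let result = NFA::compiler()
    .configure(NFA::config().nfa_size_limit(Some(1 << 20)))
    .build(r"a{10}{10}{10}{10}{10}{10}{10}");
assert!(result.is_err());
```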
### Ecosystem features * **std** - Enables use of the standard library. In terms of APIs, this usually just means that error types implement the `std::error::Error` trait. Otherwise, `std` sometimes enables the code to be faster, for example, using a `HashMap` instead of a `BTreeMap`. (The `std` feature matters more for dependencies like `aho-corasick` and `memchr`, where `std` is required to enable certain classes of SIMD optimizations.) Enabling `std` automatically enables `alloc`. * **alloc** - Enables use of the `alloc` library. This is required for most APIs in this crate. The main exception is deserializing and searching with fully compiled DFAs. * **logging** - Adds a dependency on the `log` crate and makes this crate emit log messages of varying degrees of utility. The log messages are especially useful in trying to understand what the meta regex engine is doing. ### Performance features * **perf** - Enables all of the below features. * **perf-inline** - When enabled, `inline(always)` is used in (many) strategic locations to help performance at the expense of longer compile times and increased binary size. * **perf-literal** - Enables all literal related optimizations. * **perf-literal-substring** - Enables all single substring literal optimizations. This includes adding a dependency on the `memchr` crate. * **perf-literal-multisubstring** - Enables all multiple substring literal optimizations. This includes adding a dependency on the `aho-corasick` crate. ### Unicode features * **unicode** - Enables all Unicode features. This feature is enabled by default, and will always cover all Unicode features, even if more are added in the future. * **unicode-age** - Provide the data for the [Unicode `Age` property](https://www.unicode.org/reports/tr44/tr44-24.html#Character_Age). This makes it possible to use classes like `\p{Age:6.0}` to refer to all codepoints first introduced in Unicode 6.0 * **unicode-bool** - Provide the data for numerous Unicode boolean properties. The full list is not included here, but contains properties like `Alphabetic`, `Emoji`, `Lowercase`, `Math`, `Uppercase` and `White_Space`. * **unicode-case** - Provide the data for case insensitive matching using [Unicode's "simple loose matches" specification](https://www.unicode.org/reports/tr18/#Simple_Loose_Matches). * **unicode-gencat** - Provide the data for [Unicode general categories](https://www.unicode.org/reports/tr44/tr44-24.html#General_Category_Values). This includes, but is not limited to, `Decimal_Number`, `Letter`, `Math_Symbol`, `Number` and `Punctuation`. * **unicode-perl** - Provide the data for supporting the Unicode-aware Perl character classes, corresponding to `\w`, `\s` and `\d`. This is also necessary for using Unicode-aware word boundary assertions. Note that if this feature is disabled, the `\s` and `\d` character classes are still available if the `unicode-bool` and `unicode-gencat` features are enabled, respectively. * **unicode-script** - Provide the data for [Unicode scripts and script extensions](https://www.unicode.org/reports/tr24/). This includes, but is not limited to, `Arabic`, `Cyrillic`, `Hebrew`, `Latin` and `Thai`. * **unicode-segment** - Provide the data necessary to provide the properties used to implement the [Unicode text segmentation algorithms](https://www.unicode.org/reports/tr29/). This enables using classes like `\p{gcb=Extend}`, `\p{wb=Katakana}` and `\p{sb=ATerm}`. * **unicode-word-boundary** - Enables support for Unicode word boundaries, i.e., `\b`, in regexes. 
When this and `unicode-perl` are enabled, then data tables from `regex-syntax` are used to implement Unicode word boundaries. However, if `regex-syntax` isn't enabled as a dependency then one can still enable this feature. It will cause `regex-automata` to bundle its own data table that would otherwise be redundant with `regex-syntax`'s table. ### Regex engine features * **syntax** - Enables a dependency on `regex-syntax`. This makes APIs for building regex engines from pattern strings available. Without the `regex-syntax` dependency, the only way to build a regex engine is generally to deserialize a previously built DFA or to hand assemble an NFA using its [builder API](nfa::thompson::Builder). Once you have an NFA, you can build any of the regex engines in this crate. The `syntax` feature also enables `alloc`. * **meta** - Enables the meta regex engine. This also enables the `syntax` and `nfa-pikevm` features, as both are the minimal requirements needed. The meta regex engine benefits from enabling any of the other regex engines and will use them automatically when appropriate. * **nfa** - Enables all NFA related features below. * **nfa-thompson** - Enables the Thompson NFA APIs. This enables `alloc`. * **nfa-pikevm** - Enables the PikeVM regex engine. This enables `nfa-thompson`. * **nfa-backtrack** - Enables the bounded backtracker regex engine. This enables `nfa-thompson`. * **dfa** - Enables all DFA related features below. * **dfa-build** - Enables APIs for determinizing DFAs from NFAs. This enables `nfa-thompson` and `dfa-search`. * **dfa-search** - Enables APIs for searching with DFAs. * **dfa-onepass** - Enables the one-pass DFA API. This enables `nfa-thompson`. * **hybrid** - Enables the hybrid NFA/DFA or "lazy DFA" regex engine. This enables `alloc` and `nfa-thompson`. */ // We are no_std. #![no_std] // All APIs need docs! #![deny(missing_docs)] // Some intra-doc links are broken when certain features are disabled, so we // only bleat about it when most (all?) features are enabled. But when we do, // we block the build. Links need to work. #![cfg_attr( all( feature = "std", feature = "nfa", feature = "dfa", feature = "hybrid" ), deny(rustdoc::broken_intra_doc_links) )] // Broken rustdoc links are very easy to come by when you start disabling // features. Namely, features tend to change imports, and imports change what's // available to link to. // // Basically, we just don't support rustdoc for anything other than the maximal // feature configuration. Other configurations will work, they just won't be // perfect. // // So here, we specifically allow them so we don't even get warned about them. #![cfg_attr( not(all( feature = "std", feature = "nfa", feature = "dfa", feature = "hybrid" )), allow(rustdoc::broken_intra_doc_links) )] // Kinda similar, but eliminating all of the dead code and unused import // warnings for every feature combo is a fool's errand. Instead, we just // suppress those, but still let them through in a common configuration when we // build most of everything. // // This does actually suggest that when features are disabled, we are actually // compiling more code than we need to be. And this is perhaps not so great // because disabling features is usually done in order to reduce compile times // by reducing the amount of code one compiles... However, usually, most of the // time this dead code is a relatively small amount from the 'util' module. // But... I confess... There isn't a ton of visibility on this. 
// // I'm happy to try to address this in a different way, but "let's annotate // every function in 'util' with some non-local combination of features" just // cannot be the way forward. #![cfg_attr( not(all( feature = "std", feature = "nfa", feature = "dfa", feature = "hybrid", feature = "perf-literal-substring", feature = "perf-literal-multisubstring", )), allow(dead_code, unused_imports, unused_variables) )] // We generally want all types to impl Debug. #![warn(missing_debug_implementations)] // No clue why this thing is still unstable because it's pretty amazing. This // adds Cargo feature annotations to items in the rustdoc output. Which is // sadly hugely beneficial for this crate due to the number of features. #![cfg_attr(docsrs, feature(doc_auto_cfg))] // I have literally never tested this crate on 16-bit, so it is quite // suspicious to advertise support for it. But... the regex crate, at time // of writing, at least claims to support it by not doing any conditional // compilation based on the target pointer width. So I guess I remain // consistent with that here. // // If you are here because you're on a 16-bit system and you were somehow using // the regex crate previously, please file an issue. Please be prepared to // provide some kind of reproduction or carve out some path to getting 16-bit // working in CI. (Via qemu?) #[cfg(not(any( target_pointer_width = "16", target_pointer_width = "32", target_pointer_width = "64" )))] compile_error!("not supported on non-{16,32,64}, please file an issue"); #[cfg(any(test, feature = "std"))] extern crate std; #[cfg(feature = "alloc")] extern crate alloc; #[cfg(doctest)] doc_comment::doctest!("../README.md"); #[doc(inline)] pub use crate::util::primitives::PatternID; pub use crate::util::search::*; #[macro_use] mod macros; #[cfg(any(feature = "dfa-search", feature = "dfa-onepass"))] pub mod dfa; #[cfg(feature = "hybrid")] pub mod hybrid; #[cfg(feature = "meta")] pub mod meta; #[cfg(feature = "nfa-thompson")] pub mod nfa; pub mod util; <file_sep>/regex-automata/src/util/wire.rs /*! Types and routines that support the wire format of finite automata. Currently, this module just exports a few error types and some small helpers for deserializing [dense DFAs](crate::dfa::dense::DFA) using correct alignment. */ /* A collection of helper functions, types and traits for serializing automata. This crate defines its own bespoke serialization mechanism for some structures provided in the public API, namely, DFAs. A bespoke mechanism was developed primarily because structures like automata demand a specific binary format. Attempting to encode their rich structure in an existing serialization format is just not feasible. Moreover, the format for each structure is generally designed such that deserialization is cheap. More specifically, that deserialization can be done in constant time. (The idea being that you can embed it into your binary or mmap it, and then use it immediately.) In order to achieve this, the dense and sparse DFAs in this crate use an in-memory representation that very closely corresponds to its binary serialized form. This pervades and complicates everything, and in some cases, requires dealing with alignment and reasoning about safety. This technique does have major advantages. In particular, it permits doing the potentially costly work of compiling a finite state machine in an offline manner, and then loading it at runtime not only without having to re-compile the regex, but even without the code required to do the compilation. 
This, for example, permits one to use a pre-compiled DFA not only in
environments without Rust's standard library, but also in environments without
a heap.

In the code below, whenever we insert some kind of padding, it's to enforce a
4-byte alignment, unless otherwise noted. Namely, u32 is the only state ID type
supported. (In a previous version of this library, DFAs were generic over the
state ID representation.)

Also, serialization generally requires the caller to specify endianness,
whereas deserialization always assumes native endianness (otherwise cheap
deserialization would be impossible). This implies that serializing a structure
generally requires serializing both its big-endian and little-endian variants,
and then loading the correct one based on the target's endianness.
*/

use core::{
    cmp,
    convert::{TryFrom, TryInto},
    mem::size_of,
};

#[cfg(feature = "alloc")]
use alloc::{vec, vec::Vec};

use crate::util::{
    int::Pointer,
    primitives::{PatternID, PatternIDError, StateID, StateIDError},
};

/// A hack to align a smaller type `B` with a bigger type `T`.
///
/// The usual use of this is with `B = [u8]` and `T = u32`. That is,
/// it permits aligning a sequence of bytes on a 4-byte boundary. This
/// is useful in contexts where one wants to embed a serialized [dense
/// DFA](crate::dfa::dense::DFA) into a Rust program while guaranteeing the
/// alignment required for the DFA.
///
/// See [`dense::DFA::from_bytes`](crate::dfa::dense::DFA::from_bytes) for an
/// example of how to use this type.
#[repr(C)]
#[derive(Debug)]
pub struct AlignAs<B: ?Sized, T> {
    /// A zero-sized field indicating the alignment we want.
    pub _align: [T; 0],
    /// A possibly non-sized field containing a sequence of bytes.
    pub bytes: B,
}

/// An error that occurs when serializing an object from this crate.
///
/// Serialization, as used in this crate, universally refers to the process
/// of transforming a structure (like a DFA) into a custom binary format
/// represented by `&[u8]`. To this end, serialization is generally infallible.
/// However, it can fail when caller provided buffer sizes are too small. When
/// that occurs, a serialization error is reported.
///
/// A `SerializeError` provides no introspection capabilities. Its only
/// supported operation is conversion to a human readable error message.
///
/// This error type implements the `std::error::Error` trait only when the
/// `std` feature is enabled. Otherwise, this type is defined in all
/// configurations.
#[derive(Debug)]
pub struct SerializeError {
    /// The name of the thing that a buffer is too small for.
    ///
    /// Currently, the only kind of serialization error is one that is
    /// committed by a caller: providing a destination buffer that is too
    /// small to fit the serialized object. This makes sense conceptually,
    /// since every valid inhabitant of a type should be serializable.
    ///
    /// This is somewhat exposed in the public API of this crate. For example,
    /// the `to_bytes_{big,little}_endian` APIs return a `Vec<u8>` and are
    /// guaranteed to never panic or error. This is only possible because the
    /// implementation guarantees that it will allocate a `Vec<u8>` that is
    /// big enough.
    ///
    /// In summary, if a new serialization error kind needs to be added, then
    /// it will need careful consideration.
what: &'static str, } impl SerializeError { pub(crate) fn buffer_too_small(what: &'static str) -> SerializeError { SerializeError { what } } } impl core::fmt::Display for SerializeError { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { write!(f, "destination buffer is too small to write {}", self.what) } } #[cfg(feature = "std")] impl std::error::Error for SerializeError {} /// An error that occurs when deserializing an object defined in this crate. /// /// Serialization, as used in this crate, universally refers to the process /// of transforming a structure (like a DFA) into a custom binary format /// represented by `&[u8]`. Deserialization, then, refers to the process of /// cheaply converting this binary format back to the object's in-memory /// representation as defined in this crate. To the extent possible, /// deserialization will report this error whenever this process fails. /// /// A `DeserializeError` provides no introspection capabilities. Its only /// supported operation is conversion to a human readable error message. /// /// This error type implements the `std::error::Error` trait only when the /// `std` feature is enabled. Otherwise, this type is defined in all /// configurations. #[derive(Debug)] pub struct DeserializeError(DeserializeErrorKind); #[derive(Debug)] enum DeserializeErrorKind { Generic { msg: &'static str }, BufferTooSmall { what: &'static str }, InvalidUsize { what: &'static str }, VersionMismatch { expected: u32, found: u32 }, EndianMismatch { expected: u32, found: u32 }, AlignmentMismatch { alignment: usize, address: usize }, LabelMismatch { expected: &'static str }, ArithmeticOverflow { what: &'static str }, PatternID { err: PatternIDError, what: &'static str }, StateID { err: StateIDError, what: &'static str }, } impl DeserializeError { pub(crate) fn generic(msg: &'static str) -> DeserializeError { DeserializeError(DeserializeErrorKind::Generic { msg }) } pub(crate) fn buffer_too_small(what: &'static str) -> DeserializeError { DeserializeError(DeserializeErrorKind::BufferTooSmall { what }) } fn invalid_usize(what: &'static str) -> DeserializeError { DeserializeError(DeserializeErrorKind::InvalidUsize { what }) } fn version_mismatch(expected: u32, found: u32) -> DeserializeError { DeserializeError(DeserializeErrorKind::VersionMismatch { expected, found, }) } fn endian_mismatch(expected: u32, found: u32) -> DeserializeError { DeserializeError(DeserializeErrorKind::EndianMismatch { expected, found, }) } fn alignment_mismatch( alignment: usize, address: usize, ) -> DeserializeError { DeserializeError(DeserializeErrorKind::AlignmentMismatch { alignment, address, }) } fn label_mismatch(expected: &'static str) -> DeserializeError { DeserializeError(DeserializeErrorKind::LabelMismatch { expected }) } fn arithmetic_overflow(what: &'static str) -> DeserializeError { DeserializeError(DeserializeErrorKind::ArithmeticOverflow { what }) } fn pattern_id_error( err: PatternIDError, what: &'static str, ) -> DeserializeError { DeserializeError(DeserializeErrorKind::PatternID { err, what }) } pub(crate) fn state_id_error( err: StateIDError, what: &'static str, ) -> DeserializeError { DeserializeError(DeserializeErrorKind::StateID { err, what }) } } #[cfg(feature = "std")] impl std::error::Error for DeserializeError {} impl core::fmt::Display for DeserializeError { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { use self::DeserializeErrorKind::*; match self.0 { Generic { msg } => write!(f, "{}", msg), BufferTooSmall { what } => { 
write!(f, "buffer is too small to read {}", what) } InvalidUsize { what } => { write!(f, "{} is too big to fit in a usize", what) } VersionMismatch { expected, found } => write!( f, "unsupported version: \ expected version {} but found version {}", expected, found, ), EndianMismatch { expected, found } => write!( f, "endianness mismatch: expected 0x{:X} but got 0x{:X}. \ (Are you trying to load an object serialized with a \ different endianness?)", expected, found, ), AlignmentMismatch { alignment, address } => write!( f, "alignment mismatch: slice starts at address \ 0x{:X}, which is not aligned to a {} byte boundary", address, alignment, ), LabelMismatch { expected } => write!( f, "label mismatch: start of serialized object should \ contain a NUL terminated {:?} label, but a different \ label was found", expected, ), ArithmeticOverflow { what } => { write!(f, "arithmetic overflow for {}", what) } PatternID { ref err, what } => { write!(f, "failed to read pattern ID for {}: {}", what, err) } StateID { ref err, what } => { write!(f, "failed to read state ID for {}: {}", what, err) } } } } /// Safely converts a `&[u32]` to `&[StateID]` with zero cost. #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn u32s_to_state_ids(slice: &[u32]) -> &[StateID] { // SAFETY: This is safe because StateID is defined to have the same memory // representation as a u32 (it is repr(transparent)). While not every u32 // is a "valid" StateID, callers are not permitted to rely on the validity // of StateIDs for memory safety. It can only lead to logical errors. (This // is why StateID::new_unchecked is safe.) unsafe { core::slice::from_raw_parts( slice.as_ptr().cast::<StateID>(), slice.len(), ) } } /// Safely converts a `&mut [u32]` to `&mut [StateID]` with zero cost. pub(crate) fn u32s_to_state_ids_mut(slice: &mut [u32]) -> &mut [StateID] { // SAFETY: This is safe because StateID is defined to have the same memory // representation as a u32 (it is repr(transparent)). While not every u32 // is a "valid" StateID, callers are not permitted to rely on the validity // of StateIDs for memory safety. It can only lead to logical errors. (This // is why StateID::new_unchecked is safe.) unsafe { core::slice::from_raw_parts_mut( slice.as_mut_ptr().cast::<StateID>(), slice.len(), ) } } /// Safely converts a `&[u32]` to `&[PatternID]` with zero cost. #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn u32s_to_pattern_ids(slice: &[u32]) -> &[PatternID] { // SAFETY: This is safe because PatternID is defined to have the same // memory representation as a u32 (it is repr(transparent)). While not // every u32 is a "valid" PatternID, callers are not permitted to rely // on the validity of PatternIDs for memory safety. It can only lead to // logical errors. (This is why PatternID::new_unchecked is safe.) unsafe { core::slice::from_raw_parts( slice.as_ptr().cast::<PatternID>(), slice.len(), ) } } /// Checks that the given slice has an alignment that matches `T`. /// /// This is useful for checking that a slice has an appropriate alignment /// before casting it to a &[T]. Note though that alignment is not itself /// sufficient to perform the cast for any `T`. 
pub(crate) fn check_alignment<T>( slice: &[u8], ) -> Result<(), DeserializeError> { let alignment = core::mem::align_of::<T>(); let address = slice.as_ptr().as_usize(); if address % alignment == 0 { return Ok(()); } Err(DeserializeError::alignment_mismatch(alignment, address)) } /// Reads a possibly empty amount of padding, up to 7 bytes, from the beginning /// of the given slice. All padding bytes must be NUL bytes. /// /// This is useful because it can be theoretically necessary to pad the /// beginning of a serialized object with NUL bytes to ensure that it starts /// at a correctly aligned address. These padding bytes should come immediately /// before the label. /// /// This returns the number of bytes read from the given slice. pub(crate) fn skip_initial_padding(slice: &[u8]) -> usize { let mut nread = 0; while nread < 7 && nread < slice.len() && slice[nread] == 0 { nread += 1; } nread } /// Allocate a byte buffer of the given size, along with some initial padding /// such that `buf[padding..]` has the same alignment as `T`, where the /// alignment of `T` must be at most `8`. In particular, callers should treat /// the first N bytes (second return value) as padding bytes that must not be /// overwritten. In all cases, the following identity holds: /// /// ```ignore /// let (buf, padding) = alloc_aligned_buffer::<StateID>(SIZE); /// assert_eq!(SIZE, buf[padding..].len()); /// ``` /// /// In practice, padding is often zero. /// /// The requirement for `8` as a maximum here is somewhat arbitrary. In /// practice, we never need anything bigger in this crate, and so this function /// does some sanity asserts under the assumption of a max alignment of `8`. #[cfg(feature = "alloc")] pub(crate) fn alloc_aligned_buffer<T>(size: usize) -> (Vec<u8>, usize) { // NOTE: This is a kludge because there's no easy way to allocate a Vec<u8> // with an alignment guaranteed to be greater than 1. We could create a // Vec<u32>, but this cannot be safely transmuted to a Vec<u8> without // concern, since reallocing or dropping the Vec<u8> is UB (different // alignment than the initial allocation). We could define a wrapper type // to manage this for us, but it seems like more machinery than it's worth. let buf = vec![0; size]; let align = core::mem::align_of::<T>(); let address = buf.as_ptr().as_usize(); if address % align == 0 { return (buf, 0); } // Let's try this again. We have to create a totally new alloc with // the maximum amount of bytes we might need. We can't just extend our // pre-existing 'buf' because that might create a new alloc with a // different alignment. let extra = align - 1; let mut buf = vec![0; size + extra]; let address = buf.as_ptr().as_usize(); // The code below handles the case where 'address' is aligned to T, so if // we got lucky and 'address' is now aligned to T (when it previously // wasn't), then we're done. if address % align == 0 { buf.truncate(size); return (buf, 0); } let padding = ((address & !(align - 1)).checked_add(align).unwrap()) .checked_sub(address) .unwrap(); assert!(padding <= 7, "padding of {} is bigger than 7", padding); assert!( padding <= extra, "padding of {} is bigger than extra {} bytes", padding, extra ); buf.truncate(size + padding); assert_eq!(size + padding, buf.len()); assert_eq!( 0, buf[padding..].as_ptr().as_usize() % align, "expected end of initial padding to be aligned to {}", align, ); (buf, padding) } /// Reads a NUL terminated label starting at the beginning of the given slice. 
/// /// If a NUL terminated label could not be found, then an error is returned. /// Similarly, if a label is found but doesn't match the expected label, then /// an error is returned. /// /// Upon success, the total number of bytes read (including padding bytes) is /// returned. pub(crate) fn read_label( slice: &[u8], expected_label: &'static str, ) -> Result<usize, DeserializeError> { // Set an upper bound on how many bytes we scan for a NUL. Since no label // in this crate is longer than 256 bytes, if we can't find one within that // range, then we have corrupted data. let first_nul = slice[..cmp::min(slice.len(), 256)].iter().position(|&b| b == 0); let first_nul = match first_nul { Some(first_nul) => first_nul, None => { return Err(DeserializeError::generic( "could not find NUL terminated label \ at start of serialized object", )); } }; let len = first_nul + padding_len(first_nul); if slice.len() < len { return Err(DeserializeError::generic( "could not find properly sized label at start of serialized object" )); } if expected_label.as_bytes() != &slice[..first_nul] { return Err(DeserializeError::label_mismatch(expected_label)); } Ok(len) } /// Writes the given label to the buffer as a NUL terminated string. The label /// given must not contain NUL, otherwise this will panic. Similarly, the label /// must not be longer than 255 bytes, otherwise this will panic. /// /// Additional NUL bytes are written as necessary to ensure that the number of /// bytes written is always a multiple of 4. /// /// Upon success, the total number of bytes written (including padding) is /// returned. pub(crate) fn write_label( label: &str, dst: &mut [u8], ) -> Result<usize, SerializeError> { let nwrite = write_label_len(label); if dst.len() < nwrite { return Err(SerializeError::buffer_too_small("label")); } dst[..label.len()].copy_from_slice(label.as_bytes()); for i in 0..(nwrite - label.len()) { dst[label.len() + i] = 0; } assert_eq!(nwrite % 4, 0); Ok(nwrite) } /// Returns the total number of bytes (including padding) that would be written /// for the given label. This panics if the given label contains a NUL byte or /// is longer than 255 bytes. (The size restriction exists so that searching /// for a label during deserialization can be done in small bounded space.) pub(crate) fn write_label_len(label: &str) -> usize { if label.len() > 255 { panic!("label must not be longer than 255 bytes"); } if label.as_bytes().iter().position(|&b| b == 0).is_some() { panic!("label must not contain NUL bytes"); } let label_len = label.len() + 1; // +1 for the NUL terminator label_len + padding_len(label_len) } /// Reads the endianness check from the beginning of the given slice and /// confirms that the endianness of the serialized object matches the expected /// endianness. If the slice is too small or if the endianness check fails, /// this returns an error. /// /// Upon success, the total number of bytes read is returned. pub(crate) fn read_endianness_check( slice: &[u8], ) -> Result<usize, DeserializeError> { let (n, nr) = try_read_u32(slice, "endianness check")?; assert_eq!(nr, write_endianness_check_len()); if n != 0xFEFF { return Err(DeserializeError::endian_mismatch(0xFEFF, n)); } Ok(nr) } /// Writes 0xFEFF as an integer using the given endianness. /// /// This is useful for writing into the header of a serialized object. It can /// be read during deserialization as a sanity check to ensure the proper /// endianness is used. /// /// Upon success, the total number of bytes written is returned. 
pub(crate) fn write_endianness_check<E: Endian>( dst: &mut [u8], ) -> Result<usize, SerializeError> { let nwrite = write_endianness_check_len(); if dst.len() < nwrite { return Err(SerializeError::buffer_too_small("endianness check")); } E::write_u32(0xFEFF, dst); Ok(nwrite) } /// Returns the number of bytes written by the endianness check. pub(crate) fn write_endianness_check_len() -> usize { size_of::<u32>() } /// Reads a version number from the beginning of the given slice and confirms /// that is matches the expected version number given. If the slice is too /// small or if the version numbers aren't equivalent, this returns an error. /// /// Upon success, the total number of bytes read is returned. /// /// N.B. Currently, we require that the version number is exactly equivalent. /// In the future, if we bump the version number without a semver bump, then /// we'll need to relax this a bit and support older versions. pub(crate) fn read_version( slice: &[u8], expected_version: u32, ) -> Result<usize, DeserializeError> { let (n, nr) = try_read_u32(slice, "version")?; assert_eq!(nr, write_version_len()); if n != expected_version { return Err(DeserializeError::version_mismatch(expected_version, n)); } Ok(nr) } /// Writes the given version number to the beginning of the given slice. /// /// This is useful for writing into the header of a serialized object. It can /// be read during deserialization as a sanity check to ensure that the library /// code supports the format of the serialized object. /// /// Upon success, the total number of bytes written is returned. pub(crate) fn write_version<E: Endian>( version: u32, dst: &mut [u8], ) -> Result<usize, SerializeError> { let nwrite = write_version_len(); if dst.len() < nwrite { return Err(SerializeError::buffer_too_small("version number")); } E::write_u32(version, dst); Ok(nwrite) } /// Returns the number of bytes written by writing the version number. pub(crate) fn write_version_len() -> usize { size_of::<u32>() } /// Reads a pattern ID from the given slice. If the slice has insufficient /// length, then this panics. If the deserialized integer exceeds the pattern /// ID limit for the current target, then this returns an error. /// /// Upon success, this also returns the number of bytes read. pub(crate) fn read_pattern_id( slice: &[u8], what: &'static str, ) -> Result<(PatternID, usize), DeserializeError> { let bytes: [u8; PatternID::SIZE] = slice[..PatternID::SIZE].try_into().unwrap(); let pid = PatternID::from_ne_bytes(bytes) .map_err(|err| DeserializeError::pattern_id_error(err, what))?; Ok((pid, PatternID::SIZE)) } /// Reads a pattern ID from the given slice. If the slice has insufficient /// length, then this panics. Otherwise, the deserialized integer is assumed /// to be a valid pattern ID. /// /// This also returns the number of bytes read. pub(crate) fn read_pattern_id_unchecked(slice: &[u8]) -> (PatternID, usize) { let pid = PatternID::from_ne_bytes_unchecked( slice[..PatternID::SIZE].try_into().unwrap(), ); (pid, PatternID::SIZE) } /// Write the given pattern ID to the beginning of the given slice of bytes /// using the specified endianness. The given slice must have length at least /// `PatternID::SIZE`, or else this panics. Upon success, the total number of /// bytes written is returned. pub(crate) fn write_pattern_id<E: Endian>( pid: PatternID, dst: &mut [u8], ) -> usize { E::write_u32(pid.as_u32(), dst); PatternID::SIZE } /// Attempts to read a state ID from the given slice. 
If the slice has an /// insufficient number of bytes or if the state ID exceeds the limit for /// the current target, then this returns an error. /// /// Upon success, this also returns the number of bytes read. pub(crate) fn try_read_state_id( slice: &[u8], what: &'static str, ) -> Result<(StateID, usize), DeserializeError> { if slice.len() < StateID::SIZE { return Err(DeserializeError::buffer_too_small(what)); } read_state_id(slice, what) } /// Reads a state ID from the given slice. If the slice has insufficient /// length, then this panics. If the deserialized integer exceeds the state ID /// limit for the current target, then this returns an error. /// /// Upon success, this also returns the number of bytes read. pub(crate) fn read_state_id( slice: &[u8], what: &'static str, ) -> Result<(StateID, usize), DeserializeError> { let bytes: [u8; StateID::SIZE] = slice[..StateID::SIZE].try_into().unwrap(); let sid = StateID::from_ne_bytes(bytes) .map_err(|err| DeserializeError::state_id_error(err, what))?; Ok((sid, StateID::SIZE)) } /// Reads a state ID from the given slice. If the slice has insufficient /// length, then this panics. Otherwise, the deserialized integer is assumed /// to be a valid state ID. /// /// This also returns the number of bytes read. pub(crate) fn read_state_id_unchecked(slice: &[u8]) -> (StateID, usize) { let sid = StateID::from_ne_bytes_unchecked( slice[..StateID::SIZE].try_into().unwrap(), ); (sid, StateID::SIZE) } /// Write the given state ID to the beginning of the given slice of bytes /// using the specified endianness. The given slice must have length at least /// `StateID::SIZE`, or else this panics. Upon success, the total number of /// bytes written is returned. pub(crate) fn write_state_id<E: Endian>( sid: StateID, dst: &mut [u8], ) -> usize { E::write_u32(sid.as_u32(), dst); StateID::SIZE } /// Try to read a u16 as a usize from the beginning of the given slice in /// native endian format. If the slice has fewer than 2 bytes or if the /// deserialized number cannot be represented by usize, then this returns an /// error. The error message will include the `what` description of what is /// being deserialized, for better error messages. `what` should be a noun in /// singular form. /// /// Upon success, this also returns the number of bytes read. pub(crate) fn try_read_u16_as_usize( slice: &[u8], what: &'static str, ) -> Result<(usize, usize), DeserializeError> { try_read_u16(slice, what).and_then(|(n, nr)| { usize::try_from(n) .map(|n| (n, nr)) .map_err(|_| DeserializeError::invalid_usize(what)) }) } /// Try to read a u32 as a usize from the beginning of the given slice in /// native endian format. If the slice has fewer than 4 bytes or if the /// deserialized number cannot be represented by usize, then this returns an /// error. The error message will include the `what` description of what is /// being deserialized, for better error messages. `what` should be a noun in /// singular form. /// /// Upon success, this also returns the number of bytes read. pub(crate) fn try_read_u32_as_usize( slice: &[u8], what: &'static str, ) -> Result<(usize, usize), DeserializeError> { try_read_u32(slice, what).and_then(|(n, nr)| { usize::try_from(n) .map(|n| (n, nr)) .map_err(|_| DeserializeError::invalid_usize(what)) }) } /// Try to read a u16 from the beginning of the given slice in native endian /// format. If the slice has fewer than 2 bytes, then this returns an error. 
/// The error message will include the `what` description of what is being /// deserialized, for better error messages. `what` should be a noun in /// singular form. /// /// Upon success, this also returns the number of bytes read. pub(crate) fn try_read_u16( slice: &[u8], what: &'static str, ) -> Result<(u16, usize), DeserializeError> { check_slice_len(slice, size_of::<u16>(), what)?; Ok((read_u16(slice), size_of::<u16>())) } /// Try to read a u32 from the beginning of the given slice in native endian /// format. If the slice has fewer than 4 bytes, then this returns an error. /// The error message will include the `what` description of what is being /// deserialized, for better error messages. `what` should be a noun in /// singular form. /// /// Upon success, this also returns the number of bytes read. pub(crate) fn try_read_u32( slice: &[u8], what: &'static str, ) -> Result<(u32, usize), DeserializeError> { check_slice_len(slice, size_of::<u32>(), what)?; Ok((read_u32(slice), size_of::<u32>())) } /// Try to read a u128 from the beginning of the given slice in native endian /// format. If the slice has fewer than 16 bytes, then this returns an error. /// The error message will include the `what` description of what is being /// deserialized, for better error messages. `what` should be a noun in /// singular form. /// /// Upon success, this also returns the number of bytes read. pub(crate) fn try_read_u128( slice: &[u8], what: &'static str, ) -> Result<(u128, usize), DeserializeError> { check_slice_len(slice, size_of::<u128>(), what)?; Ok((read_u128(slice), size_of::<u128>())) } /// Read a u16 from the beginning of the given slice in native endian format. /// If the slice has fewer than 2 bytes, then this panics. /// /// Marked as inline to speed up sparse searching which decodes integers from /// its automaton at search time. #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn read_u16(slice: &[u8]) -> u16 { let bytes: [u8; 2] = slice[..size_of::<u16>()].try_into().unwrap(); u16::from_ne_bytes(bytes) } /// Read a u32 from the beginning of the given slice in native endian format. /// If the slice has fewer than 4 bytes, then this panics. /// /// Marked as inline to speed up sparse searching which decodes integers from /// its automaton at search time. #[cfg_attr(feature = "perf-inline", inline(always))] pub(crate) fn read_u32(slice: &[u8]) -> u32 { let bytes: [u8; 4] = slice[..size_of::<u32>()].try_into().unwrap(); u32::from_ne_bytes(bytes) } /// Read a u128 from the beginning of the given slice in native endian format. /// If the slice has fewer than 16 bytes, then this panics. pub(crate) fn read_u128(slice: &[u8]) -> u128 { let bytes: [u8; 16] = slice[..size_of::<u128>()].try_into().unwrap(); u128::from_ne_bytes(bytes) } /// Checks that the given slice has some minimal length. If it's smaller than /// the bound given, then a "buffer too small" error is returned with `what` /// describing what the buffer represents. pub(crate) fn check_slice_len<T>( slice: &[T], at_least_len: usize, what: &'static str, ) -> Result<(), DeserializeError> { if slice.len() < at_least_len { return Err(DeserializeError::buffer_too_small(what)); } Ok(()) } /// Multiply the given numbers, and on overflow, return an error that includes /// 'what' in the error message. /// /// This is useful when doing arithmetic with untrusted data. 
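// Illustrative sketch (not part of the original source): a deserializer
// computing the size of a transition table from untrusted input might chain
// these checked helpers like so, where `state_len`, `stride` and
// `header_bytes` are hypothetical local variables:
//
//     let table_bytes = mul(state_len, stride, "transition table size")?;
//     let total_bytes = add(table_bytes, header_bytes, "total table size")?;
//
// On overflow, each call reports `DeserializeError::arithmetic_overflow`
// instead of silently wrapping.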
pub(crate) fn mul( a: usize, b: usize, what: &'static str, ) -> Result<usize, DeserializeError> { match a.checked_mul(b) { Some(c) => Ok(c), None => Err(DeserializeError::arithmetic_overflow(what)), } } /// Add the given numbers, and on overflow, return an error that includes /// 'what' in the error message. /// /// This is useful when doing arithmetic with untrusted data. pub(crate) fn add( a: usize, b: usize, what: &'static str, ) -> Result<usize, DeserializeError> { match a.checked_add(b) { Some(c) => Ok(c), None => Err(DeserializeError::arithmetic_overflow(what)), } } /// Shift `a` left by `b`, and on overflow, return an error that includes /// 'what' in the error message. /// /// This is useful when doing arithmetic with untrusted data. pub(crate) fn shl( a: usize, b: usize, what: &'static str, ) -> Result<usize, DeserializeError> { let amount = u32::try_from(b) .map_err(|_| DeserializeError::arithmetic_overflow(what))?; match a.checked_shl(amount) { Some(c) => Ok(c), None => Err(DeserializeError::arithmetic_overflow(what)), } } /// Returns the number of additional bytes required to add to the given length /// in order to make the total length a multiple of 4. The return value is /// always less than 4. pub(crate) fn padding_len(non_padding_len: usize) -> usize { (4 - (non_padding_len & 0b11)) & 0b11 } /// A simple trait for writing code generic over endianness. /// /// This is similar to what byteorder provides, but we only need a very small /// subset. pub(crate) trait Endian { /// Writes a u16 to the given destination buffer in a particular /// endianness. If the destination buffer has a length smaller than 2, then /// this panics. fn write_u16(n: u16, dst: &mut [u8]); /// Writes a u32 to the given destination buffer in a particular /// endianness. If the destination buffer has a length smaller than 4, then /// this panics. fn write_u32(n: u32, dst: &mut [u8]); /// Writes a u64 to the given destination buffer in a particular /// endianness. If the destination buffer has a length smaller than 8, then /// this panics. fn write_u64(n: u64, dst: &mut [u8]); /// Writes a u128 to the given destination buffer in a particular /// endianness. If the destination buffer has a length smaller than 16, /// then this panics. fn write_u128(n: u128, dst: &mut [u8]); } /// Little endian writing. pub(crate) enum LE {} /// Big endian writing. 
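// (Illustrative note, not from the original source: the serialization
// routines above are generic over `E: Endian`, so the byte order is chosen at
// compile time. A sketch of writing a header in little endian regardless of
// the host, with a hypothetical version number of 2:
//
//     let mut buf = [0u8; 8];
//     let mut nwrite = 0;
//     nwrite += write_endianness_check::<LE>(&mut buf[nwrite..])?;
//     nwrite += write_version::<LE>(2, &mut buf[nwrite..])?;
//
// Using the `NE` alias defined below selects the host's native byte order
// instead.)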
pub(crate) enum BE {} #[cfg(target_endian = "little")] pub(crate) type NE = LE; #[cfg(target_endian = "big")] pub(crate) type NE = BE; impl Endian for LE { fn write_u16(n: u16, dst: &mut [u8]) { dst[..2].copy_from_slice(&n.to_le_bytes()); } fn write_u32(n: u32, dst: &mut [u8]) { dst[..4].copy_from_slice(&n.to_le_bytes()); } fn write_u64(n: u64, dst: &mut [u8]) { dst[..8].copy_from_slice(&n.to_le_bytes()); } fn write_u128(n: u128, dst: &mut [u8]) { dst[..16].copy_from_slice(&n.to_le_bytes()); } } impl Endian for BE { fn write_u16(n: u16, dst: &mut [u8]) { dst[..2].copy_from_slice(&n.to_be_bytes()); } fn write_u32(n: u32, dst: &mut [u8]) { dst[..4].copy_from_slice(&n.to_be_bytes()); } fn write_u64(n: u64, dst: &mut [u8]) { dst[..8].copy_from_slice(&n.to_be_bytes()); } fn write_u128(n: u128, dst: &mut [u8]) { dst[..16].copy_from_slice(&n.to_be_bytes()); } } #[cfg(all(test, feature = "alloc"))] mod tests { use super::*; #[test] fn labels() { let mut buf = [0; 1024]; let nwrite = write_label("fooba", &mut buf).unwrap(); assert_eq!(nwrite, 8); assert_eq!(&buf[..nwrite], b"fooba\x00\x00\x00"); let nread = read_label(&buf, "fooba").unwrap(); assert_eq!(nread, 8); } #[test] #[should_panic] fn bad_label_interior_nul() { // interior NULs are not allowed write_label("foo\x00bar", &mut [0; 1024]).unwrap(); } #[test] fn bad_label_almost_too_long() { // ok write_label(&"z".repeat(255), &mut [0; 1024]).unwrap(); } #[test] #[should_panic] fn bad_label_too_long() { // labels longer than 255 bytes are banned write_label(&"z".repeat(256), &mut [0; 1024]).unwrap(); } #[test] fn padding() { assert_eq!(0, padding_len(8)); assert_eq!(3, padding_len(9)); assert_eq!(2, padding_len(10)); assert_eq!(1, padding_len(11)); assert_eq!(0, padding_len(12)); assert_eq!(3, padding_len(13)); assert_eq!(2, padding_len(14)); assert_eq!(1, padding_len(15)); assert_eq!(0, padding_len(16)); } } <file_sep>/regex-cli/cmd/find/capture/mod.rs use std::io::{stdout, Write}; use { anyhow::Context, bstr::ByteSlice, lexopt::Parser, regex_automata::{ util::captures::{Captures, GroupInfo}, Input, MatchError, PatternID, }, }; use crate::{ args, util::{self, Table}, }; mod dfa; mod nfa; pub fn run(p: &mut Parser) -> anyhow::Result<()> { const USAGE: &'static str = "\ Executes a search for capturing groups. This command is limited to regex engines that support resolving capture groups. It prints each match, corresponding to possibly many matching capture groups, on its own line. It is prefixed with the pattern ID of the match. Each group contains both the spans matched and the actual substring that matches. USAGE: regex-cli find capture <engine> ENGINES: backtrack Search with the bounded backtracker regex engine. lite Search with the regex-lite engine. meta Search with the meta regex engine. onepass Search with the one-pass DFA regex engine. pikevm Search with the PikeVM regex engine. regex Search with the top-level API regex engine. "; let cmd = args::next_as_command(USAGE, p)?; match &*cmd { "backtrack" => nfa::run_backtrack(p), "lite" => run_lite(p), "meta" => run_meta(p), "onepass" => dfa::run_onepass(p), "pikevm" => nfa::run_pikevm(p), "regex" => run_regex(p), unk => anyhow::bail!("unrecognized command '{}'", unk), } } fn run_regex(p: &mut lexopt::Parser) -> anyhow::Result<()> { const USAGE: &'static str = "\ Executes a search for full matches using the top-level API regex engine. USAGE: regex-cli find capture regex [-p <pattern> ...] <haystack-path> regex-cli find capture regex [-p <pattern> ...] 
-y <haystack> TIP: use -h for short docs and --help for long docs OPTIONS: %options% "; let mut common = args::common::Config::default(); let mut patterns = args::patterns::Config::only_flags(); let mut haystack = args::haystack::Config::default(); let mut syntax = args::syntax::Config::default(); let mut api = args::api::Config::default(); let mut find = super::Config::default(); args::configure( p, USAGE, &mut [ &mut common, &mut patterns, &mut haystack, &mut syntax, &mut api, &mut find, ], )?; let pats = patterns.get()?; let syn = syntax.syntax()?; let mut table = Table::empty(); let (re, time) = util::timeitr(|| api.from_patterns(&syn, &pats))?; table.add("build regex time", time); // The top-level API doesn't support regex-automata's more granular Input // abstraction. let input = args::input::Config::default(); // The top-level API also doesn't use 'Captures' from regex-automata // directly, but we can map between them with some annoyance. let group_info = GroupInfo::new([re.capture_names()]) .context("could not build capture group info")?; let mut locs = re.capture_locations(); let search = |input: &Input<'_>, caps: &mut Captures| { caps.set_pattern(None); if !re .captures_read_at(&mut locs, input.haystack(), input.start()) .is_some() { return Ok(()); } caps.set_pattern(Some(PatternID::ZERO)); for i in 0..locs.len() { use regex_automata::util::primitives::NonMaxUsize; let slot_start = i * 2; let slot_end = slot_start + 1; match locs.get(i) { None => { caps.slots_mut()[slot_start] = None; caps.slots_mut()[slot_end] = None; } Some((start, end)) => { caps.slots_mut()[slot_start] = NonMaxUsize::new(start); caps.slots_mut()[slot_end] = NonMaxUsize::new(end); } } } Ok(()) }; if find.count { run_counts( &mut table, &common, &find, &input, &haystack, &group_info, search, )?; } else { run_search( &mut table, &common, &find, &input, &haystack, &group_info, search, )?; } Ok(()) } fn run_meta(p: &mut lexopt::Parser) -> anyhow::Result<()> { const USAGE: &'static str = "\ Executes a search for full matches using the meta regex engine. USAGE: regex-cli find capture meta [-p <pattern> ...] <haystack-path> regex-cli find capture meta [-p <pattern> ...] 
-y <haystack> TIP: use -h for short docs and --help for long docs OPTIONS: %options% "; let mut common = args::common::Config::default(); let mut input = args::input::Config::default(); let mut patterns = args::patterns::Config::only_flags(); let mut haystack = args::haystack::Config::default(); let mut syntax = args::syntax::Config::default(); let mut meta = args::meta::Config::default(); let mut find = super::Config::default(); args::configure( p, USAGE, &mut [ &mut common, &mut input, &mut patterns, &mut haystack, &mut syntax, &mut meta, &mut find, ], )?; let pats = patterns.get()?; let mut table = Table::empty(); let re = if meta.build_from_patterns() { let (re, time) = util::timeitr(|| meta.from_patterns(&syntax, &pats))?; table.add("build meta time", time); re } else { let (asts, time) = util::timeitr(|| syntax.asts(&pats))?; table.add("parse time", time); let (hirs, time) = util::timeitr(|| syntax.hirs(&pats, &asts))?; table.add("translate time", time); let (re, time) = util::timeitr(|| meta.from_hirs(&hirs))?; table.add("build meta time", time); re }; let search = |input: &Input<'_>, caps: &mut Captures| { Ok(re.search_captures(input, caps)) }; if find.count { run_counts( &mut table, &common, &find, &input, &haystack, re.group_info(), search, )?; } else { run_search( &mut table, &common, &find, &input, &haystack, re.group_info(), search, )?; } Ok(()) } fn run_lite(p: &mut lexopt::Parser) -> anyhow::Result<()> { const USAGE: &'static str = "\ Executes a search for full matches using the top-level regex-lite engine. USAGE: regex-cli find capture lite [-p <pattern> ...] <haystack-path> regex-cli find capture lite [-p <pattern> ...] -y <haystack> TIP: use -h for short docs and --help for long docs OPTIONS: %options% "; let mut common = args::common::Config::default(); let mut patterns = args::patterns::Config::only_flags(); let mut haystack = args::haystack::Config::default(); let mut syntax = args::syntax::Config::default(); let mut lite = args::lite::Config::default(); let mut find = super::Config::default(); args::configure( p, USAGE, &mut [ &mut common, &mut patterns, &mut haystack, &mut syntax, &mut lite, &mut find, ], )?; let pats = patterns.get()?; let syn = syntax.syntax()?; let mut table = Table::empty(); let (re, time) = util::timeitr(|| lite.from_patterns(&syn, &pats))?; table.add("build regex time", time); // Check that the haystack is valid UTF-8 since regex-lite doesn't support // searching arbitrary byte sequences. (At time of writing.) haystack.get()?.to_str()?; // The top-level API doesn't support regex-automata's more granular Input // abstraction. let input = args::input::Config::default(); // The top-level API also doesn't use 'Captures' from regex-automata // directly, but we can map between them with some annoyance. 
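    // Note on the mapping below: regex-automata reserves two slots per
    // capture group, so group `i` corresponds to slot index `2*i` for the
    // match start and `2*i + 1` for the match end. The conversion loop fills
    // exactly those positions with `NonMaxUsize` offsets.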
let group_info = GroupInfo::new([re.capture_names()]) .context("could not build capture group info")?; let mut locs = re.capture_locations(); let search = |input: &Input<'_>, caps: &mut Captures| { let haystack = input.haystack().to_str().unwrap(); caps.set_pattern(None); if !re.captures_read_at(&mut locs, haystack, input.start()).is_some() { return Ok(()); } caps.set_pattern(Some(PatternID::ZERO)); for i in 0..locs.len() { use regex_automata::util::primitives::NonMaxUsize; let slot_start = i * 2; let slot_end = slot_start + 1; match locs.get(i) { None => { caps.slots_mut()[slot_start] = None; caps.slots_mut()[slot_end] = None; } Some((start, end)) => { caps.slots_mut()[slot_start] = NonMaxUsize::new(start); caps.slots_mut()[slot_end] = NonMaxUsize::new(end); } } } Ok(()) }; if find.count { run_counts( &mut table, &common, &find, &input, &haystack, &group_info, search, )?; } else { run_search( &mut table, &common, &find, &input, &haystack, &group_info, search, )?; } Ok(()) } /// A function that takes in a bunch of configuration, runs the given search /// routine, and prints out a table of counts. fn run_counts( table: &mut Table, common: &args::common::Config, find: &super::Config, input: &args::input::Config, haystack: &args::haystack::Config, group_info: &GroupInfo, mut search: impl FnMut(&Input<'_>, &mut Captures) -> Result<(), MatchError>, ) -> anyhow::Result<()> { let mut out = stdout(); input.with(haystack, |input| { let (counts, time) = util::timeitr(|| { let mut counts = vec![vec![]; group_info.pattern_len()]; for i in 0..group_info.pattern_len() { let pid = PatternID::new(i).context("invalid pattern ID")?; counts[i] = vec![0; group_info.group_len(pid)]; } let mut caps = Captures::all(group_info.clone()); for _ in 0..find.repeat() { let mut it = regex_automata::util::iter::Searcher::new(input.clone()); loop { let m = it.try_advance(|input| { search(input, &mut caps)?; Ok(caps.get_match()) })?; match m { None => break, Some(m) => { for (i, span) in caps.iter().enumerate() { if span.is_some() { counts[m.pattern()][i] += 1; } } } } } } Ok::<_, anyhow::Error>(counts) })?; table.add("search time", time); table.add("total matches", counts.iter().map(|c| c[0]).sum::<u64>()); if common.table() { table.print(&mut out)?; } if !common.quiet { for (i, pattern_counts) in counts.iter().enumerate() { let pid = PatternID::new(i).context("invalid pattern ID")?; write!(out, "{}:{{ ", pid.as_usize())?; let names = group_info.pattern_names(pid); for (group_index, maybe_name) in names.enumerate() { if group_index > 0 { write!(out, ", ")?; } let count = pattern_counts[group_index]; if let Some(name) = maybe_name { write!(out, "{}/{}: {}", group_index, name, count)?; } else { write!(out, "{}: {}", group_index, count)?; } } write!(out, " }}\n")?; } } Ok(()) }) } /// Like `run_counts`, but prints the actual matches instead. 
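// For example (illustrative, based on the `write!` calls in the function
// below): a single match of `foo(?P<numbers>[0-9]+)` against "foo123" would
// print a line roughly like
//
//     0:{ 0: 0..6/foo123, 1/numbers: 3..6/123 }
//
// i.e. the pattern ID, followed by each group's index (and name, if any),
// its span and the matched text.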
fn run_search( table: &mut Table, common: &args::common::Config, find: &super::Config, input: &args::input::Config, haystack: &args::haystack::Config, group_info: &GroupInfo, mut search: impl FnMut(&Input<'_>, &mut Captures) -> Result<(), MatchError>, ) -> anyhow::Result<()> { let mut out = stdout(); input.with(haystack, |input| { let (matches, time) = util::timeitr(|| { let mut matches = vec![]; for _ in 0..find.repeat() { let caps = Captures::all(group_info.clone()); let it = regex_automata::util::iter::Searcher::new(input.clone()) .into_captures_iter(caps, &mut search); for caps in it { matches.push(caps?); } } Ok::<_, anyhow::Error>(matches) })?; table.add("search time", time); table.add("total matches", matches.len()); if common.table() { table.print(&mut out)?; } if !common.quiet { for caps in matches.iter() { let pid = caps.pattern().unwrap(); write!(out, "{}:{{ ", pid.as_usize())?; let names = caps.group_info().pattern_names(pid); for (group_index, maybe_name) in names.enumerate() { if group_index > 0 { write!(out, ", ")?; } if let Some(name) = maybe_name { write!(out, "{}/{}: ", group_index, name)?; } else { write!(out, "{}: ", group_index)?; } match caps.get_group(group_index) { None => write!(out, "NONE")?, Some(sp) => { let string = input.haystack()[sp].escape_bytes(); write!( out, "{}..{}/{}", sp.start, sp.end, string )?; } } } write!(out, " }}\n")?; } } Ok(()) }) } <file_sep>/record/old-bench-log/README.md These represent an old log of benchmarks from regex 1.7.3 and older. New and much more comprehensive benchmarks are now maintained as part of the [rebar] project. We keep these old benchmark recordings for posterity, but they may be removed in the future. Measurements can be compared using the [`cargo-benchcmp`][cargo-benchcmp] tool. [rebar]: https://github.com/BurntSushi/rebar [cargo-benchcmp]: https://github.com/BurntSushi/cargo-benchcmp <file_sep>/tests/regression_fuzz.rs // These tests are only run for the "default" test target because some of them // can take quite a long time. Some of them take long enough that it's not // practical to run them in debug mode. :-/ use regex::Regex; macro_rules! regex { ($pattern:expr) => { regex::Regex::new($pattern).unwrap() }; } // See: https://oss-fuzz.com/testcase-detail/5673225499181056 // // Ignored by default since it takes too long in debug mode (almost a minute). #[test] #[ignore] fn fuzz1() { regex!(r"1}{55}{0}*{1}{55}{55}{5}*{1}{55}+{56}|;**"); } // See: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=26505 // See: https://github.com/rust-lang/regex/issues/722 #[test] #[cfg(feature = "unicode")] fn empty_any_errors_no_panic() { assert!(Regex::new(r"\P{any}").is_ok()); } // This tests that a very large regex errors during compilation instead of // using gratuitous amounts of memory. The specific problem is that the // compiler wasn't accounting for the memory used by Unicode character classes // correctly. // // See: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=33579 #[test] fn big_regex_fails_to_compile() { let pat = "[\u{0}\u{e}\u{2}\\w~~>[l\t\u{0}]p?<]{971158}"; assert!(Regex::new(pat).is_err()); } // This was caught while on master but before a release went out(!). // // See: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=58173 #[test] fn todo() { let pat = "(?:z|xx)@|xx"; assert!(Regex::new(pat).is_ok()); } // This was caused by the fuzzer, and then minimized by hand. // // This was caused by a bug in DFA determinization that mishandled NFA fail // states. 
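// In the pattern below, `[a&&b]` is the intersection of the classes `a` and
// `b`, which is empty, so that branch can never match (it compiles down to a
// "fail" state in the NFA). The haystack "B" should still match via the `|B`
// alternative; the bug caused the failing branch to prevent that match.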
#[test] fn fail_branch_prevents_match() { let pat = r".*[a&&b]A|B"; let hay = "B"; let re = Regex::new(pat).unwrap(); assert!(re.is_match(hay)); } <file_sep>/regex-automata/src/nfa/thompson/pikevm.rs /*! An NFA backed Pike VM for executing regex searches with capturing groups. This module provides a [`PikeVM`] that works by simulating an NFA and resolving all spans of capturing groups that participate in a match. */ #[cfg(feature = "internal-instrument-pikevm")] use core::cell::RefCell; use alloc::{vec, vec::Vec}; use crate::{ nfa::thompson::{self, BuildError, State, NFA}, util::{ captures::Captures, empty, iter, prefilter::Prefilter, primitives::{NonMaxUsize, PatternID, SmallIndex, StateID}, search::{ Anchored, HalfMatch, Input, Match, MatchKind, PatternSet, Span, }, sparse_set::SparseSet, }, }; /// A simple macro for conditionally executing instrumentation logic when /// the 'trace' log level is enabled. This is a compile-time no-op when the /// 'internal-instrument-pikevm' feature isn't enabled. The intent here is that /// this makes it easier to avoid doing extra work when instrumentation isn't /// enabled. /// /// This macro accepts a closure of type `|&mut Counters|`. The closure can /// then increment counters (or whatever) in accordance with what one wants /// to track. macro_rules! instrument { ($fun:expr) => { #[cfg(feature = "internal-instrument-pikevm")] { let fun: &mut dyn FnMut(&mut Counters) = &mut $fun; COUNTERS.with(|c: &RefCell<Counters>| fun(&mut *c.borrow_mut())); } }; } #[cfg(feature = "internal-instrument-pikevm")] std::thread_local! { /// Effectively global state used to keep track of instrumentation /// counters. The "proper" way to do this is to thread it through the /// PikeVM, but it makes the code quite icky. Since this is just a /// debugging feature, we're content to relegate it to thread local /// state. When instrumentation is enabled, the counters are reset at the /// beginning of every search and printed (with the 'trace' log level) at /// the end of every search. static COUNTERS: RefCell<Counters> = RefCell::new(Counters::empty()); } /// The configuration used for building a [`PikeVM`]. /// /// A PikeVM configuration is a simple data object that is typically used with /// [`Builder::configure`]. It can be cheaply cloned. /// /// A default configuration can be created either with `Config::new`, or /// perhaps more conveniently, with [`PikeVM::config`]. #[derive(Clone, Debug, Default)] pub struct Config { match_kind: Option<MatchKind>, pre: Option<Option<Prefilter>>, } impl Config { /// Return a new default PikeVM configuration. pub fn new() -> Config { Config::default() } /// Set the desired match semantics. /// /// The default is [`MatchKind::LeftmostFirst`], which corresponds to the /// match semantics of Perl-like regex engines. That is, when multiple /// patterns would match at the same leftmost position, the pattern that /// appears first in the concrete syntax is chosen. /// /// Currently, the only other kind of match semantics supported is /// [`MatchKind::All`]. This corresponds to "classical DFA" construction /// where all possible matches are visited in the NFA by the `PikeVM`. /// /// Typically, `All` is used when one wants to execute an overlapping /// search and `LeftmostFirst` otherwise. In particular, it rarely makes /// sense to use `All` with the various "leftmost" find routines, since the /// leftmost routines depend on the `LeftmostFirst` automata construction /// strategy. 
Specifically, `LeftmostFirst` results in the `PikeVM` /// simulating dead states as a way to terminate the search and report a /// match. `LeftmostFirst` also supports non-greedy matches using this /// strategy where as `All` does not. pub fn match_kind(mut self, kind: MatchKind) -> Config { self.match_kind = Some(kind); self } /// Set a prefilter to be used whenever a start state is entered. /// /// A [`Prefilter`] in this context is meant to accelerate searches by /// looking for literal prefixes that every match for the corresponding /// pattern (or patterns) must start with. Once a prefilter produces a /// match, the underlying search routine continues on to try and confirm /// the match. /// /// Be warned that setting a prefilter does not guarantee that the search /// will be faster. While it's usually a good bet, if the prefilter /// produces a lot of false positive candidates (i.e., positions matched /// by the prefilter but not by the regex), then the overall result can /// be slower than if you had just executed the regex engine without any /// prefilters. /// /// By default no prefilter is set. /// /// # Example /// /// ``` /// use regex_automata::{ /// nfa::thompson::pikevm::PikeVM, /// util::prefilter::Prefilter, /// Input, Match, MatchKind, /// }; /// /// let pre = Prefilter::new(MatchKind::LeftmostFirst, &["foo", "bar"]); /// let re = PikeVM::builder() /// .configure(PikeVM::config().prefilter(pre)) /// .build(r"(foo|bar)[a-z]+")?; /// let mut cache = re.create_cache(); /// let input = Input::new("foo1 barfox bar"); /// assert_eq!(Some(Match::must(0, 5..11)), re.find(&mut cache, input)); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// Be warned though that an incorrect prefilter can lead to incorrect /// results! /// /// ``` /// use regex_automata::{ /// nfa::thompson::pikevm::PikeVM, /// util::prefilter::Prefilter, /// Input, HalfMatch, MatchKind, /// }; /// /// let pre = Prefilter::new(MatchKind::LeftmostFirst, &["foo", "car"]); /// let re = PikeVM::builder() /// .configure(PikeVM::config().prefilter(pre)) /// .build(r"(foo|bar)[a-z]+")?; /// let mut cache = re.create_cache(); /// let input = Input::new("foo1 barfox bar"); /// // No match reported even though there clearly is one! /// assert_eq!(None, re.find(&mut cache, input)); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn prefilter(mut self, pre: Option<Prefilter>) -> Config { self.pre = Some(pre); self } /// Returns the match semantics set in this configuration. pub fn get_match_kind(&self) -> MatchKind { self.match_kind.unwrap_or(MatchKind::LeftmostFirst) } /// Returns the prefilter set in this configuration, if one at all. pub fn get_prefilter(&self) -> Option<&Prefilter> { self.pre.as_ref().unwrap_or(&None).as_ref() } /// Overwrite the default configuration such that the options in `o` are /// always used. If an option in `o` is not set, then the corresponding /// option in `self` is used. If it's not set in `self` either, then it /// remains not set. pub(crate) fn overwrite(&self, o: Config) -> Config { Config { match_kind: o.match_kind.or(self.match_kind), pre: o.pre.or_else(|| self.pre.clone()), } } } /// A builder for a `PikeVM`. /// /// This builder permits configuring options for the syntax of a pattern, /// the NFA construction and the `PikeVM` construction. This builder is /// different from a general purpose regex builder in that it permits fine /// grain configuration of the construction process. 
The trade off for this is /// complexity, and the possibility of setting a configuration that might not /// make sense. For example, there are two different UTF-8 modes: /// /// * [`util::syntax::Config::utf8`](crate::util::syntax::Config::utf8) /// controls whether the pattern itself can contain sub-expressions that match /// invalid UTF-8. /// * [`thompson::Config::utf8`] controls whether empty matches that split a /// Unicode codepoint are reported or not. /// /// Generally speaking, callers will want to either enable all of these or /// disable all of these. /// /// # Example /// /// This example shows how to disable UTF-8 mode in the syntax and the regex /// itself. This is generally what you want for matching on arbitrary bytes. /// /// ``` /// use regex_automata::{ /// nfa::thompson::{self, pikevm::PikeVM}, /// util::syntax, /// Match, /// }; /// /// let re = PikeVM::builder() /// .syntax(syntax::Config::new().utf8(false)) /// .thompson(thompson::Config::new().utf8(false)) /// .build(r"foo(?-u:[^b])ar.*")?; /// let mut cache = re.create_cache(); /// /// let haystack = b"\xFEfoo\xFFarzz\xE2\x98\xFF\n"; /// let expected = Some(Match::must(0, 1..9)); /// let got = re.find_iter(&mut cache, haystack).next(); /// assert_eq!(expected, got); /// // Notice that `(?-u:[^b])` matches invalid UTF-8, /// // but the subsequent `.*` does not! Disabling UTF-8 /// // on the syntax permits this. /// // /// // N.B. This example does not show the impact of /// // disabling UTF-8 mode on a PikeVM Config, since that /// // only impacts regexes that can produce matches of /// // length 0. /// assert_eq!(b"foo\xFFarzz", &haystack[got.unwrap().range()]); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[derive(Clone, Debug)] pub struct Builder { config: Config, #[cfg(feature = "syntax")] thompson: thompson::Compiler, } impl Builder { /// Create a new PikeVM builder with its default configuration. pub fn new() -> Builder { Builder { config: Config::default(), #[cfg(feature = "syntax")] thompson: thompson::Compiler::new(), } } /// Build a `PikeVM` from the given pattern. /// /// If there was a problem parsing or compiling the pattern, then an error /// is returned. #[cfg(feature = "syntax")] pub fn build(&self, pattern: &str) -> Result<PikeVM, BuildError> { self.build_many(&[pattern]) } /// Build a `PikeVM` from the given patterns. #[cfg(feature = "syntax")] pub fn build_many<P: AsRef<str>>( &self, patterns: &[P], ) -> Result<PikeVM, BuildError> { let nfa = self.thompson.build_many(patterns)?; self.build_from_nfa(nfa) } /// Build a `PikeVM` directly from its NFA. /// /// Note that when using this method, any configuration that applies to the /// construction of the NFA itself will of course be ignored, since the NFA /// given here is already built. pub fn build_from_nfa(&self, nfa: NFA) -> Result<PikeVM, BuildError> { nfa.look_set_any().available().map_err(BuildError::word)?; Ok(PikeVM { config: self.config.clone(), nfa }) } /// Apply the given `PikeVM` configuration options to this builder. pub fn configure(&mut self, config: Config) -> &mut Builder { self.config = self.config.overwrite(config); self } /// Set the syntax configuration for this builder using /// [`syntax::Config`](crate::util::syntax::Config). /// /// This permits setting things like case insensitivity, Unicode and multi /// line mode. /// /// These settings only apply when constructing a PikeVM directly from a /// pattern. 
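    // An illustrative sketch (not part of the original docs): enabling case
    // insensitive matching through this method might look like
    //
    //     let re = PikeVM::builder()
    //         .syntax(syntax::Config::new().case_insensitive(true))
    //         .build(r"foo")?;
    //
    // where `syntax` refers to `regex_automata::util::syntax`.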
#[cfg(feature = "syntax")] pub fn syntax( &mut self, config: crate::util::syntax::Config, ) -> &mut Builder { self.thompson.syntax(config); self } /// Set the Thompson NFA configuration for this builder using /// [`nfa::thompson::Config`](crate::nfa::thompson::Config). /// /// This permits setting things like if additional time should be spent /// shrinking the size of the NFA. /// /// These settings only apply when constructing a PikeVM directly from a /// pattern. #[cfg(feature = "syntax")] pub fn thompson(&mut self, config: thompson::Config) -> &mut Builder { self.thompson.configure(config); self } } /// A virtual machine for executing regex searches with capturing groups. /// /// # Infallible APIs /// /// Unlike most other regex engines in this crate, a `PikeVM` never returns an /// error at search time. It supports all [`Anchored`] configurations, never /// quits and works on haystacks of arbitrary length. /// /// There are two caveats to mention though: /// /// * If an invalid pattern ID is given to a search via [`Anchored::Pattern`], /// then the PikeVM will report "no match." This is consistent with all other /// regex engines in this crate. /// * When using [`PikeVM::which_overlapping_matches`] with a [`PatternSet`] /// that has insufficient capacity to store all valid pattern IDs, then if a /// match occurs for a `PatternID` that cannot be inserted, it is silently /// dropped as if it did not match. /// /// # Advice /// /// The `PikeVM` is generally the most "powerful" regex engine in this crate. /// "Powerful" in this context means that it can handle any regular expression /// that is parseable by `regex-syntax` and any size haystack. Regretably, /// the `PikeVM` is also simultaneously often the _slowest_ regex engine in /// practice. This results in an annoying situation where one generally tries /// to pick any other regex engine (or perhaps none at all) before being /// forced to fall back to a `PikeVM`. /// /// For example, a common strategy for dealing with capturing groups is to /// actually look for the overall match of the regex using a faster regex /// engine, like a [lazy DFA](crate::hybrid::regex::Regex). Once the overall /// match is found, one can then run the `PikeVM` on just the match span to /// find the spans of the capturing groups. In this way, the faster regex /// engine does the majority of the work, while the `PikeVM` only lends its /// power in a more limited role. /// /// Unfortunately, this isn't always possible because the faster regex engines /// don't support all of the regex features in `regex-syntax`. This notably /// includes (and is currently limited to) Unicode word boundaries. So if /// your pattern has Unicode word boundaries, you typically can't use a /// DFA-based regex engine at all (unless you [enable heuristic support for /// it](crate::hybrid::dfa::Config::unicode_word_boundary)). (The [one-pass /// DFA](crate::dfa::onepass::DFA) can handle Unicode word boundaries for /// anchored searches only, but in a cruel sort of joke, many Unicode features /// tend to result in making the regex _not_ one-pass.) /// /// # Example /// /// This example shows that the `PikeVM` implements Unicode word boundaries /// correctly by default. 
/// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Match}; /// /// let re = PikeVM::new(r"\b\w+\b")?; /// let mut cache = re.create_cache(); /// /// let mut it = re.find_iter(&mut cache, "<NAME>"); /// assert_eq!(Some(Match::must(0, 0..12)), it.next()); /// assert_eq!(Some(Match::must(0, 13..23)), it.next()); /// assert_eq!(None, it.next()); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[derive(Clone, Debug)] pub struct PikeVM { config: Config, nfa: NFA, } impl PikeVM { /// Parse the given regular expression using the default configuration and /// return the corresponding `PikeVM`. /// /// If you want a non-default configuration, then use the [`Builder`] to /// set your own configuration. /// /// # Example /// /// ``` /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Match}; /// /// let re = PikeVM::new("foo[0-9]+bar")?; /// let mut cache = re.create_cache(); /// assert_eq!( /// Some(Match::must(0, 3..14)), /// re.find_iter(&mut cache, "zzzfoo12345barzzz").next(), /// ); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[cfg(feature = "syntax")] pub fn new(pattern: &str) -> Result<PikeVM, BuildError> { PikeVM::builder().build(pattern) } /// Like `new`, but parses multiple patterns into a single "multi regex." /// This similarly uses the default regex configuration. /// /// # Example /// /// ``` /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Match}; /// /// let re = PikeVM::new_many(&["[a-z]+", "[0-9]+"])?; /// let mut cache = re.create_cache(); /// /// let mut it = re.find_iter(&mut cache, "abc 1 foo 4567 0 quux"); /// assert_eq!(Some(Match::must(0, 0..3)), it.next()); /// assert_eq!(Some(Match::must(1, 4..5)), it.next()); /// assert_eq!(Some(Match::must(0, 6..9)), it.next()); /// assert_eq!(Some(Match::must(1, 10..14)), it.next()); /// assert_eq!(Some(Match::must(1, 15..16)), it.next()); /// assert_eq!(Some(Match::must(0, 17..21)), it.next()); /// assert_eq!(None, it.next()); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[cfg(feature = "syntax")] pub fn new_many<P: AsRef<str>>( patterns: &[P], ) -> Result<PikeVM, BuildError> { PikeVM::builder().build_many(patterns) } /// Like `new`, but builds a PikeVM directly from an NFA. This is useful /// if you already have an NFA, or even if you hand-assembled the NFA. /// /// # Example /// /// This shows how to hand assemble a regular expression via its HIR, /// compile an NFA from it and build a PikeVM from the NFA. /// /// ``` /// use regex_automata::{nfa::thompson::{NFA, pikevm::PikeVM}, Match}; /// use regex_syntax::hir::{Hir, Class, ClassBytes, ClassBytesRange}; /// /// let hir = Hir::class(Class::Bytes(ClassBytes::new(vec![ /// ClassBytesRange::new(b'0', b'9'), /// ClassBytesRange::new(b'A', b'Z'), /// ClassBytesRange::new(b'_', b'_'), /// ClassBytesRange::new(b'a', b'z'), /// ]))); /// /// let config = NFA::config().nfa_size_limit(Some(1_000)); /// let nfa = NFA::compiler().configure(config).build_from_hir(&hir)?; /// /// let re = PikeVM::new_from_nfa(nfa)?; /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); /// let expected = Some(Match::must(0, 3..4)); /// re.captures(&mut cache, "!@#A#@!", &mut caps); /// assert_eq!(expected, caps.get_match()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn new_from_nfa(nfa: NFA) -> Result<PikeVM, BuildError> { PikeVM::builder().build_from_nfa(nfa) } /// Create a new `PikeVM` that matches every input. 
/// /// # Example /// /// ``` /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Match}; /// /// let re = PikeVM::always_match()?; /// let mut cache = re.create_cache(); /// /// let expected = Match::must(0, 0..0); /// assert_eq!(Some(expected), re.find_iter(&mut cache, "").next()); /// assert_eq!(Some(expected), re.find_iter(&mut cache, "foo").next()); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn always_match() -> Result<PikeVM, BuildError> { let nfa = thompson::NFA::always_match(); PikeVM::new_from_nfa(nfa) } /// Create a new `PikeVM` that never matches any input. /// /// # Example /// /// ``` /// use regex_automata::nfa::thompson::pikevm::PikeVM; /// /// let re = PikeVM::never_match()?; /// let mut cache = re.create_cache(); /// /// assert_eq!(None, re.find_iter(&mut cache, "").next()); /// assert_eq!(None, re.find_iter(&mut cache, "foo").next()); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn never_match() -> Result<PikeVM, BuildError> { let nfa = thompson::NFA::never_match(); PikeVM::new_from_nfa(nfa) } /// Return a default configuration for a `PikeVM`. /// /// This is a convenience routine to avoid needing to import the `Config` /// type when customizing the construction of a `PikeVM`. /// /// # Example /// /// This example shows how to disable UTF-8 mode. When UTF-8 mode is /// disabled, zero-width matches that split a codepoint are allowed. /// Otherwise they are never reported. /// /// In the code below, notice that `""` is permitted to match positions /// that split the encoding of a codepoint. /// /// ``` /// use regex_automata::{nfa::thompson::{self, pikevm::PikeVM}, Match}; /// /// let re = PikeVM::builder() /// .thompson(thompson::Config::new().utf8(false)) /// .build(r"")?; /// let mut cache = re.create_cache(); /// /// let haystack = "a☃z"; /// let mut it = re.find_iter(&mut cache, haystack); /// assert_eq!(Some(Match::must(0, 0..0)), it.next()); /// assert_eq!(Some(Match::must(0, 1..1)), it.next()); /// assert_eq!(Some(Match::must(0, 2..2)), it.next()); /// assert_eq!(Some(Match::must(0, 3..3)), it.next()); /// assert_eq!(Some(Match::must(0, 4..4)), it.next()); /// assert_eq!(Some(Match::must(0, 5..5)), it.next()); /// assert_eq!(None, it.next()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn config() -> Config { Config::new() } /// Return a builder for configuring the construction of a `PikeVM`. /// /// This is a convenience routine to avoid needing to import the /// [`Builder`] type in common cases. /// /// # Example /// /// This example shows how to use the builder to disable UTF-8 mode /// everywhere. /// /// ``` /// use regex_automata::{ /// nfa::thompson::{self, pikevm::PikeVM}, /// util::syntax, /// Match, /// }; /// /// let re = PikeVM::builder() /// .syntax(syntax::Config::new().utf8(false)) /// .thompson(thompson::Config::new().utf8(false)) /// .build(r"foo(?-u:[^b])ar.*")?; /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); /// /// let haystack = b"\xFEfoo\xFFarzz\xE2\x98\xFF\n"; /// let expected = Some(Match::must(0, 1..9)); /// re.captures(&mut cache, haystack, &mut caps); /// assert_eq!(expected, caps.get_match()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn builder() -> Builder { Builder::new() } /// Create a new empty set of capturing groups that is guaranteed to be /// valid for the search APIs on this `PikeVM`. /// /// A `Captures` value created for a specific `PikeVM` cannot be used with /// any other `PikeVM`. 
/// /// This is a convenience function for [`Captures::all`]. See the /// [`Captures`] documentation for an explanation of its alternative /// constructors that permit the `PikeVM` to do less work during a search, /// and thus might make it faster. pub fn create_captures(&self) -> Captures { Captures::all(self.get_nfa().group_info().clone()) } /// Create a new cache for this `PikeVM`. /// /// The cache returned should only be used for searches for this /// `PikeVM`. If you want to reuse the cache for another `PikeVM`, then /// you must call [`Cache::reset`] with that `PikeVM` (or, equivalently, /// [`PikeVM::reset_cache`]). pub fn create_cache(&self) -> Cache { Cache::new(self) } /// Reset the given cache such that it can be used for searching with the /// this `PikeVM` (and only this `PikeVM`). /// /// A cache reset permits reusing memory already allocated in this cache /// with a different `PikeVM`. /// /// # Example /// /// This shows how to re-purpose a cache for use with a different `PikeVM`. /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Match}; /// /// let re1 = PikeVM::new(r"\w")?; /// let re2 = PikeVM::new(r"\W")?; /// /// let mut cache = re1.create_cache(); /// assert_eq!( /// Some(Match::must(0, 0..2)), /// re1.find_iter(&mut cache, "Δ").next(), /// ); /// /// // Using 'cache' with re2 is not allowed. It may result in panics or /// // incorrect results. In order to re-purpose the cache, we must reset /// // it with the PikeVM we'd like to use it with. /// // /// // Similarly, after this reset, using the cache with 're1' is also not /// // allowed. /// re2.reset_cache(&mut cache); /// assert_eq!( /// Some(Match::must(0, 0..3)), /// re2.find_iter(&mut cache, "☃").next(), /// ); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn reset_cache(&self, cache: &mut Cache) { cache.reset(self); } /// Returns the total number of patterns compiled into this `PikeVM`. /// /// In the case of a `PikeVM` that contains no patterns, this returns `0`. /// /// # Example /// /// This example shows the pattern length for a `PikeVM` that never /// matches: /// /// ``` /// use regex_automata::nfa::thompson::pikevm::PikeVM; /// /// let re = PikeVM::never_match()?; /// assert_eq!(re.pattern_len(), 0); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// And another example for a `PikeVM` that matches at every position: /// /// ``` /// use regex_automata::nfa::thompson::pikevm::PikeVM; /// /// let re = PikeVM::always_match()?; /// assert_eq!(re.pattern_len(), 1); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// And finally, a `PikeVM` that was constructed from multiple patterns: /// /// ``` /// use regex_automata::nfa::thompson::pikevm::PikeVM; /// /// let re = PikeVM::new_many(&["[0-9]+", "[a-z]+", "[A-Z]+"])?; /// assert_eq!(re.pattern_len(), 3); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn pattern_len(&self) -> usize { self.nfa.pattern_len() } /// Return the config for this `PikeVM`. #[inline] pub fn get_config(&self) -> &Config { &self.config } /// Returns a reference to the underlying NFA. #[inline] pub fn get_nfa(&self) -> &NFA { &self.nfa } } impl PikeVM { /// Returns true if and only if this `PikeVM` matches the given haystack. /// /// This routine may short circuit if it knows that scanning future /// input will never lead to a different result. 
In particular, if the /// underlying NFA enters a match state, then this routine will return /// `true` immediately without inspecting any future input. (Consider how /// this might make a difference given the regex `a+` on the haystack /// `aaaaaaaaaaaaaaa`. This routine can stop after it sees the first `a`, /// but routines like `find` need to continue searching because `+` is /// greedy by default.) /// /// # Example /// /// This shows basic usage: /// /// ``` /// use regex_automata::nfa::thompson::pikevm::PikeVM; /// /// let re = PikeVM::new("foo[0-9]+bar")?; /// let mut cache = re.create_cache(); /// /// assert!(re.is_match(&mut cache, "foo12345bar")); /// assert!(!re.is_match(&mut cache, "foobar")); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// # Example: consistency with search APIs /// /// `is_match` is guaranteed to return `true` whenever `find` returns a /// match. This includes searches that are executed entirely within a /// codepoint: /// /// ``` /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Input}; /// /// let re = PikeVM::new("a*")?; /// let mut cache = re.create_cache(); /// /// assert!(!re.is_match(&mut cache, Input::new("☃").span(1..2))); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// Notice that when UTF-8 mode is disabled, then the above reports a /// match because the restriction against zero-width matches that split a /// codepoint has been lifted: /// /// ``` /// use regex_automata::{nfa::thompson::{pikevm::PikeVM, NFA}, Input}; /// /// let re = PikeVM::builder() /// .thompson(NFA::config().utf8(false)) /// .build("a*")?; /// let mut cache = re.create_cache(); /// /// assert!(re.is_match(&mut cache, Input::new("☃").span(1..2))); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn is_match<'h, I: Into<Input<'h>>>( &self, cache: &mut Cache, input: I, ) -> bool { let input = input.into().earliest(true); self.search_slots(cache, &input, &mut []).is_some() } /// Executes a leftmost forward search and returns a `Match` if one exists. /// /// This routine only includes the overall match span. To get access to the /// individual spans of each capturing group, use [`PikeVM::captures`]. /// /// # Example /// /// Leftmost first match semantics corresponds to the match with the /// smallest starting offset, but where the end offset is determined by /// preferring earlier branches in the original regular expression. For /// example, `Sam|Samwise` will match `Sam` in `Samwise`, but `Samwise|Sam` /// will match `Samwise` in `Samwise`. /// /// Generally speaking, the "leftmost first" match is how most backtracking /// regular expressions tend to work. This is in contrast to POSIX-style /// regular expressions that yield "leftmost longest" matches. Namely, /// both `Sam|Samwise` and `Samwise|Sam` match `Samwise` when using /// leftmost longest semantics. (This crate does not currently support /// leftmost longest semantics.) /// /// ``` /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Match}; /// /// let re = PikeVM::new("foo[0-9]+")?; /// let mut cache = re.create_cache(); /// let expected = Match::must(0, 0..8); /// assert_eq!(Some(expected), re.find(&mut cache, "foo12345")); /// /// // Even though a match is found after reading the first byte (`a`), /// // the leftmost first match semantics demand that we find the earliest /// // match that prefers earlier parts of the pattern over later parts. 
/// let re = PikeVM::new("abc|a")?; /// let mut cache = re.create_cache(); /// let expected = Match::must(0, 0..3); /// assert_eq!(Some(expected), re.find(&mut cache, "abc")); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn find<'h, I: Into<Input<'h>>>( &self, cache: &mut Cache, input: I, ) -> Option<Match> { let input = input.into(); if self.get_nfa().pattern_len() == 1 { let mut slots = [None, None]; let pid = self.search_slots(cache, &input, &mut slots)?; let start = slots[0]?.get(); let end = slots[1]?.get(); return Some(Match::new(pid, Span { start, end })); } let ginfo = self.get_nfa().group_info(); let slots_len = ginfo.implicit_slot_len(); let mut slots = vec![None; slots_len]; let pid = self.search_slots(cache, &input, &mut slots)?; let start = slots[pid.as_usize() * 2]?.get(); let end = slots[pid.as_usize() * 2 + 1]?.get(); Some(Match::new(pid, Span { start, end })) } /// Executes a leftmost forward search and writes the spans of capturing /// groups that participated in a match into the provided [`Captures`] /// value. If no match was found, then [`Captures::is_match`] is guaranteed /// to return `false`. /// /// # Example /// /// ``` /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Span}; /// /// let re = PikeVM::new(r"^([0-9]{4})-([0-9]{2})-([0-9]{2})$")?; /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); /// /// re.captures(&mut cache, "2010-03-14", &mut caps); /// assert!(caps.is_match()); /// assert_eq!(Some(Span::from(0..4)), caps.get_group(1)); /// assert_eq!(Some(Span::from(5..7)), caps.get_group(2)); /// assert_eq!(Some(Span::from(8..10)), caps.get_group(3)); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn captures<'h, I: Into<Input<'h>>>( &self, cache: &mut Cache, input: I, caps: &mut Captures, ) { self.search(cache, &input.into(), caps) } /// Returns an iterator over all non-overlapping leftmost matches in the /// given bytes. If no match exists, then the iterator yields no elements. /// /// # Example /// /// ``` /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Match}; /// /// let re = PikeVM::new("foo[0-9]+")?; /// let mut cache = re.create_cache(); /// /// let text = "foo1 foo12 foo123"; /// let matches: Vec<Match> = re.find_iter(&mut cache, text).collect(); /// assert_eq!(matches, vec![ /// Match::must(0, 0..4), /// Match::must(0, 5..10), /// Match::must(0, 11..17), /// ]); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn find_iter<'r, 'c, 'h, I: Into<Input<'h>>>( &'r self, cache: &'c mut Cache, input: I, ) -> FindMatches<'r, 'c, 'h> { let caps = Captures::matches(self.get_nfa().group_info().clone()); let it = iter::Searcher::new(input.into()); FindMatches { re: self, cache, caps, it } } /// Returns an iterator over all non-overlapping `Captures` values. If no /// match exists, then the iterator yields no elements. /// /// This yields the same matches as [`PikeVM::find_iter`], but it includes /// the spans of all capturing groups that participate in each match. /// /// **Tip:** See [`util::iter::Searcher`](crate::util::iter::Searcher) for /// how to correctly iterate over all matches in a haystack while avoiding /// the creation of a new `Captures` value for every match. (Which you are /// forced to do with an `Iterator`.) 
/// /// # Example /// /// ``` /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Span}; /// /// let re = PikeVM::new("foo(?P<numbers>[0-9]+)")?; /// let mut cache = re.create_cache(); /// /// let text = "foo1 foo12 foo123"; /// let matches: Vec<Span> = re /// .captures_iter(&mut cache, text) /// // The unwrap is OK since 'numbers' matches if the pattern matches. /// .map(|caps| caps.get_group_by_name("numbers").unwrap()) /// .collect(); /// assert_eq!(matches, vec![ /// Span::from(3..4), /// Span::from(8..10), /// Span::from(14..17), /// ]); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn captures_iter<'r, 'c, 'h, I: Into<Input<'h>>>( &'r self, cache: &'c mut Cache, input: I, ) -> CapturesMatches<'r, 'c, 'h> { let caps = self.create_captures(); let it = iter::Searcher::new(input.into()); CapturesMatches { re: self, cache, caps, it } } } impl PikeVM { /// Executes a leftmost forward search and writes the spans of capturing /// groups that participated in a match into the provided [`Captures`] /// value. If no match was found, then [`Captures::is_match`] is guaranteed /// to return `false`. /// /// This is like [`PikeVM::captures`], but it accepts a concrete `&Input` /// instead of an `Into<Input>`. /// /// # Example: specific pattern search /// /// This example shows how to build a multi-PikeVM that permits searching /// for specific patterns. /// /// ``` /// use regex_automata::{ /// nfa::thompson::pikevm::PikeVM, /// Anchored, Match, PatternID, Input, /// }; /// /// let re = PikeVM::new_many(&["[a-z0-9]{6}", "[a-z][a-z0-9]{5}"])?; /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); /// let haystack = "foo123"; /// /// // Since we are using the default leftmost-first match and both /// // patterns match at the same starting position, only the first pattern /// // will be returned in this case when doing a search for any of the /// // patterns. /// let expected = Some(Match::must(0, 0..6)); /// re.search(&mut cache, &Input::new(haystack), &mut caps); /// assert_eq!(expected, caps.get_match()); /// /// // But if we want to check whether some other pattern matches, then we /// // can provide its pattern ID. /// let expected = Some(Match::must(1, 0..6)); /// let input = Input::new(haystack) /// .anchored(Anchored::Pattern(PatternID::must(1))); /// re.search(&mut cache, &input, &mut caps); /// assert_eq!(expected, caps.get_match()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// # Example: specifying the bounds of a search /// /// This example shows how providing the bounds of a search can produce /// different results than simply sub-slicing the haystack. /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Match, Input}; /// /// let re = PikeVM::new(r"\b[0-9]{3}\b")?; /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); /// let haystack = "foo123bar"; /// /// // Since we sub-slice the haystack, the search doesn't know about /// // the larger context and assumes that `123` is surrounded by word /// // boundaries. And of course, the match position is reported relative /// // to the sub-slice as well, which means we get `0..3` instead of /// // `3..6`. 
/// let expected = Some(Match::must(0, 0..3)); /// re.search(&mut cache, &Input::new(&haystack[3..6]), &mut caps); /// assert_eq!(expected, caps.get_match()); /// /// // But if we provide the bounds of the search within the context of the /// // entire haystack, then the search can take the surrounding context /// // into account. (And if we did find a match, it would be reported /// // as a valid offset into `haystack` instead of its sub-slice.) /// let expected = None; /// let input = Input::new(haystack).range(3..6); /// re.search(&mut cache, &input, &mut caps); /// assert_eq!(expected, caps.get_match()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn search( &self, cache: &mut Cache, input: &Input<'_>, caps: &mut Captures, ) { caps.set_pattern(None); let pid = self.search_slots(cache, input, caps.slots_mut()); caps.set_pattern(pid); } /// Executes a leftmost forward search and writes the spans of capturing /// groups that participated in a match into the provided `slots`, and /// returns the matching pattern ID. The contents of the slots for patterns /// other than the matching pattern are unspecified. If no match was found, /// then `None` is returned and the contents of `slots` is unspecified. /// /// This is like [`PikeVM::search`], but it accepts a raw slots slice /// instead of a `Captures` value. This is useful in contexts where you /// don't want or need to allocate a `Captures`. /// /// It is legal to pass _any_ number of slots to this routine. If the regex /// engine would otherwise write a slot offset that doesn't fit in the /// provided slice, then it is simply skipped. In general though, there are /// usually three slice lengths you might want to use: /// /// * An empty slice, if you only care about which pattern matched. /// * A slice with /// [`pattern_len() * 2`](crate::nfa::thompson::NFA::pattern_len) /// slots, if you only care about the overall match spans for each matching /// pattern. /// * A slice with /// [`slot_len()`](crate::util::captures::GroupInfo::slot_len) slots, which /// permits recording match offsets for every capturing group in every /// pattern. /// /// # Example /// /// This example shows how to find the overall match offsets in a /// multi-pattern search without allocating a `Captures` value. Indeed, we /// can put our slots right on the stack. /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{nfa::thompson::pikevm::PikeVM, PatternID, Input}; /// /// let re = PikeVM::new_many(&[ /// r"\pL+", /// r"\d+", /// ])?; /// let mut cache = re.create_cache(); /// let input = Input::new("!@#123"); /// /// // We only care about the overall match offsets here, so we just /// // allocate two slots for each pattern. Each slot records the start /// // and end of the match. /// let mut slots = [None; 4]; /// let pid = re.search_slots(&mut cache, &input, &mut slots); /// assert_eq!(Some(PatternID::must(1)), pid); /// /// // The overall match offsets are always at 'pid * 2' and 'pid * 2 + 1'. /// // See 'GroupInfo' for more details on the mapping between groups and /// // slot indices. 
/// let slot_start = pid.unwrap().as_usize() * 2; /// let slot_end = slot_start + 1; /// assert_eq!(Some(3), slots[slot_start].map(|s| s.get())); /// assert_eq!(Some(6), slots[slot_end].map(|s| s.get())); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn search_slots( &self, cache: &mut Cache, input: &Input<'_>, slots: &mut [Option<NonMaxUsize>], ) -> Option<PatternID> { let utf8empty = self.get_nfa().has_empty() && self.get_nfa().is_utf8(); if !utf8empty { let hm = self.search_slots_imp(cache, input, slots)?; return Some(hm.pattern()); } // There is an unfortunate special case where if the regex can // match the empty string and UTF-8 mode is enabled, the search // implementation requires that the slots have at least as much space // to report the bounds of any match. This is so zero-width matches // that split a codepoint can be filtered out. // // Note that if utf8empty is true, we specialize the case for when // the number of patterns is 1. In that case, we can just use a stack // allocation. Otherwise we resort to a heap allocation, which we // convince ourselves we're fine with due to the pathological nature of // this case. let min = self.get_nfa().group_info().implicit_slot_len(); if slots.len() >= min { let hm = self.search_slots_imp(cache, input, slots)?; return Some(hm.pattern()); } if self.get_nfa().pattern_len() == 1 { let mut enough = [None, None]; let got = self.search_slots_imp(cache, input, &mut enough); // This is OK because we know `enough` is strictly bigger than // `slots`, otherwise this special case isn't reached. slots.copy_from_slice(&enough[..slots.len()]); return got.map(|hm| hm.pattern()); } let mut enough = vec![None; min]; let got = self.search_slots_imp(cache, input, &mut enough); // This is OK because we know `enough` is strictly bigger than `slots`, // otherwise this special case isn't reached. slots.copy_from_slice(&enough[..slots.len()]); got.map(|hm| hm.pattern()) } /// This is the actual implementation of `search_slots_imp` that /// doesn't account for the special case when 1) the NFA has UTF-8 mode /// enabled, 2) the NFA can match the empty string and 3) the caller has /// provided an insufficient number of slots to record match offsets. #[inline(never)] fn search_slots_imp( &self, cache: &mut Cache, input: &Input<'_>, slots: &mut [Option<NonMaxUsize>], ) -> Option<HalfMatch> { let utf8empty = self.get_nfa().has_empty() && self.get_nfa().is_utf8(); let hm = match self.search_imp(cache, input, slots) { None => return None, Some(hm) if !utf8empty => return Some(hm), Some(hm) => hm, }; empty::skip_splits_fwd(input, hm, hm.offset(), |input| { Ok(self .search_imp(cache, input, slots) .map(|hm| (hm, hm.offset()))) }) // OK because the PikeVM never errors. .unwrap() } /// Writes the set of patterns that match anywhere in the given search /// configuration to `patset`. If multiple patterns match at the same /// position and this `PikeVM` was configured with [`MatchKind::All`] /// semantics, then all matching patterns are written to the given set. /// /// Unless all of the patterns in this `PikeVM` are anchored, then /// generally speaking, this will visit every byte in the haystack. /// /// This search routine *does not* clear the pattern set. This gives some /// flexibility to the caller (e.g., running multiple searches with the /// same pattern set), but does make the API bug-prone if you're reusing /// the same pattern set for multiple searches but intended them to be /// independent. 
/// /// If a pattern ID matched but the given `PatternSet` does not have /// sufficient capacity to store it, then it is not inserted and silently /// dropped. /// /// # Example /// /// This example shows how to find all matching patterns in a haystack, /// even when some patterns match at the same position as other patterns. /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{ /// nfa::thompson::pikevm::PikeVM, /// Input, MatchKind, PatternSet, /// }; /// /// let patterns = &[ /// r"\w+", r"\d+", r"\pL+", r"foo", r"bar", r"barfoo", r"foobar", /// ]; /// let re = PikeVM::builder() /// .configure(PikeVM::config().match_kind(MatchKind::All)) /// .build_many(patterns)?; /// let mut cache = re.create_cache(); /// /// let input = Input::new("foobar"); /// let mut patset = PatternSet::new(re.pattern_len()); /// re.which_overlapping_matches(&mut cache, &input, &mut patset); /// let expected = vec![0, 2, 3, 4, 6]; /// let got: Vec<usize> = patset.iter().map(|p| p.as_usize()).collect(); /// assert_eq!(expected, got); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn which_overlapping_matches( &self, cache: &mut Cache, input: &Input<'_>, patset: &mut PatternSet, ) { self.which_overlapping_imp(cache, input, patset) } } impl PikeVM { /// The implementation of standard leftmost search. /// /// Capturing group spans are written to `slots`, but only if requested. /// `slots` can be any length. Any slot in the NFA that is activated but /// which is out of bounds for the given `slots` is ignored. fn search_imp( &self, cache: &mut Cache, input: &Input<'_>, slots: &mut [Option<NonMaxUsize>], ) -> Option<HalfMatch> { cache.setup_search(slots.len()); if input.is_done() { return None; } // Why do we even care about this? Well, in our 'Captures' // representation, we use usize::MAX as a sentinel to indicate "no // match." This isn't problematic so long as our haystack doesn't have // a maximal length. Byte slices are guaranteed by Rust to have a // length that fits into isize, and so this assert should always pass. // But we put it here to make our assumption explicit. assert!( input.haystack().len() < core::usize::MAX, "byte slice lengths must be less than usize MAX", ); instrument!(|c| c.reset(&self.nfa)); // Whether we want to visit all match states instead of emulating the // 'leftmost' semantics of typical backtracking regex engines. let allmatches = self.config.get_match_kind().continue_past_first_match(); let (anchored, start_id) = match self.start_config(input) { None => return None, Some(config) => config, }; let pre = if anchored { None } else { self.get_config().get_prefilter() }; let Cache { ref mut stack, ref mut curr, ref mut next } = cache; let mut hm = None; // Yes, our search doesn't end at input.end(), but includes it. This // is necessary because matches are delayed by one byte, just like // how the DFA engines work. The delay is used to handle look-behind // assertions. In the case of the PikeVM, the delay is implemented // by not considering a match to exist until it is visited in // 'steps'. Technically, we know a match exists in the previous // iteration via 'epsilon_closure'. (It's the same thing in NFA-to-DFA // determinization. We don't mark a DFA state as a match state if it // contains an NFA match state, but rather, whether the DFA state was // generated by a transition from a DFA state that contains an NFA // match state.) 
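        // Main loop: for each position 'at' (note the inclusive bound, per
        // the delay discussion above), we seed 'curr' with the start state's
        // epsilon closure when appropriate, step every current state to build
        // the next set, record any match we see and then swap the sets before
        // moving on to the next position.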
let mut at = input.start(); while at <= input.end() { // If we have no states left to visit, then there are some cases // where we know we can quit early or even skip ahead. if curr.set.is_empty() { // We have a match and we haven't been instructed to continue // on even after finding a match, so we can quit. if hm.is_some() && !allmatches { break; } // If we're running an anchored search and we've advanced // beyond the start position with no other states to try, then // we will never observe a match and thus can stop. if anchored && at > input.start() { break; } // If there no states left to explore at this position and we // know we can't terminate early, then we are effectively at // the starting state of the NFA. If we fell through here, // we'd end up adding our '(?s-u:.)*?' prefix and it would be // the only thing in 'curr'. So we might as well just skip // ahead until we find something that we know might advance us // forward. if let Some(ref pre) = pre { let span = Span::from(at..input.end()); match pre.find(input.haystack(), span) { None => break, Some(ref span) => at = span.start, } } } // Instead of using the NFA's unanchored start state, we actually // always use its anchored starting state. As a result, when doing // an unanchored search, we need to simulate our own '(?s-u:.)*?' // prefix, to permit a match to appear anywhere. // // Now, we don't *have* to do things this way. We could use the // NFA's unanchored starting state and do one 'epsilon_closure' // call from that starting state before the main loop here. And // that is just as correct. However, it turns out to be slower // than our approach here because it slightly increases the cost // of processing each byte by requiring us to visit more NFA // states to deal with the additional NFA states in the unanchored // prefix. By simulating it explicitly here, we lower those costs // substantially. The cost is itself small, but it adds up for // large haystacks. // // In order to simulate the '(?s-u:.)*?' prefix---which is not // greedy---we are careful not to perform an epsilon closure on // the start state if we already have a match. Namely, if we // did otherwise, we would never reach a terminating condition // because there would always be additional states to process. // In effect, the exclusion of running 'epsilon_closure' when // we have a match corresponds to the "dead" states we have in // our DFA regex engines. Namely, in a DFA, match states merely // instruct the search execution to record the current offset as // the most recently seen match. It is the dead state that actually // indicates when to stop the search (other than EOF or quit // states). // // However, when 'allmatches' is true, the caller has asked us to // leave in every possible match state. This tends not to make a // whole lot of sense in unanchored searches, because it means the // search really cannot terminate until EOF. And often, in that // case, you wind up skipping over a bunch of matches and are left // with the "last" match. Arguably, it just doesn't make a lot of // sense to run a 'leftmost' search (which is what this routine is) // with 'allmatches' set to true. But the DFAs support it and this // matches their behavior. (Generally, 'allmatches' is useful for // overlapping searches or leftmost anchored searches to find the // longest possible match by ignoring match priority.) // // Additionally, when we're running an anchored search, this // epsilon closure should only be computed at the beginning of the // search. 
If we re-computed it at every position, we would be // simulating an unanchored search when we were tasked to perform // an anchored search. if (!hm.is_some() || allmatches) && (!anchored || at == input.start()) { // Since we are adding to the 'curr' active states and since // this is for the start ID, we use a slots slice that is // guaranteed to have the right length but where every element // is absent. This is exactly what we want, because this // epsilon closure is responsible for simulating an unanchored // '(?s:.)*?' prefix. It is specifically outside of any // capturing groups, and thus, using slots that are always // absent is correct. // // Note though that we can't just use '&mut []' here, since // this epsilon closure may traverse through 'Captures' epsilon // transitions, and thus must be able to write offsets to the // slots given which are later copied to slot values in 'curr'. let slots = next.slot_table.all_absent(); self.epsilon_closure(stack, slots, curr, input, at, start_id); } if let Some(pid) = self.nexts(stack, curr, next, input, at, slots) { hm = Some(HalfMatch::new(pid, at)); } // Unless the caller asked us to return early, we need to mush on // to see if we can extend our match. (But note that 'nexts' will // quit right after seeing a match when match_kind==LeftmostFirst, // as is consistent with leftmost-first match priority.) if input.get_earliest() && hm.is_some() { break; } core::mem::swap(curr, next); next.set.clear(); at += 1; } instrument!(|c| c.eprint(&self.nfa)); hm } /// The implementation for the 'which_overlapping_matches' API. Basically, /// we do a single scan through the entire haystack (unless our regex /// or search is anchored) and record every pattern that matched. In /// particular, when MatchKind::All is used, this supports overlapping /// matches. So if we have the regexes 'sam' and 'samwise', they will /// *both* be reported in the pattern set when searching the haystack /// 'samwise'. fn which_overlapping_imp( &self, cache: &mut Cache, input: &Input<'_>, patset: &mut PatternSet, ) { // NOTE: This is effectively a copy of 'search_imp' above, but with no // captures support and instead writes patterns that matched directly // to 'patset'. See that routine for better commentary about what's // going on in this routine. We probably could unify the routines using // generics or more helper routines, but I'm not sure it's worth it. // // NOTE: We somewhat go out of our way here to support things like // 'input.get_earliest()' and 'leftmost-first' match semantics. Neither // of those seem particularly relevant to this routine, but they are // both supported by the DFA analogs of this routine by construction // and composition, so it seems like good sense to have the PikeVM // match that behavior. 
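        // This routine never records capturing group offsets (it only writes
        // matching pattern IDs to 'patset'), so we ask the cache to set up
        // zero slots per state. That makes the slot copying done during
        // epsilon closure a no-op.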
cache.setup_search(0); if input.is_done() { return; } assert!( input.haystack().len() < core::usize::MAX, "byte slice lengths must be less than usize MAX", ); instrument!(|c| c.reset(&self.nfa)); let allmatches = self.config.get_match_kind().continue_past_first_match(); let (anchored, start_id) = match self.start_config(input) { None => return, Some(config) => config, }; let Cache { ref mut stack, ref mut curr, ref mut next } = cache; for at in input.start()..=input.end() { let any_matches = !patset.is_empty(); if curr.set.is_empty() { if any_matches && !allmatches { break; } if anchored && at > input.start() { break; } } if !any_matches || allmatches { let slots = &mut []; self.epsilon_closure(stack, slots, curr, input, at, start_id); } self.nexts_overlapping(stack, curr, next, input, at, patset); // If we found a match and filled our set, then there is no more // additional info that we can provide. Thus, we can quit. We also // quit if the caller asked us to stop at the earliest point that // we know a match exists. if patset.is_full() || input.get_earliest() { break; } core::mem::swap(curr, next); next.set.clear(); } instrument!(|c| c.eprint(&self.nfa)); } /// Process the active states in 'curr' to find the states (written to /// 'next') we should process for the next byte in the haystack. /// /// 'stack' is used to perform a depth first traversal of the NFA when /// computing an epsilon closure. /// /// When a match is found, the slots for that match state (in 'curr') are /// copied to 'caps'. Moreover, once a match is seen, processing for 'curr' /// stops (unless the PikeVM was configured with MatchKind::All semantics). #[cfg_attr(feature = "perf-inline", inline(always))] fn nexts( &self, stack: &mut Vec<FollowEpsilon>, curr: &mut ActiveStates, next: &mut ActiveStates, input: &Input<'_>, at: usize, slots: &mut [Option<NonMaxUsize>], ) -> Option<PatternID> { instrument!(|c| c.record_state_set(&curr.set)); let mut pid = None; let ActiveStates { ref set, ref mut slot_table } = *curr; for sid in set.iter() { pid = match self.next(stack, slot_table, next, input, at, sid) { None => continue, Some(pid) => Some(pid), }; slots.copy_from_slice(slot_table.for_state(sid)); if !self.config.get_match_kind().continue_past_first_match() { break; } } pid } /// Like 'nexts', but for the overlapping case. This doesn't write any /// slots, and instead just writes which pattern matched in 'patset'. #[cfg_attr(feature = "perf-inline", inline(always))] fn nexts_overlapping( &self, stack: &mut Vec<FollowEpsilon>, curr: &mut ActiveStates, next: &mut ActiveStates, input: &Input<'_>, at: usize, patset: &mut PatternSet, ) { instrument!(|c| c.record_state_set(&curr.set)); let utf8empty = self.get_nfa().has_empty() && self.get_nfa().is_utf8(); let ActiveStates { ref set, ref mut slot_table } = *curr; for sid in set.iter() { let pid = match self.next(stack, slot_table, next, input, at, sid) { None => continue, Some(pid) => pid, }; // This handles the case of finding a zero-width match that splits // a codepoint. Namely, if we're in UTF-8 mode AND we know we can // match the empty string, then the only valid way of getting to // this point with an offset that splits a codepoint is when we // have an empty match. Such matches, in UTF-8 mode, must not be // reported. So we just skip them here and pretend as if we did // not see a match. 
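            // (The non-overlapping search deals with this same case
            // differently: see 'search_slots', which uses
            // 'empty::skip_splits_fwd' to skip over such splits after a
            // candidate match is found.)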
if utf8empty && !input.is_char_boundary(at) { continue; } let _ = patset.try_insert(pid); if !self.config.get_match_kind().continue_past_first_match() { break; } } } /// Starting from 'sid', if the position 'at' in the 'input' haystack has a /// transition defined out of 'sid', then add the state transitioned to and /// its epsilon closure to the 'next' set of states to explore. /// /// 'stack' is used by the epsilon closure computation to perform a depth /// first traversal of the NFA. /// /// 'curr_slot_table' should be the table of slots for the current set of /// states being explored. If there is a transition out of 'sid', then /// sid's row in the slot table is used to perform the epsilon closure. #[cfg_attr(feature = "perf-inline", inline(always))] fn next( &self, stack: &mut Vec<FollowEpsilon>, curr_slot_table: &mut SlotTable, next: &mut ActiveStates, input: &Input<'_>, at: usize, sid: StateID, ) -> Option<PatternID> { instrument!(|c| c.record_step(sid)); match *self.nfa.state(sid) { State::Fail | State::Look { .. } | State::Union { .. } | State::BinaryUnion { .. } | State::Capture { .. } => None, State::ByteRange { ref trans } => { if trans.matches(input.haystack(), at) { let slots = curr_slot_table.for_state(sid); // OK because 'at <= haystack.len() < usize::MAX', so // adding 1 will never wrap. let at = at.wrapping_add(1); self.epsilon_closure( stack, slots, next, input, at, trans.next, ); } None } State::Sparse(ref sparse) => { if let Some(next_sid) = sparse.matches(input.haystack(), at) { let slots = curr_slot_table.for_state(sid); // OK because 'at <= haystack.len() < usize::MAX', so // adding 1 will never wrap. let at = at.wrapping_add(1); self.epsilon_closure( stack, slots, next, input, at, next_sid, ); } None } State::Dense(ref dense) => { if let Some(next_sid) = dense.matches(input.haystack(), at) { let slots = curr_slot_table.for_state(sid); // OK because 'at <= haystack.len() < usize::MAX', so // adding 1 will never wrap. let at = at.wrapping_add(1); self.epsilon_closure( stack, slots, next, input, at, next_sid, ); } None } State::Match { pattern_id } => Some(pattern_id), } } /// Compute the epsilon closure of 'sid', writing the closure into 'next' /// while copying slot values from 'curr_slots' into corresponding states /// in 'next'. 'curr_slots' should be the slot values corresponding to /// 'sid'. /// /// The given 'stack' is used to perform a depth first traversal of the /// NFA by recursively following all epsilon transitions out of 'sid'. /// Conditional epsilon transitions are followed if and only if they are /// satisfied for the position 'at' in the 'input' haystack. /// /// While this routine may write to 'curr_slots', once it returns, any /// writes are undone and the original values (even if absent) are /// restored. #[cfg_attr(feature = "perf-inline", inline(always))] fn epsilon_closure( &self, stack: &mut Vec<FollowEpsilon>, curr_slots: &mut [Option<NonMaxUsize>], next: &mut ActiveStates, input: &Input<'_>, at: usize, sid: StateID, ) { instrument!(|c| { c.record_closure(sid); c.record_stack_push(sid); }); stack.push(FollowEpsilon::Explore(sid)); while let Some(frame) = stack.pop() { match frame { FollowEpsilon::RestoreCapture { slot, offset: pos } => { curr_slots[slot] = pos; } FollowEpsilon::Explore(sid) => { self.epsilon_closure_explore( stack, curr_slots, next, input, at, sid, ); } } } } /// Explore all of the epsilon transitions out of 'sid'. 
This is mostly /// split out from 'epsilon_closure' in order to clearly delineate /// the actual work of computing an epsilon closure from the stack /// book-keeping. /// /// This will push any additional explorations needed on to 'stack'. /// /// 'curr_slots' should refer to the slots for the currently active NFA /// state. That is, the current state we are stepping through. These /// slots are mutated in place as new 'Captures' states are traversed /// during epsilon closure, but the slots are restored to their original /// values once the full epsilon closure is completed. The ultimate use of /// 'curr_slots' is to copy them to the corresponding 'next_slots', so that /// the capturing group spans are forwarded from the currently active state /// to the next. /// /// 'next' refers to the next set of active states. Computing an epsilon /// closure may increase the next set of active states. /// /// 'input' refers to the caller's input configuration and 'at' refers to /// the current position in the haystack. These are used to check whether /// conditional epsilon transitions (like look-around) are satisfied at /// the current position. If they aren't, then the epsilon closure won't /// include them. #[cfg_attr(feature = "perf-inline", inline(always))] fn epsilon_closure_explore( &self, stack: &mut Vec<FollowEpsilon>, curr_slots: &mut [Option<NonMaxUsize>], next: &mut ActiveStates, input: &Input<'_>, at: usize, mut sid: StateID, ) { // We can avoid pushing some state IDs on to our stack in precisely // the cases where a 'push(x)' would be immediately followed by a 'x // = pop()'. This is achieved by this outer-loop. We simply set 'sid' // to be the next state ID we want to explore once we're done with // our initial exploration. In practice, this avoids a lot of stack // thrashing. loop { instrument!(|c| c.record_set_insert(sid)); // Record this state as part of our next set of active states. If // we've already explored it, then no need to do it again. if !next.set.insert(sid) { return; } match *self.nfa.state(sid) { State::Fail | State::Match { .. } | State::ByteRange { .. } | State::Sparse { .. } | State::Dense { .. } => { next.slot_table.for_state(sid).copy_from_slice(curr_slots); return; } State::Look { look, next } => { // OK because we don't permit building a searcher with a // Unicode word boundary if the requisite Unicode data is // unavailable. if !self.nfa.look_matcher().matches_inline( look, input.haystack(), at, ) { return; } sid = next; } State::Union { ref alternates } => { sid = match alternates.get(0) { None => return, Some(&sid) => sid, }; instrument!(|c| { for &alt in &alternates[1..] { c.record_stack_push(alt); } }); stack.extend( alternates[1..] .iter() .copied() .rev() .map(FollowEpsilon::Explore), ); } State::BinaryUnion { alt1, alt2 } => { sid = alt1; instrument!(|c| c.record_stack_push(sid)); stack.push(FollowEpsilon::Explore(alt2)); } State::Capture { next, slot, .. } => { // There's no need to do anything with slots that // ultimately won't be copied into the caller-provided // 'Captures' value. So we just skip dealing with them at // all. if slot.as_usize() < curr_slots.len() { instrument!(|c| c.record_stack_push(sid)); stack.push(FollowEpsilon::RestoreCapture { slot, offset: curr_slots[slot], }); // OK because length of a slice must fit into an isize. curr_slots[slot] = Some(NonMaxUsize::new(at).unwrap()); } sid = next; } } } } /// Return the starting configuration of a PikeVM search. 
/// /// The "start config" is basically whether the search should be anchored /// or not and the NFA state ID at which to begin the search. The state ID /// returned always corresponds to an anchored starting state even when the /// search is unanchored. This is because the PikeVM search loop deals with /// unanchored searches with an explicit epsilon closure out of the start /// state. /// /// This routine accounts for both the caller's `Input` configuration /// and the pattern itself. For example, even if the caller asks for an /// unanchored search, if the pattern itself is anchored, then this will /// always return 'true' because implementing an unanchored search in that /// case would be incorrect. /// /// Similarly, if the caller requests an anchored search for a particular /// pattern, then the starting state ID returned will reflect that. /// /// If a pattern ID is given in the input configuration that is not in /// this regex, then `None` is returned. fn start_config(&self, input: &Input<'_>) -> Option<(bool, StateID)> { match input.get_anchored() { // Only way we're unanchored is if both the caller asked for an // unanchored search *and* the pattern is itself not anchored. Anchored::No => Some(( self.nfa.is_always_start_anchored(), self.nfa.start_anchored(), )), Anchored::Yes => Some((true, self.nfa.start_anchored())), Anchored::Pattern(pid) => { Some((true, self.nfa.start_pattern(pid)?)) } } } } /// An iterator over all non-overlapping matches for a particular search. /// /// The iterator yields a [`Match`] value until no more matches could be found. /// /// The lifetime parameters are as follows: /// /// * `'r` represents the lifetime of the PikeVM. /// * `'c` represents the lifetime of the PikeVM's cache. /// * `'h` represents the lifetime of the haystack being searched. /// /// This iterator can be created with the [`PikeVM::find_iter`] method. #[derive(Debug)] pub struct FindMatches<'r, 'c, 'h> { re: &'r PikeVM, cache: &'c mut Cache, caps: Captures, it: iter::Searcher<'h>, } impl<'r, 'c, 'h> Iterator for FindMatches<'r, 'c, 'h> { type Item = Match; #[inline] fn next(&mut self) -> Option<Match> { // Splitting 'self' apart seems necessary to appease borrowck. let FindMatches { re, ref mut cache, ref mut caps, ref mut it } = *self; // 'advance' converts errors into panics, which is OK here because // the PikeVM can never return an error. it.advance(|input| { re.search(cache, input, caps); Ok(caps.get_match()) }) } } /// An iterator over all non-overlapping leftmost matches, with their capturing /// groups, for a particular search. /// /// The iterator yields a [`Captures`] value until no more matches could be /// found. /// /// The lifetime parameters are as follows: /// /// * `'r` represents the lifetime of the PikeVM. /// * `'c` represents the lifetime of the PikeVM's cache. /// * `'h` represents the lifetime of the haystack being searched. /// /// This iterator can be created with the [`PikeVM::captures_iter`] method. #[derive(Debug)] pub struct CapturesMatches<'r, 'c, 'h> { re: &'r PikeVM, cache: &'c mut Cache, caps: Captures, it: iter::Searcher<'h>, } impl<'r, 'c, 'h> Iterator for CapturesMatches<'r, 'c, 'h> { type Item = Captures; #[inline] fn next(&mut self) -> Option<Captures> { // Splitting 'self' apart seems necessary to appease borrowck. let CapturesMatches { re, ref mut cache, ref mut caps, ref mut it } = *self; // 'advance' converts errors into panics, which is OK here because // the PikeVM can never return an error. 
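        // Note that 'advance' is called here for its side effect of filling
        // in 'caps'; the 'Match' it returns is discarded and we instead
        // inspect (and clone) the populated 'caps' below.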
it.advance(|input| { re.search(cache, input, caps); Ok(caps.get_match()) }); if caps.is_match() { Some(caps.clone()) } else { None } } } /// A cache represents mutable state that a [`PikeVM`] requires during a /// search. /// /// For a given [`PikeVM`], its corresponding cache may be created either via /// [`PikeVM::create_cache`], or via [`Cache::new`]. They are equivalent in /// every way, except the former does not require explicitly importing `Cache`. /// /// A particular `Cache` is coupled with the [`PikeVM`] from which it /// was created. It may only be used with that `PikeVM`. A cache and its /// allocations may be re-purposed via [`Cache::reset`], in which case, it can /// only be used with the new `PikeVM` (and not the old one). #[derive(Clone, Debug)] pub struct Cache { /// Stack used while computing epsilon closure. This effectively lets us /// move what is more naturally expressed through recursion to a stack /// on the heap. stack: Vec<FollowEpsilon>, /// The current active states being explored for the current byte in the /// haystack. curr: ActiveStates, /// The next set of states we're building that will be explored for the /// next byte in the haystack. next: ActiveStates, } impl Cache { /// Create a new [`PikeVM`] cache. /// /// A potentially more convenient routine to create a cache is /// [`PikeVM::create_cache`], as it does not require also importing the /// `Cache` type. /// /// If you want to reuse the returned `Cache` with some other `PikeVM`, /// then you must call [`Cache::reset`] with the desired `PikeVM`. pub fn new(re: &PikeVM) -> Cache { Cache { stack: vec![], curr: ActiveStates::new(re), next: ActiveStates::new(re), } } /// Reset this cache such that it can be used for searching with a /// different [`PikeVM`]. /// /// A cache reset permits reusing memory already allocated in this cache /// with a different `PikeVM`. /// /// # Example /// /// This shows how to re-purpose a cache for use with a different `PikeVM`. /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Match}; /// /// let re1 = PikeVM::new(r"\w")?; /// let re2 = PikeVM::new(r"\W")?; /// /// let mut cache = re1.create_cache(); /// assert_eq!( /// Some(Match::must(0, 0..2)), /// re1.find_iter(&mut cache, "Δ").next(), /// ); /// /// // Using 'cache' with re2 is not allowed. It may result in panics or /// // incorrect results. In order to re-purpose the cache, we must reset /// // it with the PikeVM we'd like to use it with. /// // /// // Similarly, after this reset, using the cache with 're1' is also not /// // allowed. /// cache.reset(&re2); /// assert_eq!( /// Some(Match::must(0, 0..3)), /// re2.find_iter(&mut cache, "☃").next(), /// ); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn reset(&mut self, re: &PikeVM) { self.curr.reset(re); self.next.reset(re); } /// Returns the heap memory usage, in bytes, of this cache. /// /// This does **not** include the stack size used up by this cache. To /// compute that, use `std::mem::size_of::<Cache>()`. pub fn memory_usage(&self) -> usize { use core::mem::size_of; (self.stack.len() * size_of::<FollowEpsilon>()) + self.curr.memory_usage() + self.next.memory_usage() } /// Clears this cache. This should be called at the start of every search /// to ensure we start with a clean slate. /// /// This also sets the length of the capturing groups used in the current /// search. 
This permits an optimization where by 'SlotTable::for_state' /// only returns the number of slots equivalent to the number of slots /// given in the 'Captures' value. This may be less than the total number /// of possible slots, e.g., when one only wants to track overall match /// offsets. This in turn permits less copying of capturing group spans /// in the PikeVM. fn setup_search(&mut self, captures_slot_len: usize) { self.stack.clear(); self.curr.setup_search(captures_slot_len); self.next.setup_search(captures_slot_len); } } /// A set of active states used to "simulate" the execution of an NFA via the /// PikeVM. /// /// There are two sets of these used during NFA simulation. One set corresponds /// to the "current" set of states being traversed for the current position /// in a haystack. The other set corresponds to the "next" set of states being /// built, which will become the new "current" set for the next position in the /// haystack. These two sets correspond to CLIST and NLIST in Thompson's /// original paper regexes: https://dl.acm.org/doi/pdf/10.1145/363347.363387 /// /// In addition to representing a set of NFA states, this also maintains slot /// values for each state. These slot values are what turn the NFA simulation /// into the "Pike VM." Namely, they track capturing group values for each /// state. During the computation of epsilon closure, we copy slot values from /// states in the "current" set to the "next" set. Eventually, once a match /// is found, the slot values for that match state are what we write to the /// caller provided 'Captures' value. #[derive(Clone, Debug)] struct ActiveStates { /// The set of active NFA states. This set preserves insertion order, which /// is critical for simulating the match semantics of backtracking regex /// engines. set: SparseSet, /// The slots for every NFA state, where each slot stores a (possibly /// absent) offset. Every capturing group has two slots. One for a start /// offset and one for an end offset. slot_table: SlotTable, } impl ActiveStates { /// Create a new set of active states for the given PikeVM. The active /// states returned may only be used with the given PikeVM. (Use 'reset' /// to re-purpose the allocation for a different PikeVM.) fn new(re: &PikeVM) -> ActiveStates { let mut active = ActiveStates { set: SparseSet::new(0), slot_table: SlotTable::new(), }; active.reset(re); active } /// Reset this set of active states such that it can be used with the given /// PikeVM (and only that PikeVM). fn reset(&mut self, re: &PikeVM) { self.set.resize(re.get_nfa().states().len()); self.slot_table.reset(re); } /// Return the heap memory usage, in bytes, used by this set of active /// states. /// /// This does not include the stack size of this value. fn memory_usage(&self) -> usize { self.set.memory_usage() + self.slot_table.memory_usage() } /// Setup this set of active states for a new search. The given slot /// length should be the number of slots in a caller provided 'Captures' /// (and may be zero). fn setup_search(&mut self, captures_slot_len: usize) { self.set.clear(); self.slot_table.setup_search(captures_slot_len); } } /// A table of slots, where each row represent a state in an NFA. Thus, the /// table has room for storing slots for every single state in an NFA. /// /// This table is represented with a single contiguous allocation. In general, /// the notion of "capturing group" doesn't really exist at this level of /// abstraction, hence the name "slot" instead. 
(Indeed, every capturing group /// maps to a pair of slots, one for the start offset and one for the end /// offset.) Slots are indexed by the 'Captures' NFA state. /// /// N.B. Not every state actually needs a row of slots. Namely, states that /// only have epsilon transitions currently never have anything written to /// their rows in this table. Thus, the table is somewhat wasteful in its heap /// usage. However, it is important to maintain fast random access by state /// ID, which means one giant table tends to work well. RE2 takes a different /// approach here and allocates each row as its own reference counted thing. /// I explored such a strategy at one point here, but couldn't get it to work /// well using entirely safe code. (To the ambitious reader: I encourage you to /// re-litigate that experiment.) I very much wanted to stick to safe code, but /// could be convinced otherwise if there was a solid argument and the safety /// was encapsulated well. #[derive(Clone, Debug)] struct SlotTable { /// The actual table of offsets. table: Vec<Option<NonMaxUsize>>, /// The number of slots per state, i.e., the table's stride or the length /// of each row. slots_per_state: usize, /// The number of slots in the caller-provided 'Captures' value for the /// current search. Setting this to 'slots_per_state' is always correct, /// but may be wasteful. slots_for_captures: usize, } impl SlotTable { /// Create a new slot table. /// /// One should call 'reset' with the corresponding PikeVM before use. fn new() -> SlotTable { SlotTable { table: vec![], slots_for_captures: 0, slots_per_state: 0 } } /// Reset this slot table such that it can be used with the given PikeVM /// (and only that PikeVM). fn reset(&mut self, re: &PikeVM) { let nfa = re.get_nfa(); self.slots_per_state = nfa.group_info().slot_len(); // This is always correct, but may be reduced for a particular search // if a 'Captures' has fewer slots, e.g., none at all or only slots // for tracking the overall match instead of all slots for every // group. self.slots_for_captures = core::cmp::max( self.slots_per_state, nfa.pattern_len().checked_mul(2).unwrap(), ); let len = nfa .states() .len() .checked_mul(self.slots_per_state) // Add space to account for scratch space used during a search. .and_then(|x| x.checked_add(self.slots_for_captures)) // It seems like this could actually panic on legitimate inputs on // 32-bit targets, and very likely to panic on 16-bit. Should we // somehow convert this to an error? What about something similar // for the lazy DFA cache? If you're tripping this assert, please // file a bug. .expect("slot table length doesn't overflow"); // This happens about as often as a regex is compiled, so it probably // should be at debug level, but I found it quite distracting and not // particularly useful. trace!( "resizing PikeVM active states table to {} entries \ (slots_per_state={})", len, self.slots_per_state, ); self.table.resize(len, None); } /// Return the heap memory usage, in bytes, used by this slot table. /// /// This does not include the stack size of this value. fn memory_usage(&self) -> usize { self.table.len() * core::mem::size_of::<Option<NonMaxUsize>>() } /// Perform any per-search setup for this slot table. /// /// In particular, this sets the length of the number of slots used in the /// 'Captures' given by the caller (if any at all). 
This number may be /// smaller than the total number of slots available, e.g., when the caller /// is only interested in tracking the overall match and not the spans of /// every matching capturing group. Only tracking the overall match can /// save a substantial amount of time copying capturing spans during a /// search. fn setup_search(&mut self, captures_slot_len: usize) { self.slots_for_captures = captures_slot_len; } /// Return a mutable slice of the slots for the given state. /// /// Note that the length of the slice returned may be less than the total /// number of slots available for this state. In particular, the length /// always matches the number of slots indicated via 'setup_search'. fn for_state(&mut self, sid: StateID) -> &mut [Option<NonMaxUsize>] { let i = sid.as_usize() * self.slots_per_state; &mut self.table[i..i + self.slots_for_captures] } /// Return a slice of slots of appropriate length where every slot offset /// is guaranteed to be absent. This is useful in cases where you need to /// compute an epsilon closure outside of the user supplied regex, and thus /// never want it to have any capturing slots set. fn all_absent(&mut self) -> &mut [Option<NonMaxUsize>] { let i = self.table.len() - self.slots_for_captures; &mut self.table[i..i + self.slots_for_captures] } } /// Represents a stack frame for use while computing an epsilon closure. /// /// (An "epsilon closure" refers to the set of reachable NFA states from a /// single state without consuming any input. That is, the set of all epsilon /// transitions not only from that single state, but from every other state /// reachable by an epsilon transition as well. This is why it's called a /// "closure." Computing an epsilon closure is also done during DFA /// determinization! Compare and contrast the epsilon closure here in this /// PikeVM and the one used for determinization in crate::util::determinize.) /// /// Computing the epsilon closure in a Thompson NFA proceeds via a depth /// first traversal over all epsilon transitions from a particular state. /// (A depth first traversal is important because it emulates the same priority /// of matches that is typically found in backtracking regex engines.) This /// depth first traversal is naturally expressed using recursion, but to avoid /// a call stack size proportional to the size of a regex, we put our stack on /// the heap instead. /// /// This stack thus consists of call frames. The typical call frame is /// `Explore`, which instructs epsilon closure to explore the epsilon /// transitions from that state. (Subsequent epsilon transitions are then /// pushed on to the stack as more `Explore` frames.) If the state ID being /// explored has no epsilon transitions, then the capturing group slots are /// copied from the original state that sparked the epsilon closure (from the /// 'step' routine) to the state ID being explored. This way, capturing group /// slots are forwarded from the previous state to the next. /// /// The other stack frame, `RestoreCaptures`, instructs the epsilon closure to /// set the position for a particular slot back to some particular offset. This /// frame is pushed when `Explore` sees a `Capture` transition. `Explore` will /// set the offset of the slot indicated in `Capture` to the current offset, /// and then push the old offset on to the stack as a `RestoreCapture` frame. /// Thus, the new offset is only used until the epsilon closure reverts back to /// the `RestoreCapture` frame. 
In effect, this gives the `Capture` epsilon /// transition its "scope" to only states that come "after" it during depth /// first traversal. #[derive(Clone, Debug)] enum FollowEpsilon { /// Explore the epsilon transitions from a state ID. Explore(StateID), /// Reset the given `slot` to the given `offset` (which might be `None`). RestoreCapture { slot: SmallIndex, offset: Option<NonMaxUsize> }, } /// A set of counters that "instruments" a PikeVM search. To enable this, you /// must enable the 'internal-instrument-pikevm' feature. Then run your Rust /// program with RUST_LOG=regex_automata::nfa::thompson::pikevm=trace set in /// the environment. The metrics collected will be dumped automatically for /// every search executed by the PikeVM. /// /// NOTE: When 'internal-instrument-pikevm' is enabled, it will likely cause an /// absolute decrease in wall-clock performance, even if the 'trace' log level /// isn't enabled. (Although, we do try to avoid extra costs when 'trace' isn't /// enabled.) The main point of instrumentation is to get counts of various /// events that occur during the PikeVM's execution. /// /// This is a somewhat hacked together collection of metrics that are useful /// to gather from a PikeVM search. In particular, it lets us scrutinize the /// performance profile of a search beyond what general purpose profiling tools /// give us. Namely, we orient the profiling data around the specific states of /// the NFA. /// /// In other words, this lets us see which parts of the NFA graph are most /// frequently activated. This then provides direction for optimization /// opportunities. /// /// The really sad part about this is that it absolutely clutters up the PikeVM /// implementation. :'( Another approach would be to just manually add this /// code in whenever I want this kind of profiling data, but it's complicated /// and tedious enough that I went with this approach... for now. /// /// When instrumentation is enabled (which also turns on 'logging'), then a /// `Counters` is initialized for every search and `trace`'d just before the /// search returns to the caller. /// /// Tip: When debugging performance problems with the PikeVM, it's best to try /// to work with an NFA that is as small as possible. Otherwise the state graph /// is likely to be too big to digest. #[cfg(feature = "internal-instrument-pikevm")] #[derive(Clone, Debug)] struct Counters { /// The number of times the NFA is in a particular permutation of states. state_sets: alloc::collections::BTreeMap<Vec<StateID>, u64>, /// The number of times 'step' is called for a particular state ID (which /// indexes this array). steps: Vec<u64>, /// The number of times an epsilon closure was computed for a state. closures: Vec<u64>, /// The number of times a particular state ID is pushed on to a stack while /// computing an epsilon closure. stack_pushes: Vec<u64>, /// The number of times a particular state ID is inserted into a sparse set /// while computing an epsilon closure. 
set_inserts: Vec<u64>, } #[cfg(feature = "internal-instrument-pikevm")] impl Counters { fn empty() -> Counters { Counters { state_sets: alloc::collections::BTreeMap::new(), steps: vec![], closures: vec![], stack_pushes: vec![], set_inserts: vec![], } } fn reset(&mut self, nfa: &NFA) { let len = nfa.states().len(); self.state_sets.clear(); self.steps.clear(); self.steps.resize(len, 0); self.closures.clear(); self.closures.resize(len, 0); self.stack_pushes.clear(); self.stack_pushes.resize(len, 0); self.set_inserts.clear(); self.set_inserts.resize(len, 0); } fn eprint(&self, nfa: &NFA) { trace!("===== START PikeVM Instrumentation Output ====="); // We take the top-K most occurring state sets. Otherwise the output // is likely to be overwhelming. And we probably only care about the // most frequently occurring ones anyway. const LIMIT: usize = 20; let mut set_counts = self.state_sets.iter().collect::<Vec<(&Vec<StateID>, &u64)>>(); set_counts.sort_by_key(|(_, &count)| core::cmp::Reverse(count)); trace!("## PikeVM frequency of state sets (top {})", LIMIT); for (set, count) in set_counts.iter().take(LIMIT) { trace!("{:?}: {}", set, count); } if set_counts.len() > LIMIT { trace!( "... {} sets omitted (out of {} total)", set_counts.len() - LIMIT, set_counts.len(), ); } trace!(""); trace!("## PikeVM total frequency of events"); trace!( "steps: {}, closures: {}, stack-pushes: {}, set-inserts: {}", self.steps.iter().copied().sum::<u64>(), self.closures.iter().copied().sum::<u64>(), self.stack_pushes.iter().copied().sum::<u64>(), self.set_inserts.iter().copied().sum::<u64>(), ); trace!(""); trace!("## PikeVM frequency of events broken down by state"); for sid in 0..self.steps.len() { trace!( "{:06}: steps: {}, closures: {}, \ stack-pushes: {}, set-inserts: {}", sid, self.steps[sid], self.closures[sid], self.stack_pushes[sid], self.set_inserts[sid], ); } trace!(""); trace!("## NFA debug display"); trace!("{:?}", nfa); trace!("===== END PikeVM Instrumentation Output ====="); } fn record_state_set(&mut self, set: &SparseSet) { let set = set.iter().collect::<Vec<StateID>>(); *self.state_sets.entry(set).or_insert(0) += 1; } fn record_step(&mut self, sid: StateID) { self.steps[sid] += 1; } fn record_closure(&mut self, sid: StateID) { self.closures[sid] += 1; } fn record_stack_push(&mut self, sid: StateID) { self.stack_pushes[sid] += 1; } fn record_set_insert(&mut self, sid: StateID) { self.set_inserts[sid] += 1; } } <file_sep>/regex-cli/args/syntax.rs use std::borrow::Borrow; use { anyhow::Context, lexopt::{Arg, Parser}, regex_automata::util::syntax, regex_syntax::{ast::Ast, hir::Hir}, }; use crate::args::{self, Configurable, Usage}; /// This exposes all of the configuration knobs on a /// regex_automata::util::syntax::Config via CLI flags. #[derive(Debug, Default)] pub struct Config { syntax: syntax::Config, } impl Config { /// Return a `syntax::Config` object from this configuration. pub fn syntax(&self) -> anyhow::Result<syntax::Config> { Ok(self.syntax.clone()) } /// Parses the given pattern into an `Ast`. fn ast(&self, pattern: &str) -> anyhow::Result<Ast> { regex_syntax::ast::parse::ParserBuilder::new() .nest_limit(self.syntax.get_nest_limit()) .octal(self.syntax.get_octal()) .ignore_whitespace(self.syntax.get_ignore_whitespace()) .build() .parse(pattern) .context("failed to parse pattern") } /// Parses the given patterns into a corresponding sequence of `Ast`s. If /// any of the patterns fail to parse, then an error is returned. 
pub fn asts<P: AsRef<str>>( &self, patterns: &[P], ) -> anyhow::Result<Vec<Ast>> { patterns .iter() .enumerate() .map(|(i, p)| { let p = p.as_ref(); self.ast(p).with_context(|| { format!("failed to parse pattern {} to AST: '{}'", i, p,) }) }) .collect() } /// Translates the given pattern and `Ast` into an `Hir`. pub fn hir(&self, pattern: &str, ast: &Ast) -> anyhow::Result<Hir> { regex_syntax::hir::translate::TranslatorBuilder::new() .utf8(self.syntax.get_utf8()) .case_insensitive(self.syntax.get_case_insensitive()) .multi_line(self.syntax.get_multi_line()) .dot_matches_new_line(self.syntax.get_dot_matches_new_line()) .swap_greed(self.syntax.get_swap_greed()) .unicode(self.syntax.get_unicode()) .build() .translate(pattern, ast) .context("failed to translate pattern") } /// Translates the given patterns and corresponding `Ast`s into a /// corresponding sequence of `Hir`s. If any of the patterns fail to /// translate, then an error is returned. pub fn hirs<P: AsRef<str>, A: Borrow<Ast>>( &self, patterns: &[P], asts: &[A], ) -> anyhow::Result<Vec<Hir>> { patterns .iter() .zip(asts.iter()) .enumerate() .map(|(i, (pat, ast))| { let (pat, ast) = (pat.as_ref(), ast.borrow()); self.hir(pat, ast).with_context(|| { format!( "failed to translate pattern {} to HIR: '{}'", i, pat, ) }) }) .collect() } } impl Configurable for Config { fn configure( &mut self, p: &mut Parser, arg: &mut Arg, ) -> anyhow::Result<bool> { match *arg { Arg::Short('i') | Arg::Long("case-insensitive") => { self.syntax = self.syntax.case_insensitive(true); } Arg::Long("multi-line") => { self.syntax = self.syntax.multi_line(true); } Arg::Long("dot-matches-new-line") => { self.syntax = self.syntax.dot_matches_new_line(true); } Arg::Long("crlf") => { self.syntax = self.syntax.crlf(true); } Arg::Long("swap-greed") => { self.syntax = self.syntax.swap_greed(true); } Arg::Long("ignore-whitespace") => { self.syntax = self.syntax.ignore_whitespace(true); } Arg::Short('U') | Arg::Long("no-unicode") => { self.syntax = self.syntax.unicode(false); } Arg::Short('b') | Arg::Long("no-utf8-syntax") => { self.syntax = self.syntax.utf8(false); } Arg::Long("nest-limit") => { let limit = args::parse(p, "--nest-limit")?; self.syntax = self.syntax.nest_limit(limit); } Arg::Long("octal") => { self.syntax = self.syntax.octal(true); } _ => return Ok(false), } Ok(true) } fn usage(&self) -> &[Usage] { const USAGES: &'static [Usage] = &[ Usage::new( "-i, --case-insensitive", "Enable case insensitive mode.", r#" This enables case insensitive mode for all regex patterns given. When absent, all patterns are matched case sensitively. Note that individual patterns can have case insensitivity enabled via the inline regex flag 'i'. For example, '(?i:abc)'. "#, ), Usage::new( "--multi-line", "Enable multi-line mode.", r#" This enables multi-line mode for all regex patterns given. When multi-line mode is enabled, the anchors '^' and '$' turn into line anchors. That is, in addition to matching the start and end of a haystack, they also match at the start and end of a line, respectively. Note that individual patterns can have multi-line mode enabled via the inline regex flag 'm'. For example, '(?m:^)'. "#, ), Usage::new( "--dot-matches-new-line", "Make a dot match \\n.", r#" Enabling this causes a '.' (dot) to match the line terminator. By default, a dot is equivalent to '[^\n]'. (When CRLF mode is enabled it is equivalent to '[^\r\n]'.) Note that individual patterns can have this mode enabled via the inline regex flag 's'. For example, '(?s:.)'. 
"#, ), Usage::new( "--crlf", "Line anchors are CRLF aware.", r#" When enabled, line anchors become CRLF aware. That is, patterns like '(?m:^)' and '(?m:$)' only consider '\n' by default. But when CRLF mode is enabled, line anchors consider both '\r' and '\n'. In particular, line anchors will match both '\r' and '\n', but never between '\r' and '\n'. Additionally, when this mode is enabled, '.' is equivalent to '[^\r\n]' instead of '[^\n]'. Note that this does not enable multi-line mode by itself. This only applies to '^' and '$' when multi-line mode is enabled. Note that individual patterns can have CRLF mode enabled via the inline regex flag 'R'. For example, '(?Rm:^)'. "#, ), Usage::new( "--swap-greed", "Swap the meaning of greediness.", r#" This enables "swap greed" mode for all regex patterns given. When greediness is swapped, greedy patterns like 'a+' become equivalent to 'a+?', and ungreedy patterns like 'a+?' become equivalent to 'a+'. Note that individual patterns can have "swap greed" mode enabled via the inline regex flag 'U'. For example, '(?U:a+)'. "#, ), Usage::new( "--ignore-whitespace", "Enable whitespace insensitive mode.", r#" This enables whitespace insensitive mode for all regex patterns given. When enabled, all whitespace in regex patterns is ignored. Moreover, any lines whose first non-whitespace character is '#' will be ignored and treated as a comment. Note that individual patterns can have whitespace insensitivity enabled via the inline regex flag 'x'. For example, '(?x:a b c)' is equivalent to 'abc'. "#, ), Usage::new( "-U, --no-unicode", "Disable Unicode mode.", r#" This disables Unicode mode for all regex patterns given. When Unicode mode is disabled, the logical unit of searching is a single byte, where as when it is enabled the logical unit of searching is a single codepoint. In practice, this means that Unicode mode makes a number of alterations to the syntax and semantics of a regex. 1) '[^a]' matches any codepoint that isn't 'a' instead of any byte that isn't 'a'. 2) Case insensitive mode takes Unicode simple case folding rules into account. 3) Unicode literals and character classes are allowed. Note that individual patterns can have Unicode mode disabled via the inline regex flag 'u'. For example, '(?-u:\xFF)' matches the byte '\xFF' where as '(?u:\xFF)' matches the UTF-8 encoding of the Unicode codepoint U+00FF. "#, ), Usage::new( "-b, --no-utf8-syntax", "Disable UTF-8 mode for the regex syntax.", r#" This disables UTF-8 mode for all regex patterns given. Disabling UTF-8 mode permits regexes that match invalid UTF-8. When UTF-8 mode is enabled, then patterns are limited to matches corresponding to valid UTF-8. This only applies to non-empty matches. For empty matches, the UTF-8 mode is controlled on the NFA, via --no-utf8-nfa (if applicable). "#, ), Usage::new( "--nest-limit", "Set the nest limit on the syntax.", r#" This sets the nesting limit of the regex syntax on all patterns. This controls how many "nested" constructs are permitted in the pattern. This is useful for preventing pathological regexes that require too much nesting. For example, if one wants to do recursive analysis on the syntax of a regex, you usually need to check that it doesn't have too much nesting or else you risk a stack overflow. Note that the default is likely big enough to permit most regex patterns. "#, ), Usage::new( "--octal", "Permit octal escapes.", r#" This permits octal escape sequences in the regex syntax. For example, it treats '\17' as equivalent to '\x0F'. 
This is disabled by default. "#, ), ]; USAGES } } <file_sep>/regex-automata/src/util/escape.rs /*! Provides convenience routines for escaping raw bytes. Since this crate tends to deal with `&[u8]` everywhere and the default `Debug` implementation just shows decimal integers, it makes debugging those representations quite difficult. This module provides types that show `&[u8]` as if it were a string, with invalid UTF-8 escaped into its byte-by-byte hex representation. */ use crate::util::utf8; /// Provides a convenient `Debug` implementation for a `u8`. /// /// The `Debug` impl treats the byte as an ASCII, and emits a human readable /// representation of it. If the byte isn't ASCII, then it's emitted as a hex /// escape sequence. #[derive(Clone, Copy)] pub struct DebugByte(pub u8); impl core::fmt::Debug for DebugByte { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { // Special case ASCII space. It's too hard to read otherwise, so // put quotes around it. I sometimes wonder whether just '\x20' would // be better... if self.0 == b' ' { return write!(f, "' '"); } // 10 bytes is enough to cover any output from ascii::escape_default. let mut bytes = [0u8; 10]; let mut len = 0; for (i, mut b) in core::ascii::escape_default(self.0).enumerate() { // capitalize \xab to \xAB if i >= 2 && b'a' <= b && b <= b'f' { b -= 32; } bytes[len] = b; len += 1; } write!(f, "{}", core::str::from_utf8(&bytes[..len]).unwrap()) } } /// Provides a convenient `Debug` implementation for `&[u8]`. /// /// This generally works best when the bytes are presumed to be mostly UTF-8, /// but will work for anything. For any bytes that aren't UTF-8, they are /// emitted as hex escape sequences. pub struct DebugHaystack<'a>(pub &'a [u8]); impl<'a> core::fmt::Debug for DebugHaystack<'a> { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { write!(f, "\"")?; // This is a sad re-implementation of a similar impl found in bstr. 
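        // Decode the haystack one codepoint at a time: valid UTF-8 is written
        // via the 'match ch' below (mostly with escape_debug), while bytes
        // that aren't valid UTF-8 fall out as Err and are emitted as \xHH hex
        // escapes.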
let mut bytes = self.0; while let Some(result) = utf8::decode(bytes) { let ch = match result { Ok(ch) => ch, Err(byte) => { write!(f, r"\x{:02x}", byte)?; bytes = &bytes[1..]; continue; } }; bytes = &bytes[ch.len_utf8()..]; match ch { '\0' => write!(f, "\\0")?, // ASCII control characters except \0, \n, \r, \t '\x01'..='\x08' | '\x0b' | '\x0c' | '\x0e'..='\x19' | '\x7f' => { write!(f, "\\x{:02x}", u32::from(ch))?; } '\n' | '\r' | '\t' | _ => { write!(f, "{}", ch.escape_debug())?; } } } write!(f, "\"")?; Ok(()) } } <file_sep>/regex-automata/src/util/prefilter/byteset.rs use crate::util::{ prefilter::PrefilterI, search::{MatchKind, Span}, }; #[derive(Clone, Debug)] pub(crate) struct ByteSet([bool; 256]); impl ByteSet { pub(crate) fn new<B: AsRef<[u8]>>( _kind: MatchKind, needles: &[B], ) -> Option<ByteSet> { #[cfg(not(feature = "perf-literal-multisubstring"))] { None } #[cfg(feature = "perf-literal-multisubstring")] { let mut set = [false; 256]; for needle in needles.iter() { let needle = needle.as_ref(); if needle.len() != 1 { return None; } set[usize::from(needle[0])] = true; } Some(ByteSet(set)) } } } impl PrefilterI for ByteSet { fn find(&self, haystack: &[u8], span: Span) -> Option<Span> { haystack[span].iter().position(|&b| self.0[usize::from(b)]).map(|i| { let start = span.start + i; let end = start + 1; Span { start, end } }) } fn prefix(&self, haystack: &[u8], span: Span) -> Option<Span> { let b = *haystack.get(span.start)?; if self.0[usize::from(b)] { Some(Span { start: span.start, end: span.start + 1 }) } else { None } } fn memory_usage(&self) -> usize { 0 } fn is_fast(&self) -> bool { false } } <file_sep>/testdata/substring.toml # These tests check that regex engines perform as expected when the search is # instructed to only search a substring of a haystack instead of the entire # haystack. This tends to exercise interesting edge cases that are otherwise # difficult to provoke. (But not necessarily impossible. Regex search iterators # for example, make use of the "search just a substring" APIs by changing the # starting position of a search to the end position of the previous match.) [[test]] name = "unicode-word-start" regex = '\b[0-9]+\b' haystack = "β123" bounds = { start = 2, end = 5 } matches = [] [[test]] name = "unicode-word-end" regex = '\b[0-9]+\b' haystack = "123β" bounds = { start = 0, end = 3 } matches = [] [[test]] name = "ascii-word-start" regex = '\b[0-9]+\b' haystack = "β123" bounds = { start = 2, end = 5 } matches = [[2, 5]] unicode = false [[test]] name = "ascii-word-end" regex = '\b[0-9]+\b' haystack = "123β" bounds = { start = 0, end = 3 } matches = [[0, 3]] unicode = false <file_sep>/regex-automata/src/util/sparse_set.rs /*! This module defines a sparse set data structure. Its most interesting properties are: * They preserve insertion order. * Set membership testing is done in constant time. * Set insertion is done in constant time. * Clearing the set is done in constant time. The cost for doing this is that the capacity of the set needs to be known up front, and the elements in the set are limited to state identifiers. These sets are principally used when traversing an NFA state graph. This happens at search time, for example, in the PikeVM. It also happens during DFA determinization. */ use alloc::{vec, vec::Vec}; use crate::util::primitives::StateID; /// A pairse of sparse sets. /// /// This is useful when one needs to compute NFA epsilon closures from a /// previous set of states derived from an epsilon closure. 
/// One set can be the starting states whereas the other set can be the
/// destination states after following the transitions for a particular byte
/// of input.
///
/// There is no significance to 'set1' or 'set2'. They are both sparse sets of
/// the same size.
///
/// The members of this struct are exposed so that callers may borrow 'set1'
/// and 'set2' individually without being forced to borrow both at the same
/// time.
#[derive(Clone, Debug)]
pub(crate) struct SparseSets {
    pub(crate) set1: SparseSet,
    pub(crate) set2: SparseSet,
}

impl SparseSets {
    /// Create a new pair of sparse sets where each set has the given capacity.
    ///
    /// This panics if the capacity given is bigger than `StateID::LIMIT`.
    pub(crate) fn new(capacity: usize) -> SparseSets {
        SparseSets {
            set1: SparseSet::new(capacity),
            set2: SparseSet::new(capacity),
        }
    }

    /// Resizes these sparse sets to have the new capacity given.
    ///
    /// The sets are automatically cleared.
    ///
    /// This panics if the capacity given is bigger than `StateID::LIMIT`.
    #[inline]
    pub(crate) fn resize(&mut self, new_capacity: usize) {
        self.set1.resize(new_capacity);
        self.set2.resize(new_capacity);
    }

    /// Clear both sparse sets.
    pub(crate) fn clear(&mut self) {
        self.set1.clear();
        self.set2.clear();
    }

    /// Swap set1 with set2.
    pub(crate) fn swap(&mut self) {
        core::mem::swap(&mut self.set1, &mut self.set2);
    }

    /// Returns the memory usage, in bytes, used by this pair of sparse sets.
    pub(crate) fn memory_usage(&self) -> usize {
        self.set1.memory_usage() + self.set2.memory_usage()
    }
}

/// A sparse set used for representing ordered NFA states.
///
/// This supports constant time addition and membership testing. Clearing an
/// entire set can also be done in constant time. Iteration yields elements
/// in the order in which they were inserted.
///
/// The data structure is based on: https://research.swtch.com/sparse
/// Note though that we don't actually use uninitialized memory. We generally
/// reuse sparse sets, so the initial allocation cost is bearable. However, its
/// other properties listed above are extremely useful.
#[derive(Clone)]
pub(crate) struct SparseSet {
    /// The number of elements currently in this set.
    len: usize,
    /// Dense contains the ids in the order in which they were inserted.
    dense: Vec<StateID>,
    /// Sparse maps ids to their location in dense.
    ///
    /// A state ID is in the set if and only if
    /// sparse[id] < len && id == dense[sparse[id]].
    ///
    /// Note that these are indices into 'dense'. It's a little weird to use
    /// StateID here, but we know our length can never exceed the bounds of
    /// StateID (enforced by 'resize') and StateID will be at most 4 bytes
    /// whereas a usize is likely double that in most cases.
    sparse: Vec<StateID>,
}

impl SparseSet {
    /// Create a new sparse set with the given capacity.
    ///
    /// Sparse sets have a fixed size and they cannot grow. Attempting to
    /// insert more distinct elements than the total capacity of the set will
    /// result in a panic.
    ///
    /// This panics if the capacity given is bigger than `StateID::LIMIT`.
    #[inline]
    pub(crate) fn new(capacity: usize) -> SparseSet {
        let mut set = SparseSet { len: 0, dense: vec![], sparse: vec![] };
        set.resize(capacity);
        set
    }

    /// Resizes this sparse set to have the new capacity given.
    ///
    /// This set is automatically cleared.
    ///
    /// This panics if the capacity given is bigger than `StateID::LIMIT`.
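    ///
    /// As a purely illustrative sketch of the clearing behavior (this is a
    /// crate-internal type, so the snippet below is not compiled as a
    /// doctest):
    ///
    /// ```ignore
    /// let mut set = SparseSet::new(8);
    /// set.insert(StateID::new(3).unwrap());
    /// assert!(set.contains(StateID::new(3).unwrap()));
    /// // Growing (or shrinking) the capacity always empties the set.
    /// set.resize(16);
    /// assert!(set.is_empty());
    /// assert_eq!(16, set.capacity());
    /// ```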
    #[inline]
    pub(crate) fn resize(&mut self, new_capacity: usize) {
        assert!(
            new_capacity <= StateID::LIMIT,
            "sparse set capacity cannot exceed {:?}",
            StateID::LIMIT
        );
        self.clear();
        self.dense.resize(new_capacity, StateID::ZERO);
        self.sparse.resize(new_capacity, StateID::ZERO);
    }

    /// Returns the capacity of this set.
    ///
    /// The capacity represents a fixed limit on the number of distinct
    /// elements that are allowed in this set. The capacity cannot be changed.
    #[inline]
    pub(crate) fn capacity(&self) -> usize {
        self.dense.len()
    }

    /// Returns the number of elements in this set.
    #[inline]
    pub(crate) fn len(&self) -> usize {
        self.len
    }

    /// Returns true if and only if this set is empty.
    #[inline]
    pub(crate) fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Insert the state ID value into this set and return true if the given
    /// state ID was not previously in this set.
    ///
    /// This operation is idempotent. If the given value is already in this
    /// set, then this is a no-op.
    ///
    /// If more than `capacity` ids are inserted, then this panics.
    ///
    /// This is marked as inline(always) since the compiler won't inline it
    /// otherwise, and it's a fairly hot piece of code in DFA determinization.
    #[cfg_attr(feature = "perf-inline", inline(always))]
    pub(crate) fn insert(&mut self, id: StateID) -> bool {
        if self.contains(id) {
            return false;
        }

        let i = self.len();
        assert!(
            i < self.capacity(),
            "{:?} exceeds capacity of {:?} when inserting {:?}",
            i,
            self.capacity(),
            id,
        );
        // OK since i < self.capacity() and self.capacity() is guaranteed to
        // be <= StateID::LIMIT.
        let index = StateID::new_unchecked(i);
        self.dense[index] = id;
        self.sparse[id] = index;
        self.len += 1;
        true
    }

    /// Returns true if and only if this set contains the given value.
    #[inline]
    pub(crate) fn contains(&self, id: StateID) -> bool {
        let index = self.sparse[id];
        index.as_usize() < self.len() && self.dense[index] == id
    }

    /// Clear this set such that it has no members.
    #[inline]
    pub(crate) fn clear(&mut self) {
        self.len = 0;
    }

    #[inline]
    pub(crate) fn iter(&self) -> SparseSetIter<'_> {
        SparseSetIter(self.dense[..self.len()].iter())
    }

    /// Returns the heap memory usage, in bytes, used by this sparse set.
    #[inline]
    pub(crate) fn memory_usage(&self) -> usize {
        self.dense.len() * StateID::SIZE + self.sparse.len() * StateID::SIZE
    }
}

impl core::fmt::Debug for SparseSet {
    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
        let elements: Vec<StateID> = self.iter().collect();
        f.debug_tuple("SparseSet").field(&elements).finish()
    }
}

/// An iterator over all elements in a sparse set.
///
/// The lifetime `'a` refers to the lifetime of the set being iterated over.
#[derive(Debug)]
pub(crate) struct SparseSetIter<'a>(core::slice::Iter<'a, StateID>);

impl<'a> Iterator for SparseSetIter<'a> {
    type Item = StateID;

    #[cfg_attr(feature = "perf-inline", inline(always))]
    fn next(&mut self) -> Option<StateID> {
        self.0.next().map(|&id| id)
    }
}
<file_sep>/tests/regression.rs
use regex::Regex;

macro_rules!
regex { ($pattern:expr) => { regex::Regex::new($pattern).unwrap() }; } // See: https://github.com/rust-lang/regex/issues/48 #[test] fn invalid_regexes_no_crash() { assert!(Regex::new("(*)").is_err()); assert!(Regex::new("(?:?)").is_err()); assert!(Regex::new("(?)").is_err()); assert!(Regex::new("*").is_err()); } // See: https://github.com/rust-lang/regex/issues/98 #[test] fn regression_many_repeat_stack_overflow() { let re = regex!("^.{1,2500}"); assert_eq!( vec![0..1], re.find_iter("a").map(|m| m.range()).collect::<Vec<_>>() ); } // See: https://github.com/rust-lang/regex/issues/555 #[test] fn regression_invalid_repetition_expr() { assert!(Regex::new("(?m){1,1}").is_err()); } // See: https://github.com/rust-lang/regex/issues/527 #[test] fn regression_invalid_flags_expression() { assert!(Regex::new("(((?x)))").is_ok()); } // See: https://github.com/rust-lang/regex/issues/129 #[test] fn regression_captures_rep() { let re = regex!(r"([a-f]){2}(?P<foo>[x-z])"); let caps = re.captures("abx").unwrap(); assert_eq!(&caps["foo"], "x"); } // See: https://github.com/BurntSushi/ripgrep/issues/1247 #[cfg(feature = "unicode-perl")] #[test] fn regression_nfa_stops1() { let re = regex::bytes::Regex::new(r"\bs(?:[ab])").unwrap(); assert_eq!(0, re.find_iter(b"s\xE4").count()); } // See: https://github.com/rust-lang/regex/issues/981 #[cfg(feature = "unicode")] #[test] fn regression_bad_word_boundary() { let re = regex!(r#"(?i:(?:\b|_)win(?:32|64|dows)?(?:\b|_))"#); let hay = "ubi-Darwin-x86_64.tar.gz"; assert!(!re.is_match(hay)); let hay = "ubi-Windows-x86_64.zip"; assert!(re.is_match(hay)); } // See: https://github.com/rust-lang/regex/issues/982 #[cfg(feature = "unicode-perl")] #[test] fn regression_unicode_perl_not_enabled() { let pat = r"(\d+\s?(years|year|y))?\s?(\d+\s?(months|month|m))?\s?(\d+\s?(weeks|week|w))?\s?(\d+\s?(days|day|d))?\s?(\d+\s?(hours|hour|h))?"; assert!(Regex::new(pat).is_ok()); } // See: https://github.com/rust-lang/regex/issues/995 #[test] fn regression_big_regex_overflow() { let pat = r" {2147483516}{2147483416}{5}"; assert!(Regex::new(pat).is_err()); } // See: https://github.com/rust-lang/regex/issues/999 #[test] fn regression_complete_literals_suffix_incorrect() { let needles = vec![ "aA", "bA", "cA", "dA", "eA", "fA", "gA", "hA", "iA", "jA", "kA", "lA", "mA", "nA", "oA", "pA", "qA", "rA", "sA", "tA", "uA", "vA", "wA", "xA", "yA", "zA", ]; let pattern = needles.join("|"); let re = regex!(&pattern); let hay = "FUBAR"; assert_eq!(0, re.find_iter(hay).count()); } <file_sep>/regex-cli/args/backtrack.rs use { anyhow::Context, lexopt::{Arg, Parser}, regex_automata::nfa::thompson::{backtrack, NFA}, }; use crate::args::{self, Configurable, Usage}; /// This exposes the configuration knobs for a `BoundedBacktracker`. #[derive(Debug, Default)] pub struct Config { backtrack: backtrack::Config, } impl Config { /// Return a `backtrack::Config` object from this configuration. pub fn backtrack(&self) -> anyhow::Result<backtrack::Config> { Ok(self.backtrack.clone()) } /// Builds a `BoundedBacktracker` regex engine from the NFA given. pub fn from_nfa( &self, nfa: &NFA, ) -> anyhow::Result<backtrack::BoundedBacktracker> { backtrack::Builder::new() .configure(self.backtrack()?) 
            .build_from_nfa(nfa.clone())
            .context("failed to build BoundedBacktracker matcher")
    }
}

impl Configurable for Config {
    fn configure(
        &mut self,
        p: &mut Parser,
        arg: &mut Arg,
    ) -> anyhow::Result<bool> {
        match *arg {
            Arg::Long("visited-capacity") => {
                let capacity = args::parse(p, "--visited-capacity")?;
                self.backtrack =
                    self.backtrack.clone().visited_capacity(capacity);
            }
            _ => return Ok(false),
        }
        Ok(true)
    }

    fn usage(&self) -> &[Usage] {
        const USAGES: &'static [Usage] = &[Usage::new(
            "--visited-capacity <capacity>",
            "Set the visited capacity for the bounded backtracker.",
            r#"
Set the visited set capacity used to bound backtracking.

The visited capacity represents the amount of heap memory (in bytes) to
allocate toward tracking which parts of the backtracking search have been done
before. The heap memory needed for any particular search is proportional to
'haystack.len() * nfa.states().len()', which can be quite large. Therefore, the
bounded backtracker is typically only able to run on shorter haystacks.

For a given regex, increasing the visited capacity means that the maximum
haystack length that can be searched is increased.

The default capacity is a reasonable but empirically chosen size.
"#,
        )];
        USAGES
    }
}
<file_sep>/regex-automata/src/dfa/sparse.rs
/*!
Types and routines specific to sparse DFAs.

This module is the home of [`sparse::DFA`](DFA).

Unlike the [`dense`](super::dense) module, this module does not contain a
builder or configuration specific for sparse DFAs. Instead, the intended way
to build a sparse DFA is either by using a default configuration with its
constructor [`sparse::DFA::new`](DFA::new), or by first configuring the
construction of a dense DFA with [`dense::Builder`](super::dense::Builder) and
then calling [`dense::DFA::to_sparse`](super::dense::DFA::to_sparse). For
example, this configures a sparse DFA to do an overlapping search:

```
use regex_automata::{
    dfa::{Automaton, OverlappingState, dense},
    HalfMatch, Input, MatchKind,
};

let dense_re = dense::Builder::new()
    .configure(dense::Config::new().match_kind(MatchKind::All))
    .build(r"Samwise|Sam")?;
let sparse_re = dense_re.to_sparse()?;

// Setup our haystack and initial start state.
let input = Input::new("Samwise");
let mut state = OverlappingState::start();

// First, 'Sam' will match.
sparse_re.try_search_overlapping_fwd(&input, &mut state)?;
assert_eq!(Some(HalfMatch::must(0, 3)), state.get_match());

// And now 'Samwise' will match.
sparse_re.try_search_overlapping_fwd(&input, &mut state)?;
assert_eq!(Some(HalfMatch::must(0, 7)), state.get_match());
# Ok::<(), Box<dyn std::error::Error>>(())
```
*/

#[cfg(feature = "dfa-build")]
use core::iter;
use core::{
    convert::{TryFrom, TryInto},
    fmt,
    mem::size_of,
};

#[cfg(feature = "dfa-build")]
use alloc::{vec, vec::Vec};

#[cfg(feature = "dfa-build")]
use crate::dfa::dense::{self, BuildError};
use crate::{
    dfa::{
        automaton::{fmt_state_indicator, Automaton},
        dense::Flags,
        special::Special,
        StartKind, DEAD,
    },
    util::{
        alphabet::{ByteClasses, ByteSet},
        escape::DebugByte,
        int::{Pointer, Usize, U16, U32},
        prefilter::Prefilter,
        primitives::{PatternID, StateID},
        search::{Anchored, Input, MatchError},
        start::{Start, StartByteMap},
        wire::{self, DeserializeError, Endian, SerializeError},
    },
};

const LABEL: &str = "rust-regex-automata-dfa-sparse";
const VERSION: u32 = 2;

/// A sparse deterministic finite automaton (DFA) with variable sized states.
/// /// In contrast to a [dense::DFA](crate::dfa::dense::DFA), a sparse DFA uses /// a more space efficient representation for its transitions. Consequently, /// sparse DFAs may use much less memory than dense DFAs, but this comes at a /// price. In particular, reading the more space efficient transitions takes /// more work, and consequently, searching using a sparse DFA is typically /// slower than a dense DFA. /// /// A sparse DFA can be built using the default configuration via the /// [`DFA::new`] constructor. Otherwise, one can configure various aspects /// of a dense DFA via [`dense::Builder`](crate::dfa::dense::Builder), /// and then convert a dense DFA to a sparse DFA using /// [`dense::DFA::to_sparse`](crate::dfa::dense::DFA::to_sparse). /// /// In general, a sparse DFA supports all the same search operations as a dense /// DFA. /// /// Making the choice between a dense and sparse DFA depends on your specific /// work load. If you can sacrifice a bit of search time performance, then a /// sparse DFA might be the best choice. In particular, while sparse DFAs are /// probably always slower than dense DFAs, you may find that they are easily /// fast enough for your purposes! /// /// # Type parameters /// /// A `DFA` has one type parameter, `T`, which is used to represent the parts /// of a sparse DFA. `T` is typically a `Vec<u8>` or a `&[u8]`. /// /// # The `Automaton` trait /// /// This type implements the [`Automaton`] trait, which means it can be used /// for searching. For example: /// /// ``` /// use regex_automata::{dfa::{Automaton, sparse::DFA}, HalfMatch, Input}; /// /// let dfa = DFA::new("foo[0-9]+")?; /// let expected = Some(HalfMatch::must(0, 8)); /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[derive(Clone)] pub struct DFA<T> { // When compared to a dense DFA, a sparse DFA *looks* a lot simpler // representation-wise. In reality, it is perhaps more complicated. Namely, // in a dense DFA, all information needs to be very cheaply accessible // using only state IDs. In a sparse DFA however, each state uses a // variable amount of space because each state encodes more information // than just its transitions. Each state also includes an accelerator if // one exists, along with the matching pattern IDs if the state is a match // state. // // That is, a lot of the complexity is pushed down into how each state // itself is represented. tt: Transitions<T>, st: StartTable<T>, special: Special, pre: Option<Prefilter>, quitset: ByteSet, flags: Flags, } #[cfg(feature = "dfa-build")] impl DFA<Vec<u8>> { /// Parse the given regular expression using a default configuration and /// return the corresponding sparse DFA. /// /// If you want a non-default configuration, then use /// the [`dense::Builder`](crate::dfa::dense::Builder) /// to set your own configuration, and then call /// [`dense::DFA::to_sparse`](crate::dfa::dense::DFA::to_sparse) to create /// a sparse DFA. 
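    ///
    /// For example, a minimal sketch of that configuration path (the
    /// `byte_classes` option below is chosen purely for illustration; any
    /// other dense config knob works the same way):
    ///
    /// ```
    /// use regex_automata::{dfa::{Automaton, dense}, HalfMatch, Input};
    ///
    /// let sparse_dfa = dense::Builder::new()
    ///     .configure(dense::Config::new().byte_classes(false))
    ///     .build("foo[0-9]+")?
    ///     .to_sparse()?;
    /// assert_eq!(
    ///     Some(HalfMatch::must(0, 8)),
    ///     sparse_dfa.try_search_fwd(&Input::new("foo12345"))?,
    /// );
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```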
/// /// # Example /// /// ``` /// use regex_automata::{dfa::{Automaton, sparse}, HalfMatch, Input}; /// /// let dfa = sparse::DFA::new("foo[0-9]+bar")?; /// /// let expected = Some(HalfMatch::must(0, 11)); /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345bar"))?); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[cfg(feature = "syntax")] pub fn new(pattern: &str) -> Result<DFA<Vec<u8>>, BuildError> { dense::Builder::new() .build(pattern) .and_then(|dense| dense.to_sparse()) } /// Parse the given regular expressions using a default configuration and /// return the corresponding multi-DFA. /// /// If you want a non-default configuration, then use /// the [`dense::Builder`](crate::dfa::dense::Builder) /// to set your own configuration, and then call /// [`dense::DFA::to_sparse`](crate::dfa::dense::DFA::to_sparse) to create /// a sparse DFA. /// /// # Example /// /// ``` /// use regex_automata::{dfa::{Automaton, sparse}, HalfMatch, Input}; /// /// let dfa = sparse::DFA::new_many(&["[0-9]+", "[a-z]+"])?; /// let expected = Some(HalfMatch::must(1, 3)); /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345bar"))?); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[cfg(feature = "syntax")] pub fn new_many<P: AsRef<str>>( patterns: &[P], ) -> Result<DFA<Vec<u8>>, BuildError> { dense::Builder::new() .build_many(patterns) .and_then(|dense| dense.to_sparse()) } } #[cfg(feature = "dfa-build")] impl DFA<Vec<u8>> { /// Create a new DFA that matches every input. /// /// # Example /// /// ``` /// use regex_automata::{ /// dfa::{Automaton, sparse}, /// HalfMatch, Input, /// }; /// /// let dfa = sparse::DFA::always_match()?; /// /// let expected = Some(HalfMatch::must(0, 0)); /// assert_eq!(expected, dfa.try_search_fwd(&Input::new(""))?); /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo"))?); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn always_match() -> Result<DFA<Vec<u8>>, BuildError> { dense::DFA::always_match()?.to_sparse() } /// Create a new sparse DFA that never matches any input. /// /// # Example /// /// ``` /// use regex_automata::{dfa::{Automaton, sparse}, Input}; /// /// let dfa = sparse::DFA::never_match()?; /// assert_eq!(None, dfa.try_search_fwd(&Input::new(""))?); /// assert_eq!(None, dfa.try_search_fwd(&Input::new("foo"))?); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn never_match() -> Result<DFA<Vec<u8>>, BuildError> { dense::DFA::never_match()?.to_sparse() } /// The implementation for constructing a sparse DFA from a dense DFA. pub(crate) fn from_dense<T: AsRef<[u32]>>( dfa: &dense::DFA<T>, ) -> Result<DFA<Vec<u8>>, BuildError> { // In order to build the transition table, we need to be able to write // state identifiers for each of the "next" transitions in each state. // Our state identifiers correspond to the byte offset in the // transition table at which the state is encoded. Therefore, we do not // actually know what the state identifiers are until we've allocated // exactly as much space as we need for each state. Thus, construction // of the transition table happens in two passes. // // In the first pass, we fill out the shell of each state, which // includes the transition length, the input byte ranges and // zero-filled space for the transitions and accelerators, if present. // In this first pass, we also build up a map from the state identifier // index of the dense DFA to the state identifier in this sparse DFA. 
// // In the second pass, we fill in the transitions based on the map // built in the first pass. // The capacity given here reflects a minimum. (Well, the true minimum // is likely even bigger, but hopefully this saves a few reallocs.) let mut sparse = Vec::with_capacity(StateID::SIZE * dfa.state_len()); // This maps state indices from the dense DFA to StateIDs in the sparse // DFA. We build out this map on the first pass, and then use it in the // second pass to back-fill our transitions. let mut remap: Vec<StateID> = vec![DEAD; dfa.state_len()]; for state in dfa.states() { let pos = sparse.len(); remap[dfa.to_index(state.id())] = StateID::new(pos) .map_err(|_| BuildError::too_many_states())?; // zero-filled space for the transition length sparse.push(0); sparse.push(0); let mut transition_len = 0; for (unit1, unit2, _) in state.sparse_transitions() { match (unit1.as_u8(), unit2.as_u8()) { (Some(b1), Some(b2)) => { transition_len += 1; sparse.push(b1); sparse.push(b2); } (None, None) => {} (Some(_), None) | (None, Some(_)) => { // can never occur because sparse_transitions never // groups EOI with any other transition. unreachable!() } } } // Add dummy EOI transition. This is never actually read while // searching, but having space equivalent to the total number // of transitions is convenient. Otherwise, we'd need to track // a different number of transitions for the byte ranges as for // the 'next' states. // // N.B. The loop above is not guaranteed to yield the EOI // transition, since it may point to a DEAD state. By putting // it here, we always write the EOI transition, and thus // guarantee that our transition length is >0. Why do we always // need the EOI transition? Because in order to implement // Automaton::next_eoi_state, this lets us just ask for the last // transition. There are probably other/better ways to do this. transition_len += 1; sparse.push(0); sparse.push(0); // Check some assumptions about transition length. assert_ne!( transition_len, 0, "transition length should be non-zero", ); assert!( transition_len <= 257, "expected transition length {} to be <= 257", transition_len, ); // Fill in the transition length. // Since transition length is always <= 257, we use the most // significant bit to indicate whether this is a match state or // not. let ntrans = if dfa.is_match_state(state.id()) { transition_len | (1 << 15) } else { transition_len }; wire::NE::write_u16(ntrans, &mut sparse[pos..]); // zero-fill the actual transitions. // Unwraps are OK since transition_length <= 257 and our minimum // support usize size is 16-bits. let zeros = usize::try_from(transition_len) .unwrap() .checked_mul(StateID::SIZE) .unwrap(); sparse.extend(iter::repeat(0).take(zeros)); // If this is a match state, write the pattern IDs matched by this // state. if dfa.is_match_state(state.id()) { let plen = dfa.match_pattern_len(state.id()); // Write the actual pattern IDs with a u32 length prefix. // First, zero-fill space. let mut pos = sparse.len(); // Unwraps are OK since it's guaranteed that plen <= // PatternID::LIMIT, which is in turn guaranteed to fit into a // u32. let zeros = size_of::<u32>() .checked_mul(plen) .unwrap() .checked_add(size_of::<u32>()) .unwrap(); sparse.extend(iter::repeat(0).take(zeros)); // Now write the length prefix. wire::NE::write_u32( // Will never fail since u32::MAX is invalid pattern ID. // Thus, the number of pattern IDs is representable by a // u32. 
plen.try_into().expect("pattern ID length fits in u32"), &mut sparse[pos..], ); pos += size_of::<u32>(); // Now write the pattern IDs. for &pid in dfa.pattern_id_slice(state.id()) { pos += wire::write_pattern_id::<wire::NE>( pid, &mut sparse[pos..], ); } } // And now add the accelerator, if one exists. An accelerator is // at most 4 bytes and at least 1 byte. The first byte is the // length, N. N bytes follow the length. The set of bytes that // follow correspond (exhaustively) to the bytes that must be seen // to leave this state. let accel = dfa.accelerator(state.id()); sparse.push(accel.len().try_into().unwrap()); sparse.extend_from_slice(accel); } let mut new = DFA { tt: Transitions { sparse, classes: dfa.byte_classes().clone(), state_len: dfa.state_len(), pattern_len: dfa.pattern_len(), }, st: StartTable::from_dense_dfa(dfa, &remap)?, special: dfa.special().remap(|id| remap[dfa.to_index(id)]), pre: dfa.get_prefilter().map(|p| p.clone()), quitset: dfa.quitset().clone(), flags: dfa.flags().clone(), }; // And here's our second pass. Iterate over all of the dense states // again, and update the transitions in each of the states in the // sparse DFA. for old_state in dfa.states() { let new_id = remap[dfa.to_index(old_state.id())]; let mut new_state = new.tt.state_mut(new_id); let sparse = old_state.sparse_transitions(); for (i, (_, _, next)) in sparse.enumerate() { let next = remap[dfa.to_index(next)]; new_state.set_next_at(i, next); } } debug!( "created sparse DFA, memory usage: {} (dense memory usage: {})", new.memory_usage(), dfa.memory_usage(), ); Ok(new) } } impl<T: AsRef<[u8]>> DFA<T> { /// Cheaply return a borrowed version of this sparse DFA. Specifically, the /// DFA returned always uses `&[u8]` for its transitions. pub fn as_ref<'a>(&'a self) -> DFA<&'a [u8]> { DFA { tt: self.tt.as_ref(), st: self.st.as_ref(), special: self.special, pre: self.pre.clone(), quitset: self.quitset, flags: self.flags, } } /// Return an owned version of this sparse DFA. Specifically, the DFA /// returned always uses `Vec<u8>` for its transitions. /// /// Effectively, this returns a sparse DFA whose transitions live on the /// heap. #[cfg(feature = "alloc")] pub fn to_owned(&self) -> DFA<alloc::vec::Vec<u8>> { DFA { tt: self.tt.to_owned(), st: self.st.to_owned(), special: self.special, pre: self.pre.clone(), quitset: self.quitset, flags: self.flags, } } /// Returns the starting state configuration for this DFA. /// /// The default is [`StartKind::Both`], which means the DFA supports both /// unanchored and anchored searches. However, this can generally lead to /// bigger DFAs. Therefore, a DFA might be compiled with support for just /// unanchored or anchored searches. In that case, running a search with /// an unsupported configuration will panic. pub fn start_kind(&self) -> StartKind { self.st.kind } /// Returns true only if this DFA has starting states for each pattern. /// /// When a DFA has starting states for each pattern, then a search with the /// DFA can be configured to only look for anchored matches of a specific /// pattern. Specifically, APIs like [`Automaton::try_search_fwd`] can /// accept a [`Anchored::Pattern`] if and only if this method returns true. /// Otherwise, an error will be returned. /// /// Note that if the DFA is empty, this always returns false. pub fn starts_for_each_pattern(&self) -> bool { self.st.pattern_len.is_some() } /// Returns the equivalence classes that make up the alphabet for this DFA. 
/// /// Unless [`dense::Config::byte_classes`] was disabled, it is possible /// that multiple distinct bytes are grouped into the same equivalence /// class if it is impossible for them to discriminate between a match and /// a non-match. This has the effect of reducing the overall alphabet size /// and in turn potentially substantially reducing the size of the DFA's /// transition table. /// /// The downside of using equivalence classes like this is that every state /// transition will automatically use this map to convert an arbitrary /// byte to its corresponding equivalence class. In practice this has a /// negligible impact on performance. pub fn byte_classes(&self) -> &ByteClasses { &self.tt.classes } /// Returns the memory usage, in bytes, of this DFA. /// /// The memory usage is computed based on the number of bytes used to /// represent this DFA. /// /// This does **not** include the stack size used up by this DFA. To /// compute that, use `std::mem::size_of::<sparse::DFA>()`. pub fn memory_usage(&self) -> usize { self.tt.memory_usage() + self.st.memory_usage() } } /// Routines for converting a sparse DFA to other representations, such as raw /// bytes suitable for persistent storage. impl<T: AsRef<[u8]>> DFA<T> { /// Serialize this DFA as raw bytes to a `Vec<u8>` in little endian /// format. /// /// The written bytes are guaranteed to be deserialized correctly and /// without errors in a semver compatible release of this crate by a /// `DFA`'s deserialization APIs (assuming all other criteria for the /// deserialization APIs has been satisfied): /// /// * [`DFA::from_bytes`] /// * [`DFA::from_bytes_unchecked`] /// /// Note that unlike a [`dense::DFA`](crate::dfa::dense::DFA)'s /// serialization methods, this does not add any initial padding to the /// returned bytes. Padding isn't required for sparse DFAs since they have /// no alignment requirements. /// /// # Example /// /// This example shows how to serialize and deserialize a DFA: /// /// ``` /// use regex_automata::{dfa::{Automaton, sparse::DFA}, HalfMatch, Input}; /// /// // Compile our original DFA. /// let original_dfa = DFA::new("foo[0-9]+")?; /// /// // N.B. We use native endianness here to make the example work, but /// // using to_bytes_little_endian would work on a little endian target. /// let buf = original_dfa.to_bytes_native_endian(); /// // Even if buf has initial padding, DFA::from_bytes will automatically /// // ignore it. /// let dfa: DFA<&[u8]> = DFA::from_bytes(&buf)?.0; /// /// let expected = Some(HalfMatch::must(0, 8)); /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[cfg(feature = "dfa-build")] pub fn to_bytes_little_endian(&self) -> Vec<u8> { self.to_bytes::<wire::LE>() } /// Serialize this DFA as raw bytes to a `Vec<u8>` in big endian /// format. /// /// The written bytes are guaranteed to be deserialized correctly and /// without errors in a semver compatible release of this crate by a /// `DFA`'s deserialization APIs (assuming all other criteria for the /// deserialization APIs has been satisfied): /// /// * [`DFA::from_bytes`] /// * [`DFA::from_bytes_unchecked`] /// /// Note that unlike a [`dense::DFA`](crate::dfa::dense::DFA)'s /// serialization methods, this does not add any initial padding to the /// returned bytes. Padding isn't required for sparse DFAs since they have /// no alignment requirements. 
/// /// # Example /// /// This example shows how to serialize and deserialize a DFA: /// /// ``` /// use regex_automata::{dfa::{Automaton, sparse::DFA}, HalfMatch, Input}; /// /// // Compile our original DFA. /// let original_dfa = DFA::new("foo[0-9]+")?; /// /// // N.B. We use native endianness here to make the example work, but /// // using to_bytes_big_endian would work on a big endian target. /// let buf = original_dfa.to_bytes_native_endian(); /// // Even if buf has initial padding, DFA::from_bytes will automatically /// // ignore it. /// let dfa: DFA<&[u8]> = DFA::from_bytes(&buf)?.0; /// /// let expected = Some(HalfMatch::must(0, 8)); /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[cfg(feature = "dfa-build")] pub fn to_bytes_big_endian(&self) -> Vec<u8> { self.to_bytes::<wire::BE>() } /// Serialize this DFA as raw bytes to a `Vec<u8>` in native endian /// format. /// /// The written bytes are guaranteed to be deserialized correctly and /// without errors in a semver compatible release of this crate by a /// `DFA`'s deserialization APIs (assuming all other criteria for the /// deserialization APIs has been satisfied): /// /// * [`DFA::from_bytes`] /// * [`DFA::from_bytes_unchecked`] /// /// Note that unlike a [`dense::DFA`](crate::dfa::dense::DFA)'s /// serialization methods, this does not add any initial padding to the /// returned bytes. Padding isn't required for sparse DFAs since they have /// no alignment requirements. /// /// Generally speaking, native endian format should only be used when /// you know that the target you're compiling the DFA for matches the /// endianness of the target on which you're compiling DFA. For example, /// if serialization and deserialization happen in the same process or on /// the same machine. Otherwise, when serializing a DFA for use in a /// portable environment, you'll almost certainly want to serialize _both_ /// a little endian and a big endian version and then load the correct one /// based on the target's configuration. /// /// # Example /// /// This example shows how to serialize and deserialize a DFA: /// /// ``` /// use regex_automata::{dfa::{Automaton, sparse::DFA}, HalfMatch, Input}; /// /// // Compile our original DFA. /// let original_dfa = DFA::new("foo[0-9]+")?; /// /// let buf = original_dfa.to_bytes_native_endian(); /// // Even if buf has initial padding, DFA::from_bytes will automatically /// // ignore it. /// let dfa: DFA<&[u8]> = DFA::from_bytes(&buf)?.0; /// /// let expected = Some(HalfMatch::must(0, 8)); /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[cfg(feature = "dfa-build")] pub fn to_bytes_native_endian(&self) -> Vec<u8> { self.to_bytes::<wire::NE>() } /// The implementation of the public `to_bytes` serialization methods, /// which is generic over endianness. #[cfg(feature = "dfa-build")] fn to_bytes<E: Endian>(&self) -> Vec<u8> { let mut buf = vec![0; self.write_to_len()]; // This should always succeed since the only possible serialization // error is providing a buffer that's too small, but we've ensured that // `buf` is big enough here. self.write_to::<E>(&mut buf).unwrap(); buf } /// Serialize this DFA as raw bytes to the given slice, in little endian /// format. Upon success, the total number of bytes written to `dst` is /// returned. 
/// /// The written bytes are guaranteed to be deserialized correctly and /// without errors in a semver compatible release of this crate by a /// `DFA`'s deserialization APIs (assuming all other criteria for the /// deserialization APIs has been satisfied): /// /// * [`DFA::from_bytes`] /// * [`DFA::from_bytes_unchecked`] /// /// # Errors /// /// This returns an error if the given destination slice is not big enough /// to contain the full serialized DFA. If an error occurs, then nothing /// is written to `dst`. /// /// # Example /// /// This example shows how to serialize and deserialize a DFA without /// dynamic memory allocation. /// /// ``` /// use regex_automata::{dfa::{Automaton, sparse::DFA}, HalfMatch, Input}; /// /// // Compile our original DFA. /// let original_dfa = DFA::new("foo[0-9]+")?; /// /// // Create a 4KB buffer on the stack to store our serialized DFA. /// let mut buf = [0u8; 4 * (1<<10)]; /// // N.B. We use native endianness here to make the example work, but /// // using write_to_little_endian would work on a little endian target. /// let written = original_dfa.write_to_native_endian(&mut buf)?; /// let dfa: DFA<&[u8]> = DFA::from_bytes(&buf[..written])?.0; /// /// let expected = Some(HalfMatch::must(0, 8)); /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn write_to_little_endian( &self, dst: &mut [u8], ) -> Result<usize, SerializeError> { self.write_to::<wire::LE>(dst) } /// Serialize this DFA as raw bytes to the given slice, in big endian /// format. Upon success, the total number of bytes written to `dst` is /// returned. /// /// The written bytes are guaranteed to be deserialized correctly and /// without errors in a semver compatible release of this crate by a /// `DFA`'s deserialization APIs (assuming all other criteria for the /// deserialization APIs has been satisfied): /// /// * [`DFA::from_bytes`] /// * [`DFA::from_bytes_unchecked`] /// /// # Errors /// /// This returns an error if the given destination slice is not big enough /// to contain the full serialized DFA. If an error occurs, then nothing /// is written to `dst`. /// /// # Example /// /// This example shows how to serialize and deserialize a DFA without /// dynamic memory allocation. /// /// ``` /// use regex_automata::{dfa::{Automaton, sparse::DFA}, HalfMatch, Input}; /// /// // Compile our original DFA. /// let original_dfa = DFA::new("foo[0-9]+")?; /// /// // Create a 4KB buffer on the stack to store our serialized DFA. /// let mut buf = [0u8; 4 * (1<<10)]; /// // N.B. We use native endianness here to make the example work, but /// // using write_to_big_endian would work on a big endian target. /// let written = original_dfa.write_to_native_endian(&mut buf)?; /// let dfa: DFA<&[u8]> = DFA::from_bytes(&buf[..written])?.0; /// /// let expected = Some(HalfMatch::must(0, 8)); /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn write_to_big_endian( &self, dst: &mut [u8], ) -> Result<usize, SerializeError> { self.write_to::<wire::BE>(dst) } /// Serialize this DFA as raw bytes to the given slice, in native endian /// format. Upon success, the total number of bytes written to `dst` is /// returned. 
/// /// The written bytes are guaranteed to be deserialized correctly and /// without errors in a semver compatible release of this crate by a /// `DFA`'s deserialization APIs (assuming all other criteria for the /// deserialization APIs has been satisfied): /// /// * [`DFA::from_bytes`] /// * [`DFA::from_bytes_unchecked`] /// /// Generally speaking, native endian format should only be used when /// you know that the target you're compiling the DFA for matches the /// endianness of the target on which you're compiling DFA. For example, /// if serialization and deserialization happen in the same process or on /// the same machine. Otherwise, when serializing a DFA for use in a /// portable environment, you'll almost certainly want to serialize _both_ /// a little endian and a big endian version and then load the correct one /// based on the target's configuration. /// /// # Errors /// /// This returns an error if the given destination slice is not big enough /// to contain the full serialized DFA. If an error occurs, then nothing /// is written to `dst`. /// /// # Example /// /// This example shows how to serialize and deserialize a DFA without /// dynamic memory allocation. /// /// ``` /// use regex_automata::{dfa::{Automaton, sparse::DFA}, HalfMatch, Input}; /// /// // Compile our original DFA. /// let original_dfa = DFA::new("foo[0-9]+")?; /// /// // Create a 4KB buffer on the stack to store our serialized DFA. /// let mut buf = [0u8; 4 * (1<<10)]; /// let written = original_dfa.write_to_native_endian(&mut buf)?; /// let dfa: DFA<&[u8]> = DFA::from_bytes(&buf[..written])?.0; /// /// let expected = Some(HalfMatch::must(0, 8)); /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn write_to_native_endian( &self, dst: &mut [u8], ) -> Result<usize, SerializeError> { self.write_to::<wire::NE>(dst) } /// The implementation of the public `write_to` serialization methods, /// which is generic over endianness. fn write_to<E: Endian>( &self, dst: &mut [u8], ) -> Result<usize, SerializeError> { let mut nw = 0; nw += wire::write_label(LABEL, &mut dst[nw..])?; nw += wire::write_endianness_check::<E>(&mut dst[nw..])?; nw += wire::write_version::<E>(VERSION, &mut dst[nw..])?; nw += { // Currently unused, intended for future flexibility E::write_u32(0, &mut dst[nw..]); size_of::<u32>() }; nw += self.flags.write_to::<E>(&mut dst[nw..])?; nw += self.tt.write_to::<E>(&mut dst[nw..])?; nw += self.st.write_to::<E>(&mut dst[nw..])?; nw += self.special.write_to::<E>(&mut dst[nw..])?; nw += self.quitset.write_to::<E>(&mut dst[nw..])?; Ok(nw) } /// Return the total number of bytes required to serialize this DFA. /// /// This is useful for determining the size of the buffer required to pass /// to one of the serialization routines: /// /// * [`DFA::write_to_little_endian`] /// * [`DFA::write_to_big_endian`] /// * [`DFA::write_to_native_endian`] /// /// Passing a buffer smaller than the size returned by this method will /// result in a serialization error. /// /// # Example /// /// This example shows how to dynamically allocate enough room to serialize /// a sparse DFA. /// /// ``` /// use regex_automata::{dfa::{Automaton, sparse::DFA}, HalfMatch, Input}; /// /// // Compile our original DFA. 
/// let original_dfa = DFA::new("foo[0-9]+")?; /// /// let mut buf = vec![0; original_dfa.write_to_len()]; /// let written = original_dfa.write_to_native_endian(&mut buf)?; /// let dfa: DFA<&[u8]> = DFA::from_bytes(&buf[..written])?.0; /// /// let expected = Some(HalfMatch::must(0, 8)); /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn write_to_len(&self) -> usize { wire::write_label_len(LABEL) + wire::write_endianness_check_len() + wire::write_version_len() + size_of::<u32>() // unused, intended for future flexibility + self.flags.write_to_len() + self.tt.write_to_len() + self.st.write_to_len() + self.special.write_to_len() + self.quitset.write_to_len() } } impl<'a> DFA<&'a [u8]> { /// Safely deserialize a sparse DFA with a specific state identifier /// representation. Upon success, this returns both the deserialized DFA /// and the number of bytes read from the given slice. Namely, the contents /// of the slice beyond the DFA are not read. /// /// Deserializing a DFA using this routine will never allocate heap memory. /// For safety purposes, the DFA's transitions will be verified such that /// every transition points to a valid state. If this verification is too /// costly, then a [`DFA::from_bytes_unchecked`] API is provided, which /// will always execute in constant time. /// /// The bytes given must be generated by one of the serialization APIs /// of a `DFA` using a semver compatible release of this crate. Those /// include: /// /// * [`DFA::to_bytes_little_endian`] /// * [`DFA::to_bytes_big_endian`] /// * [`DFA::to_bytes_native_endian`] /// * [`DFA::write_to_little_endian`] /// * [`DFA::write_to_big_endian`] /// * [`DFA::write_to_native_endian`] /// /// The `to_bytes` methods allocate and return a `Vec<u8>` for you. The /// `write_to` methods do not allocate and write to an existing slice /// (which may be on the stack). Since deserialization always uses the /// native endianness of the target platform, the serialization API you use /// should match the endianness of the target platform. (It's often a good /// idea to generate serialized DFAs for both forms of endianness and then /// load the correct one based on endianness.) /// /// # Errors /// /// Generally speaking, it's easier to state the conditions in which an /// error is _not_ returned. All of the following must be true: /// /// * The bytes given must be produced by one of the serialization APIs /// on this DFA, as mentioned above. /// * The endianness of the target platform matches the endianness used to /// serialized the provided DFA. /// /// If any of the above are not true, then an error will be returned. /// /// Note that unlike deserializing a /// [`dense::DFA`](crate::dfa::dense::DFA), deserializing a sparse DFA has /// no alignment requirements. That is, an alignment of `1` is valid. /// /// # Panics /// /// This routine will never panic for any input. /// /// # Example /// /// This example shows how to serialize a DFA to raw bytes, deserialize it /// and then use it for searching. 
/// /// ``` /// use regex_automata::{dfa::{Automaton, sparse::DFA}, HalfMatch, Input}; /// /// let initial = DFA::new("foo[0-9]+")?; /// let bytes = initial.to_bytes_native_endian(); /// let dfa: DFA<&[u8]> = DFA::from_bytes(&bytes)?.0; /// /// let expected = Some(HalfMatch::must(0, 8)); /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// # Example: loading a DFA from static memory /// /// One use case this library supports is the ability to serialize a /// DFA to disk and then use `include_bytes!` to store it in a compiled /// Rust program. Those bytes can then be cheaply deserialized into a /// `DFA` structure at runtime and used for searching without having to /// re-compile the DFA (which can be quite costly). /// /// We can show this in two parts. The first part is serializing the DFA to /// a file: /// /// ```no_run /// use regex_automata::dfa::sparse::DFA; /// /// let dfa = DFA::new("foo[0-9]+")?; /// /// // Write a big endian serialized version of this DFA to a file. /// let bytes = dfa.to_bytes_big_endian(); /// std::fs::write("foo.bigendian.dfa", &bytes)?; /// /// // Do it again, but this time for little endian. /// let bytes = dfa.to_bytes_little_endian(); /// std::fs::write("foo.littleendian.dfa", &bytes)?; /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// And now the second part is embedding the DFA into the compiled program /// and deserializing it at runtime on first use. We use conditional /// compilation to choose the correct endianness. We do not need to employ /// any special tricks to ensure a proper alignment, since a sparse DFA has /// no alignment requirements. /// /// ```no_run /// use regex_automata::{ /// dfa::{Automaton, sparse::DFA}, /// util::lazy::Lazy, /// HalfMatch, Input, /// }; /// /// // This crate provides its own "lazy" type, kind of like /// // lazy_static! or once_cell::sync::Lazy. But it works in no-alloc /// // no-std environments and let's us write this using completely /// // safe code. /// static RE: Lazy<DFA<&'static [u8]>> = Lazy::new(|| { /// # const _: &str = stringify! { /// #[cfg(target_endian = "big")] /// static BYTES: &[u8] = include_bytes!("foo.bigendian.dfa"); /// #[cfg(target_endian = "little")] /// static BYTES: &[u8] = include_bytes!("foo.littleendian.dfa"); /// # }; /// # static BYTES: &[u8] = b""; /// /// let (dfa, _) = DFA::from_bytes(BYTES) /// .expect("serialized DFA should be valid"); /// dfa /// }); /// /// let expected = Ok(Some(HalfMatch::must(0, 8))); /// assert_eq!(expected, RE.try_search_fwd(&Input::new("foo12345"))); /// ``` /// /// Alternatively, consider using /// [`lazy_static`](https://crates.io/crates/lazy_static) /// or /// [`once_cell`](https://crates.io/crates/once_cell), /// which will guarantee safety for you. pub fn from_bytes( slice: &'a [u8], ) -> Result<(DFA<&'a [u8]>, usize), DeserializeError> { // SAFETY: This is safe because we validate both the sparse transitions // (by trying to decode every state) and start state ID list below. If // either validation fails, then we return an error. let (dfa, nread) = unsafe { DFA::from_bytes_unchecked(slice)? }; dfa.tt.validate(&dfa.special)?; dfa.st.validate(&dfa.special, &dfa.tt)?; // N.B. dfa.special doesn't have a way to do unchecked deserialization, // so it has already been validated. 
Ok((dfa, nread)) } /// Deserialize a DFA with a specific state identifier representation in /// constant time by omitting the verification of the validity of the /// sparse transitions. /// /// This is just like [`DFA::from_bytes`], except it can potentially return /// a DFA that exhibits undefined behavior if its transitions contains /// invalid state identifiers. /// /// This routine is useful if you need to deserialize a DFA cheaply and /// cannot afford the transition validation performed by `from_bytes`. /// /// # Safety /// /// This routine is not safe because it permits callers to provide /// arbitrary transitions with possibly incorrect state identifiers. While /// the various serialization routines will never return an incorrect /// DFA, there is no guarantee that the bytes provided here are correct. /// While `from_bytes_unchecked` will still do several forms of basic /// validation, this routine does not check that the transitions themselves /// are correct. Given an incorrect transition table, it is possible for /// the search routines to access out-of-bounds memory because of explicit /// bounds check elision. /// /// # Example /// /// ``` /// use regex_automata::{dfa::{Automaton, sparse::DFA}, HalfMatch, Input}; /// /// let initial = DFA::new("foo[0-9]+")?; /// let bytes = initial.to_bytes_native_endian(); /// // SAFETY: This is guaranteed to be safe since the bytes given come /// // directly from a compatible serialization routine. /// let dfa: DFA<&[u8]> = unsafe { DFA::from_bytes_unchecked(&bytes)?.0 }; /// /// let expected = Some(HalfMatch::must(0, 8)); /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub unsafe fn from_bytes_unchecked( slice: &'a [u8], ) -> Result<(DFA<&'a [u8]>, usize), DeserializeError> { let mut nr = 0; nr += wire::read_label(&slice[nr..], LABEL)?; nr += wire::read_endianness_check(&slice[nr..])?; nr += wire::read_version(&slice[nr..], VERSION)?; let _unused = wire::try_read_u32(&slice[nr..], "unused space")?; nr += size_of::<u32>(); let (flags, nread) = Flags::from_bytes(&slice[nr..])?; nr += nread; let (tt, nread) = Transitions::from_bytes_unchecked(&slice[nr..])?; nr += nread; let (st, nread) = StartTable::from_bytes_unchecked(&slice[nr..])?; nr += nread; let (special, nread) = Special::from_bytes(&slice[nr..])?; nr += nread; if special.max.as_usize() >= tt.sparse().len() { return Err(DeserializeError::generic( "max should not be greater than or equal to sparse bytes", )); } let (quitset, nread) = ByteSet::from_bytes(&slice[nr..])?; nr += nread; // Prefilters don't support serialization, so they're always absent. 
let pre = None; Ok((DFA { tt, st, special, pre, quitset, flags }, nr)) } } impl<T: AsRef<[u8]>> fmt::Debug for DFA<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { writeln!(f, "sparse::DFA(")?; for state in self.tt.states() { fmt_state_indicator(f, self, state.id())?; writeln!(f, "{:06?}: {:?}", state.id().as_usize(), state)?; } writeln!(f, "")?; for (i, (start_id, anchored, sty)) in self.st.iter().enumerate() { if i % self.st.stride == 0 { match anchored { Anchored::No => writeln!(f, "START-GROUP(unanchored)")?, Anchored::Yes => writeln!(f, "START-GROUP(anchored)")?, Anchored::Pattern(pid) => writeln!( f, "START_GROUP(pattern: {:?})", pid.as_usize() )?, } } writeln!(f, " {:?} => {:06?}", sty, start_id.as_usize())?; } writeln!(f, "state length: {:?}", self.tt.state_len)?; writeln!(f, "pattern length: {:?}", self.pattern_len())?; writeln!(f, "flags: {:?}", self.flags)?; writeln!(f, ")")?; Ok(()) } } // SAFETY: We assert that our implementation of each method is correct. unsafe impl<T: AsRef<[u8]>> Automaton for DFA<T> { #[inline] fn is_special_state(&self, id: StateID) -> bool { self.special.is_special_state(id) } #[inline] fn is_dead_state(&self, id: StateID) -> bool { self.special.is_dead_state(id) } #[inline] fn is_quit_state(&self, id: StateID) -> bool { self.special.is_quit_state(id) } #[inline] fn is_match_state(&self, id: StateID) -> bool { self.special.is_match_state(id) } #[inline] fn is_start_state(&self, id: StateID) -> bool { self.special.is_start_state(id) } #[inline] fn is_accel_state(&self, id: StateID) -> bool { self.special.is_accel_state(id) } // This is marked as inline to help dramatically boost sparse searching, // which decodes each state it enters to follow the next transition. #[cfg_attr(feature = "perf-inline", inline(always))] fn next_state(&self, current: StateID, input: u8) -> StateID { let input = self.tt.classes.get(input); self.tt.state(current).next(input) } #[inline] unsafe fn next_state_unchecked( &self, current: StateID, input: u8, ) -> StateID { self.next_state(current, input) } #[inline] fn next_eoi_state(&self, current: StateID) -> StateID { self.tt.state(current).next_eoi() } #[inline] fn pattern_len(&self) -> usize { self.tt.pattern_len } #[inline] fn match_len(&self, id: StateID) -> usize { self.tt.state(id).pattern_len() } #[inline] fn match_pattern(&self, id: StateID, match_index: usize) -> PatternID { // This is an optimization for the very common case of a DFA with a // single pattern. This conditional avoids a somewhat more costly path // that finds the pattern ID from the state machine, which requires // a bit of slicing/pointer-chasing. This optimization tends to only // matter when matches are frequent. 
if self.tt.pattern_len == 1 { return PatternID::ZERO; } self.tt.state(id).pattern_id(match_index) } #[inline] fn has_empty(&self) -> bool { self.flags.has_empty } #[inline] fn is_utf8(&self) -> bool { self.flags.is_utf8 } #[inline] fn is_always_start_anchored(&self) -> bool { self.flags.is_always_start_anchored } #[inline] fn start_state_forward( &self, input: &Input<'_>, ) -> Result<StateID, MatchError> { if !self.quitset.is_empty() && input.start() > 0 { let offset = input.start() - 1; let byte = input.haystack()[offset]; if self.quitset.contains(byte) { return Err(MatchError::quit(byte, offset)); } } let start = self.st.start_map.fwd(&input); self.st.start(input, start) } #[inline] fn start_state_reverse( &self, input: &Input<'_>, ) -> Result<StateID, MatchError> { if !self.quitset.is_empty() && input.end() < input.haystack().len() { let offset = input.end(); let byte = input.haystack()[offset]; if self.quitset.contains(byte) { return Err(MatchError::quit(byte, offset)); } } let start = self.st.start_map.rev(&input); self.st.start(input, start) } #[inline] fn universal_start_state(&self, mode: Anchored) -> Option<StateID> { match mode { Anchored::No => self.st.universal_start_unanchored, Anchored::Yes => self.st.universal_start_anchored, Anchored::Pattern(_) => None, } } #[inline] fn accelerator(&self, id: StateID) -> &[u8] { self.tt.state(id).accelerator() } #[inline] fn get_prefilter(&self) -> Option<&Prefilter> { self.pre.as_ref() } } /// The transition table portion of a sparse DFA. /// /// The transition table is the core part of the DFA in that it describes how /// to move from one state to another based on the input sequence observed. /// /// Unlike a typical dense table based DFA, states in a sparse transition /// table have variable size. That is, states with more transitions use more /// space than states with fewer transitions. This means that finding the next /// transition takes more work than with a dense DFA, but also typically uses /// much less space. #[derive(Clone)] struct Transitions<T> { /// The raw encoding of each state in this DFA. /// /// Each state has the following information: /// /// * A set of transitions to subsequent states. Transitions to the dead /// state are omitted. /// * If the state can be accelerated, then any additional accelerator /// information. /// * If the state is a match state, then the state contains all pattern /// IDs that match when in that state. /// /// To decode a state, use Transitions::state. /// /// In practice, T is either Vec<u8> or &[u8]. sparse: T, /// A set of equivalence classes, where a single equivalence class /// represents a set of bytes that never discriminate between a match /// and a non-match in the DFA. Each equivalence class corresponds to a /// single character in this DFA's alphabet, where the maximum number of /// characters is 257 (each possible value of a byte plus the special /// EOI transition). Consequently, the number of equivalence classes /// corresponds to the number of transitions for each DFA state. Note /// though that the *space* used by each DFA state in the transition table /// may be larger. The total space used by each DFA state is known as the /// stride and is documented above. /// /// The only time the number of equivalence classes is fewer than 257 is /// if the DFA's kind uses byte classes which is the default. Equivalence /// classes should generally only be disabled when debugging, so that /// the transitions themselves aren't obscured. 
Disabling them has no /// other benefit, since the equivalence class map is always used while /// searching. In the vast majority of cases, the number of equivalence /// classes is substantially smaller than 257, particularly when large /// Unicode classes aren't used. /// /// N.B. Equivalence classes aren't particularly useful in a sparse DFA /// in the current implementation, since equivalence classes generally tend /// to correspond to continuous ranges of bytes that map to the same /// transition. So in a sparse DFA, equivalence classes don't really lead /// to a space savings. In the future, it would be good to try and remove /// them from sparse DFAs entirely, but requires a bit of work since sparse /// DFAs are built from dense DFAs, which are in turn built on top of /// equivalence classes. classes: ByteClasses, /// The total number of states in this DFA. Note that a DFA always has at /// least one state---the dead state---even the empty DFA. In particular, /// the dead state always has ID 0 and is correspondingly always the first /// state. The dead state is never a match state. state_len: usize, /// The total number of unique patterns represented by these match states. pattern_len: usize, } impl<'a> Transitions<&'a [u8]> { unsafe fn from_bytes_unchecked( mut slice: &'a [u8], ) -> Result<(Transitions<&'a [u8]>, usize), DeserializeError> { let slice_start = slice.as_ptr().as_usize(); let (state_len, nr) = wire::try_read_u32_as_usize(&slice, "state length")?; slice = &slice[nr..]; let (pattern_len, nr) = wire::try_read_u32_as_usize(&slice, "pattern length")?; slice = &slice[nr..]; let (classes, nr) = ByteClasses::from_bytes(&slice)?; slice = &slice[nr..]; let (len, nr) = wire::try_read_u32_as_usize(&slice, "sparse transitions length")?; slice = &slice[nr..]; wire::check_slice_len(slice, len, "sparse states byte length")?; let sparse = &slice[..len]; slice = &slice[len..]; let trans = Transitions { sparse, classes, state_len, pattern_len }; Ok((trans, slice.as_ptr().as_usize() - slice_start)) } } impl<T: AsRef<[u8]>> Transitions<T> { /// Writes a serialized form of this transition table to the buffer given. /// If the buffer is too small, then an error is returned. To determine /// how big the buffer must be, use `write_to_len`. fn write_to<E: Endian>( &self, mut dst: &mut [u8], ) -> Result<usize, SerializeError> { let nwrite = self.write_to_len(); if dst.len() < nwrite { return Err(SerializeError::buffer_too_small( "sparse transition table", )); } dst = &mut dst[..nwrite]; // write state length E::write_u32(u32::try_from(self.state_len).unwrap(), dst); dst = &mut dst[size_of::<u32>()..]; // write pattern length E::write_u32(u32::try_from(self.pattern_len).unwrap(), dst); dst = &mut dst[size_of::<u32>()..]; // write byte class map let n = self.classes.write_to(dst)?; dst = &mut dst[n..]; // write number of bytes in sparse transitions E::write_u32(u32::try_from(self.sparse().len()).unwrap(), dst); dst = &mut dst[size_of::<u32>()..]; // write actual transitions let mut id = DEAD; while id.as_usize() < self.sparse().len() { let state = self.state(id); let n = state.write_to::<E>(&mut dst)?; dst = &mut dst[n..]; // The next ID is the offset immediately following `state`. id = StateID::new(id.as_usize() + state.write_to_len()).unwrap(); } Ok(nwrite) } /// Returns the number of bytes the serialized form of this transition /// table will use. 
fn write_to_len(&self) -> usize { size_of::<u32>() // state length + size_of::<u32>() // pattern length + self.classes.write_to_len() + size_of::<u32>() // sparse transitions length + self.sparse().len() } /// Validates that every state ID in this transition table is valid. /// /// That is, every state ID can be used to correctly index a state in this /// table. fn validate(&self, sp: &Special) -> Result<(), DeserializeError> { // In order to validate everything, we not only need to make sure we // can decode every state, but that every transition in every state // points to a valid state. There are many duplicative transitions, so // we record state IDs that we've verified so that we don't redo the // decoding work. // // Except, when in no_std mode, we don't have dynamic memory allocation // available to us, so we skip this optimization. It's not clear // whether doing something more clever is worth it just yet. If you're // profiling this code and need it to run faster, please file an issue. // // OK, so we also use this to record the set of valid state IDs. Since // it is possible for a transition to point to an invalid state ID that // still (somehow) deserializes to a valid state. So we need to make // sure our transitions are limited to actually correct state IDs. // The problem is, I'm not sure how to do this verification step in // no-std no-alloc mode. I think we'd *have* to store the set of valid // state IDs in the DFA itself. For now, we don't do this verification // in no-std no-alloc mode. The worst thing that can happen is an // incorrect result. But no panics or memory safety problems should // result. Because we still do validate that the state itself is // "valid" in the sense that everything it points to actually exists. // // ---AG struct Seen { #[cfg(feature = "alloc")] set: alloc::collections::BTreeSet<StateID>, #[cfg(not(feature = "alloc"))] set: core::marker::PhantomData<StateID>, } #[cfg(feature = "alloc")] impl Seen { fn new() -> Seen { Seen { set: alloc::collections::BTreeSet::new() } } fn insert(&mut self, id: StateID) { self.set.insert(id); } fn contains(&self, id: &StateID) -> bool { self.set.contains(id) } } #[cfg(not(feature = "alloc"))] impl Seen { fn new() -> Seen { Seen { set: core::marker::PhantomData } } fn insert(&mut self, _id: StateID) {} fn contains(&self, _id: &StateID) -> bool { false } } let mut verified: Seen = Seen::new(); // We need to make sure that we decode the correct number of states. // Otherwise, an empty set of transitions would validate even if the // recorded state length is non-empty. let mut len = 0; // We can't use the self.states() iterator because it assumes the state // encodings are valid. It could panic if they aren't. let mut id = DEAD; while id.as_usize() < self.sparse().len() { // Before we even decode the state, we check that the ID itself // is well formed. That is, if it's a special state then it must // actually be a quit, dead, accel, match or start state. if sp.is_special_state(id) { let is_actually_special = sp.is_dead_state(id) || sp.is_quit_state(id) || sp.is_match_state(id) || sp.is_start_state(id) || sp.is_accel_state(id); if !is_actually_special { // This is kind of a cryptic error message... return Err(DeserializeError::generic( "found sparse state tagged as special but \ wasn't actually special", )); } } let state = self.try_state(sp, id)?; verified.insert(id); // The next ID should be the offset immediately following `state`. 
id = StateID::new(wire::add( id.as_usize(), state.write_to_len(), "next state ID offset", )?) .map_err(|err| { DeserializeError::state_id_error(err, "next state ID offset") })?; len += 1; } // Now that we've checked that all top-level states are correct and // importantly, collected a set of valid state IDs, we have all the // information we need to check that all transitions are correct too. // // Note that we can't use `valid_ids` to iterate because it will // be empty in no-std no-alloc contexts. (And yes, that means our // verification isn't quite as good.) We can use `self.states()` // though at least, since we know that all states can at least be // decoded and traversed correctly. for state in self.states() { // Check that all transitions in this state are correct. for i in 0..state.ntrans { let to = state.next_at(i); // For no-alloc, we just check that the state can decode. It is // technically possible that the state ID could still point to // a non-existent state even if it decodes (fuzzing proved this // to be true), but it shouldn't result in any memory unsafety // or panics in non-debug mode. #[cfg(not(feature = "alloc"))] { let _ = self.try_state(sp, to)?; } #[cfg(feature = "alloc")] { if !verified.contains(&to) { return Err(DeserializeError::generic( "found transition that points to a \ non-existent state", )); } } } } if len != self.state_len { return Err(DeserializeError::generic( "mismatching sparse state length", )); } Ok(()) } /// Converts these transitions to a borrowed value. fn as_ref(&self) -> Transitions<&'_ [u8]> { Transitions { sparse: self.sparse(), classes: self.classes.clone(), state_len: self.state_len, pattern_len: self.pattern_len, } } /// Converts these transitions to an owned value. #[cfg(feature = "alloc")] fn to_owned(&self) -> Transitions<alloc::vec::Vec<u8>> { Transitions { sparse: self.sparse().to_vec(), classes: self.classes.clone(), state_len: self.state_len, pattern_len: self.pattern_len, } } /// Return a convenient representation of the given state. /// /// This panics if the state is invalid. /// /// This is marked as inline to help dramatically boost sparse searching, /// which decodes each state it enters to follow the next transition. Other /// functions involved are also inlined, which should hopefully eliminate /// a lot of the extraneous decoding that is never needed just to follow /// the next transition. #[cfg_attr(feature = "perf-inline", inline(always))] fn state(&self, id: StateID) -> State<'_> { let mut state = &self.sparse()[id.as_usize()..]; let mut ntrans = wire::read_u16(&state).as_usize(); let is_match = (1 << 15) & ntrans != 0; ntrans &= !(1 << 15); state = &state[2..]; let (input_ranges, state) = state.split_at(ntrans * 2); let (next, state) = state.split_at(ntrans * StateID::SIZE); let (pattern_ids, state) = if is_match { let npats = wire::read_u32(&state).as_usize(); state[4..].split_at(npats * 4) } else { (&[][..], state) }; let accel_len = usize::from(state[0]); let accel = &state[1..accel_len + 1]; State { id, is_match, ntrans, input_ranges, next, pattern_ids, accel } } /// Like `state`, but will return an error if the state encoding is /// invalid. This is useful for verifying states after deserialization, /// which is required for a safe deserialization API. /// /// Note that this only verifies that this state is decodable and that /// all of its data is consistent. It does not verify that its state ID /// transitions point to valid states themselves, nor does it verify that /// every pattern ID is valid. 
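    // A brief orientation for the validation below, mirroring the layout
    // already decoded by `state()` above: each encoded state starts with a
    // u16 whose low 15 bits give the transition count (bit 15 flags a match
    // state), followed by `ntrans * 2` bytes of inclusive byte ranges, then
    // `ntrans` state IDs, then (for match states only) a u32 pattern count
    // and that many pattern IDs, and finally a one-byte accelerator length
    // (at most 3) followed by the accelerator bytes themselves.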
fn try_state( &self, sp: &Special, id: StateID, ) -> Result<State<'_>, DeserializeError> { if id.as_usize() > self.sparse().len() { return Err(DeserializeError::generic( "invalid caller provided sparse state ID", )); } let mut state = &self.sparse()[id.as_usize()..]; // Encoding format starts with a u16 that stores the total number of // transitions in this state. let (mut ntrans, _) = wire::try_read_u16_as_usize(state, "state transition length")?; let is_match = ((1 << 15) & ntrans) != 0; ntrans &= !(1 << 15); state = &state[2..]; if ntrans > 257 || ntrans == 0 { return Err(DeserializeError::generic( "invalid transition length", )); } if is_match && !sp.is_match_state(id) { return Err(DeserializeError::generic( "state marked as match but not in match ID range", )); } else if !is_match && sp.is_match_state(id) { return Err(DeserializeError::generic( "state in match ID range but not marked as match state", )); } // Each transition has two pieces: an inclusive range of bytes on which // it is defined, and the state ID that those bytes transition to. The // pairs come first, followed by a corresponding sequence of state IDs. let input_ranges_len = ntrans.checked_mul(2).unwrap(); wire::check_slice_len(state, input_ranges_len, "sparse byte pairs")?; let (input_ranges, state) = state.split_at(input_ranges_len); // Every range should be of the form A-B, where A<=B. for pair in input_ranges.chunks(2) { let (start, end) = (pair[0], pair[1]); if start > end { return Err(DeserializeError::generic("invalid input range")); } } // And now extract the corresponding sequence of state IDs. We leave // this sequence as a &[u8] instead of a &[S] because sparse DFAs do // not have any alignment requirements. let next_len = ntrans .checked_mul(self.id_len()) .expect("state size * #trans should always fit in a usize"); wire::check_slice_len(state, next_len, "sparse trans state IDs")?; let (next, state) = state.split_at(next_len); // We can at least verify that every state ID is in bounds. for idbytes in next.chunks(self.id_len()) { let (id, _) = wire::read_state_id(idbytes, "sparse state ID in try_state")?; wire::check_slice_len( self.sparse(), id.as_usize(), "invalid sparse state ID", )?; } // If this is a match state, then read the pattern IDs for this state. // Pattern IDs is a u32-length prefixed sequence of native endian // encoded 32-bit integers. let (pattern_ids, state) = if is_match { let (npats, nr) = wire::try_read_u32_as_usize(state, "pattern ID length")?; let state = &state[nr..]; if npats == 0 { return Err(DeserializeError::generic( "state marked as a match, but has no pattern IDs", )); } let pattern_ids_len = wire::mul(npats, 4, "sparse pattern ID byte length")?; wire::check_slice_len( state, pattern_ids_len, "sparse pattern IDs", )?; let (pattern_ids, state) = state.split_at(pattern_ids_len); for patbytes in pattern_ids.chunks(PatternID::SIZE) { wire::read_pattern_id( patbytes, "sparse pattern ID in try_state", )?; } (pattern_ids, state) } else { (&[][..], state) }; // Now read this state's accelerator info. The first byte is the length // of the accelerator, which is typically 0 (for no acceleration) but // is no bigger than 3. The length indicates the number of bytes that // follow, where each byte corresponds to a transition out of this // state. 
if state.is_empty() { return Err(DeserializeError::generic("no accelerator length")); } let (accel_len, state) = (usize::from(state[0]), &state[1..]); if accel_len > 3 { return Err(DeserializeError::generic( "sparse invalid accelerator length", )); } else if accel_len == 0 && sp.is_accel_state(id) { return Err(DeserializeError::generic( "got no accelerators in state, but in accelerator ID range", )); } else if accel_len > 0 && !sp.is_accel_state(id) { return Err(DeserializeError::generic( "state in accelerator ID range, but has no accelerators", )); } wire::check_slice_len( state, accel_len, "sparse corrupt accelerator length", )?; let (accel, _) = (&state[..accel_len], &state[accel_len..]); let state = State { id, is_match, ntrans, input_ranges, next, pattern_ids, accel, }; if sp.is_quit_state(state.next_at(state.ntrans - 1)) { return Err(DeserializeError::generic( "state with EOI transition to quit state is illegal", )); } Ok(state) } /// Return an iterator over all of the states in this DFA. /// /// The iterator returned yields tuples, where the first element is the /// state ID and the second element is the state itself. fn states(&self) -> StateIter<'_, T> { StateIter { trans: self, id: DEAD.as_usize() } } /// Returns the sparse transitions as raw bytes. fn sparse(&self) -> &[u8] { self.sparse.as_ref() } /// Returns the number of bytes represented by a single state ID. fn id_len(&self) -> usize { StateID::SIZE } /// Return the memory usage, in bytes, of these transitions. /// /// This does not include the size of a `Transitions` value itself. fn memory_usage(&self) -> usize { self.sparse().len() } } #[cfg(feature = "dfa-build")] impl<T: AsMut<[u8]>> Transitions<T> { /// Return a convenient mutable representation of the given state. /// This panics if the state is invalid. fn state_mut(&mut self, id: StateID) -> StateMut<'_> { let mut state = &mut self.sparse_mut()[id.as_usize()..]; let mut ntrans = wire::read_u16(&state).as_usize(); let is_match = (1 << 15) & ntrans != 0; ntrans &= !(1 << 15); state = &mut state[2..]; let (input_ranges, state) = state.split_at_mut(ntrans * 2); let (next, state) = state.split_at_mut(ntrans * StateID::SIZE); let (pattern_ids, state) = if is_match { let npats = wire::read_u32(&state).as_usize(); state[4..].split_at_mut(npats * 4) } else { (&mut [][..], state) }; let accel_len = usize::from(state[0]); let accel = &mut state[1..accel_len + 1]; StateMut { id, is_match, ntrans, input_ranges, next, pattern_ids, accel, } } /// Returns the sparse transitions as raw mutable bytes. fn sparse_mut(&mut self) -> &mut [u8] { self.sparse.as_mut() } } /// The set of all possible starting states in a DFA. /// /// See the eponymous type in the `dense` module for more details. This type /// is very similar to `dense::StartTable`, except that its underlying /// representation is `&[u8]` instead of `&[S]`. (The latter would require /// sparse DFAs to be aligned, which is explicitly something we do not require /// because we don't really need it.) #[derive(Clone)] struct StartTable<T> { /// The initial start state IDs as a contiguous table of native endian /// encoded integers, represented by `S`. /// /// In practice, T is either Vec<u8> or &[u8] and has no alignment /// requirements. /// /// The first `2 * stride` (currently always 8) entries always correspond /// to the starts states for the entire DFA, with the first 4 entries being /// for unanchored searches and the second 4 entries being for anchored /// searches. 
To keep things simple, we always use 8 entries even if the /// `StartKind` is not both. /// /// After that, there are `stride * patterns` state IDs, where `patterns` /// may be zero in the case of a DFA with no patterns or in the case where /// the DFA was built without enabling starting states for each pattern. table: T, /// The starting state configuration supported. When 'both', both /// unanchored and anchored searches work. When 'unanchored', anchored /// searches panic. When 'anchored', unanchored searches panic. kind: StartKind, /// The start state configuration for every possible byte. start_map: StartByteMap, /// The number of starting state IDs per pattern. stride: usize, /// The total number of patterns for which starting states are encoded. /// This is `None` for DFAs that were built without start states for each /// pattern. Thus, one cannot use this field to say how many patterns /// are in the DFA in all cases. It is specific to how many patterns are /// represented in this start table. pattern_len: Option<usize>, /// The universal starting state for unanchored searches. This is only /// present when the DFA supports unanchored searches and when all starting /// state IDs for an unanchored search are equivalent. universal_start_unanchored: Option<StateID>, /// The universal starting state for anchored searches. This is only /// present when the DFA supports anchored searches and when all starting /// state IDs for an anchored search are equivalent. universal_start_anchored: Option<StateID>, } #[cfg(feature = "dfa-build")] impl StartTable<Vec<u8>> { fn new<T: AsRef<[u32]>>( dfa: &dense::DFA<T>, pattern_len: Option<usize>, ) -> StartTable<Vec<u8>> { let stride = Start::len(); // This is OK since the only way we're here is if a dense DFA could be // constructed successfully, which uses the same space. let len = stride .checked_mul(pattern_len.unwrap_or(0)) .unwrap() .checked_add(stride.checked_mul(2).unwrap()) .unwrap() .checked_mul(StateID::SIZE) .unwrap(); StartTable { table: vec![0; len], kind: dfa.start_kind(), start_map: dfa.start_map().clone(), stride, pattern_len, universal_start_unanchored: dfa .universal_start_state(Anchored::No), universal_start_anchored: dfa.universal_start_state(Anchored::Yes), } } fn from_dense_dfa<T: AsRef<[u32]>>( dfa: &dense::DFA<T>, remap: &[StateID], ) -> Result<StartTable<Vec<u8>>, BuildError> { // Unless the DFA has start states compiled for each pattern, then // as far as the starting state table is concerned, there are zero // patterns to account for. It will instead only store starting states // for the entire DFA. 
let start_pattern_len = if dfa.starts_for_each_pattern() { Some(dfa.pattern_len()) } else { None }; let mut sl = StartTable::new(dfa, start_pattern_len); for (old_start_id, anchored, sty) in dfa.starts() { let new_start_id = remap[dfa.to_index(old_start_id)]; sl.set_start(anchored, sty, new_start_id); } Ok(sl) } } impl<'a> StartTable<&'a [u8]> { unsafe fn from_bytes_unchecked( mut slice: &'a [u8], ) -> Result<(StartTable<&'a [u8]>, usize), DeserializeError> { let slice_start = slice.as_ptr().as_usize(); let (kind, nr) = StartKind::from_bytes(slice)?; slice = &slice[nr..]; let (start_map, nr) = StartByteMap::from_bytes(slice)?; slice = &slice[nr..]; let (stride, nr) = wire::try_read_u32_as_usize(slice, "sparse start table stride")?; slice = &slice[nr..]; if stride != Start::len() { return Err(DeserializeError::generic( "invalid sparse starting table stride", )); } let (maybe_pattern_len, nr) = wire::try_read_u32_as_usize(slice, "sparse start table patterns")?; slice = &slice[nr..]; let pattern_len = if maybe_pattern_len.as_u32() == u32::MAX { None } else { Some(maybe_pattern_len) }; if pattern_len.map_or(false, |len| len > PatternID::LIMIT) { return Err(DeserializeError::generic( "sparse invalid number of patterns", )); } let (universal_unanchored, nr) = wire::try_read_u32(slice, "universal unanchored start")?; slice = &slice[nr..]; let universal_start_unanchored = if universal_unanchored == u32::MAX { None } else { Some(StateID::try_from(universal_unanchored).map_err(|e| { DeserializeError::state_id_error( e, "universal unanchored start", ) })?) }; let (universal_anchored, nr) = wire::try_read_u32(slice, "universal anchored start")?; slice = &slice[nr..]; let universal_start_anchored = if universal_anchored == u32::MAX { None } else { Some(StateID::try_from(universal_anchored).map_err(|e| { DeserializeError::state_id_error(e, "universal anchored start") })?) }; let pattern_table_size = wire::mul( stride, pattern_len.unwrap_or(0), "sparse invalid pattern length", )?; // Our start states always start with a single stride of start states // for the entire automaton which permit it to match any pattern. What // follows it are an optional set of start states for each pattern. 
let start_state_len = wire::add( wire::mul(2, stride, "start state stride too big")?, pattern_table_size, "sparse invalid 'any' pattern starts size", )?; let table_bytes_len = wire::mul( start_state_len, StateID::SIZE, "sparse pattern table bytes length", )?; wire::check_slice_len( slice, table_bytes_len, "sparse start ID table", )?; let table = &slice[..table_bytes_len]; slice = &slice[table_bytes_len..]; let sl = StartTable { table, kind, start_map, stride, pattern_len, universal_start_unanchored, universal_start_anchored, }; Ok((sl, slice.as_ptr().as_usize() - slice_start)) } } impl<T: AsRef<[u8]>> StartTable<T> { fn write_to<E: Endian>( &self, mut dst: &mut [u8], ) -> Result<usize, SerializeError> { let nwrite = self.write_to_len(); if dst.len() < nwrite { return Err(SerializeError::buffer_too_small( "sparse starting table ids", )); } dst = &mut dst[..nwrite]; // write start kind let nw = self.kind.write_to::<E>(dst)?; dst = &mut dst[nw..]; // write start byte map let nw = self.start_map.write_to(dst)?; dst = &mut dst[nw..]; // write stride E::write_u32(u32::try_from(self.stride).unwrap(), dst); dst = &mut dst[size_of::<u32>()..]; // write pattern length E::write_u32( u32::try_from(self.pattern_len.unwrap_or(0xFFFF_FFFF)).unwrap(), dst, ); dst = &mut dst[size_of::<u32>()..]; // write universal start unanchored state id, u32::MAX if absent E::write_u32( self.universal_start_unanchored .map_or(u32::MAX, |sid| sid.as_u32()), dst, ); dst = &mut dst[size_of::<u32>()..]; // write universal start anchored state id, u32::MAX if absent E::write_u32( self.universal_start_anchored.map_or(u32::MAX, |sid| sid.as_u32()), dst, ); dst = &mut dst[size_of::<u32>()..]; // write start IDs for (sid, _, _) in self.iter() { E::write_u32(sid.as_u32(), dst); dst = &mut dst[StateID::SIZE..]; } Ok(nwrite) } /// Returns the number of bytes the serialized form of this transition /// table will use. fn write_to_len(&self) -> usize { self.kind.write_to_len() + self.start_map.write_to_len() + size_of::<u32>() // stride + size_of::<u32>() // # patterns + size_of::<u32>() // universal unanchored start + size_of::<u32>() // universal anchored start + self.table().len() } /// Validates that every starting state ID in this table is valid. /// /// That is, every starting state ID can be used to correctly decode a /// state in the DFA's sparse transitions. fn validate( &self, sp: &Special, trans: &Transitions<T>, ) -> Result<(), DeserializeError> { for (id, _, _) in self.iter() { if sp.is_match_state(id) { return Err(DeserializeError::generic( "start states cannot be match states", )); } // Confirm that the start state points to a valid state. let state = trans.try_state(sp, id)?; // And like for the transition table, confirm that the transitions // on all start states themselves point to a valid state. // // It'd probably be better to integrate this validation with the // transition table, or otherwise store a sorted sequence of all // valid state IDs in the sparse DFA itself. That way, we could // check that every pointer to a state corresponds precisely to a // correct and valid state. for i in 0..state.ntrans { let to = state.next_at(i); let _ = trans.try_state(sp, to)?; } } Ok(()) } /// Converts this start list to a borrowed value. 
fn as_ref(&self) -> StartTable<&'_ [u8]> { StartTable { table: self.table(), kind: self.kind, start_map: self.start_map.clone(), stride: self.stride, pattern_len: self.pattern_len, universal_start_unanchored: self.universal_start_unanchored, universal_start_anchored: self.universal_start_anchored, } } /// Converts this start list to an owned value. #[cfg(feature = "alloc")] fn to_owned(&self) -> StartTable<alloc::vec::Vec<u8>> { StartTable { table: self.table().to_vec(), kind: self.kind, start_map: self.start_map.clone(), stride: self.stride, pattern_len: self.pattern_len, universal_start_unanchored: self.universal_start_unanchored, universal_start_anchored: self.universal_start_anchored, } } /// Return the start state for the given index and pattern ID. If the /// pattern ID is None, then the corresponding start state for the entire /// DFA is returned. If the pattern ID is not None, then the corresponding /// starting state for the given pattern is returned. If this start table /// does not have individual starting states for each pattern, then this /// panics. fn start( &self, input: &Input<'_>, start: Start, ) -> Result<StateID, MatchError> { let start_index = start.as_usize(); let mode = input.get_anchored(); let index = match mode { Anchored::No => { if !self.kind.has_unanchored() { return Err(MatchError::unsupported_anchored(mode)); } start_index } Anchored::Yes => { if !self.kind.has_anchored() { return Err(MatchError::unsupported_anchored(mode)); } self.stride + start_index } Anchored::Pattern(pid) => { let len = match self.pattern_len { None => { return Err(MatchError::unsupported_anchored(mode)) } Some(len) => len, }; if pid.as_usize() >= len { return Ok(DEAD); } (2 * self.stride) + (self.stride * pid.as_usize()) + start_index } }; let start = index * StateID::SIZE; // This OK since we're allowed to assume that the start table contains // valid StateIDs. Ok(wire::read_state_id_unchecked(&self.table()[start..]).0) } /// Return an iterator over all start IDs in this table. fn iter(&self) -> StartStateIter<'_, T> { StartStateIter { st: self, i: 0 } } /// Returns the total number of start state IDs in this table. fn len(&self) -> usize { self.table().len() / StateID::SIZE } /// Returns the table as a raw slice of bytes. fn table(&self) -> &[u8] { self.table.as_ref() } /// Return the memory usage, in bytes, of this start list. /// /// This does not include the size of a `StartTable` value itself. fn memory_usage(&self) -> usize { self.table().len() } } #[cfg(feature = "dfa-build")] impl<T: AsMut<[u8]>> StartTable<T> { /// Set the start state for the given index and pattern. /// /// If the pattern ID or state ID are not valid, then this will panic. fn set_start(&mut self, anchored: Anchored, start: Start, id: StateID) { let start_index = start.as_usize(); let index = match anchored { Anchored::No => start_index, Anchored::Yes => self.stride + start_index, Anchored::Pattern(pid) => { let pid = pid.as_usize(); let len = self .pattern_len .expect("start states for each pattern enabled"); assert!(pid < len, "invalid pattern ID {:?}", pid); self.stride .checked_mul(pid) .unwrap() .checked_add(self.stride.checked_mul(2).unwrap()) .unwrap() .checked_add(start_index) .unwrap() } }; let start = index * StateID::SIZE; let end = start + StateID::SIZE; wire::write_state_id::<wire::NE>( id, &mut self.table.as_mut()[start..end], ); } } /// An iterator over all state state IDs in a sparse DFA. 
struct StartStateIter<'a, T> { st: &'a StartTable<T>, i: usize, } impl<'a, T: AsRef<[u8]>> Iterator for StartStateIter<'a, T> { type Item = (StateID, Anchored, Start); fn next(&mut self) -> Option<(StateID, Anchored, Start)> { let i = self.i; if i >= self.st.len() { return None; } self.i += 1; // This unwrap is okay since the stride of any DFA must always match // the number of start state types. let start_type = Start::from_usize(i % self.st.stride).unwrap(); let anchored = if i < self.st.stride { Anchored::No } else if i < (2 * self.st.stride) { Anchored::Yes } else { let pid = (i - (2 * self.st.stride)) / self.st.stride; Anchored::Pattern(PatternID::new(pid).unwrap()) }; let start = i * StateID::SIZE; let end = start + StateID::SIZE; let bytes = self.st.table()[start..end].try_into().unwrap(); // This is OK since we're allowed to assume that any IDs in this start // table are correct and valid for this DFA. let id = StateID::from_ne_bytes_unchecked(bytes); Some((id, anchored, start_type)) } } impl<'a, T> fmt::Debug for StartStateIter<'a, T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("StartStateIter").field("i", &self.i).finish() } } /// An iterator over all states in a sparse DFA. /// /// This iterator yields tuples, where the first element is the state ID and /// the second element is the state itself. struct StateIter<'a, T> { trans: &'a Transitions<T>, id: usize, } impl<'a, T: AsRef<[u8]>> Iterator for StateIter<'a, T> { type Item = State<'a>; fn next(&mut self) -> Option<State<'a>> { if self.id >= self.trans.sparse().len() { return None; } let state = self.trans.state(StateID::new_unchecked(self.id)); self.id = self.id + state.write_to_len(); Some(state) } } impl<'a, T> fmt::Debug for StateIter<'a, T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("StateIter").field("id", &self.id).finish() } } /// A representation of a sparse DFA state that can be cheaply materialized /// from a state identifier. #[derive(Clone)] struct State<'a> { /// The identifier of this state. id: StateID, /// Whether this is a match state or not. is_match: bool, /// The number of transitions in this state. ntrans: usize, /// Pairs of input ranges, where there is one pair for each transition. /// Each pair specifies an inclusive start and end byte range for the /// corresponding transition. input_ranges: &'a [u8], /// Transitions to the next state. This slice contains native endian /// encoded state identifiers, with `S` as the representation. Thus, there /// are `ntrans * size_of::<S>()` bytes in this slice. next: &'a [u8], /// If this is a match state, then this contains the pattern IDs that match /// when the DFA is in this state. /// /// This is a contiguous sequence of 32-bit native endian encoded integers. pattern_ids: &'a [u8], /// An accelerator for this state, if present. If this state has no /// accelerator, then this is an empty slice. When non-empty, this slice /// has length at most 3 and corresponds to the exhaustive set of bytes /// that must be seen in order to transition out of this state. accel: &'a [u8], } impl<'a> State<'a> { /// Searches for the next transition given an input byte. If no such /// transition could be found, then a dead state is returned. /// /// This is marked as inline to help dramatically boost sparse searching, /// which decodes each state it enters to follow the next transition. 
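    // For example, a state encoded with transitions for the inclusive ranges
    // b'a'..=b'c' and b'x'..=b'z' stores its ranges as [a, c, x, z]; the loop
    // below checks `a <= input <= c`, then `x <= input <= z`, and falls
    // through to DEAD if neither range contains `input`. The final (EOI)
    // transition is deliberately skipped here and is only reachable through
    // `next_eoi`.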
#[cfg_attr(feature = "perf-inline", inline(always))] fn next(&self, input: u8) -> StateID { // This straight linear search was observed to be much better than // binary search on ASCII haystacks, likely because a binary search // visits the ASCII case last but a linear search sees it first. A // binary search does do a little better on non-ASCII haystacks, but // not by much. There might be a better trade off lurking here. for i in 0..(self.ntrans - 1) { let (start, end) = self.range(i); if start <= input && input <= end { return self.next_at(i); } // We could bail early with an extra branch: if input < b1, then // we know we'll never find a matching transition. Interestingly, // this extra branch seems to not help performance, or will even // hurt it. It's likely very dependent on the DFA itself and what // is being searched. } DEAD } /// Returns the next state ID for the special EOI transition. fn next_eoi(&self) -> StateID { self.next_at(self.ntrans - 1) } /// Returns the identifier for this state. fn id(&self) -> StateID { self.id } /// Returns the inclusive input byte range for the ith transition in this /// state. fn range(&self, i: usize) -> (u8, u8) { (self.input_ranges[i * 2], self.input_ranges[i * 2 + 1]) } /// Returns the next state for the ith transition in this state. fn next_at(&self, i: usize) -> StateID { let start = i * StateID::SIZE; let end = start + StateID::SIZE; let bytes = self.next[start..end].try_into().unwrap(); StateID::from_ne_bytes_unchecked(bytes) } /// Returns the pattern ID for the given match index. If the match index /// is invalid, then this panics. fn pattern_id(&self, match_index: usize) -> PatternID { let start = match_index * PatternID::SIZE; wire::read_pattern_id_unchecked(&self.pattern_ids[start..]).0 } /// Returns the total number of pattern IDs for this state. This is always /// zero when `is_match` is false. fn pattern_len(&self) -> usize { assert_eq!(0, self.pattern_ids.len() % 4); self.pattern_ids.len() / 4 } /// Return an accelerator for this state. fn accelerator(&self) -> &'a [u8] { self.accel } /// Write the raw representation of this state to the given buffer using /// the given endianness. fn write_to<E: Endian>( &self, mut dst: &mut [u8], ) -> Result<usize, SerializeError> { let nwrite = self.write_to_len(); if dst.len() < nwrite { return Err(SerializeError::buffer_too_small( "sparse state transitions", )); } let ntrans = if self.is_match { self.ntrans | (1 << 15) } else { self.ntrans }; E::write_u16(u16::try_from(ntrans).unwrap(), dst); dst = &mut dst[size_of::<u16>()..]; dst[..self.input_ranges.len()].copy_from_slice(self.input_ranges); dst = &mut dst[self.input_ranges.len()..]; for i in 0..self.ntrans { E::write_u32(self.next_at(i).as_u32(), dst); dst = &mut dst[StateID::SIZE..]; } if self.is_match { E::write_u32(u32::try_from(self.pattern_len()).unwrap(), dst); dst = &mut dst[size_of::<u32>()..]; for i in 0..self.pattern_len() { let pid = self.pattern_id(i); E::write_u32(pid.as_u32(), dst); dst = &mut dst[PatternID::SIZE..]; } } dst[0] = u8::try_from(self.accel.len()).unwrap(); dst[1..][..self.accel.len()].copy_from_slice(self.accel); Ok(nwrite) } /// Return the total number of bytes that this state consumes in its /// encoded form. 
fn write_to_len(&self) -> usize { let mut len = 2 + (self.ntrans * 2) + (self.ntrans * StateID::SIZE) + (1 + self.accel.len()); if self.is_match { len += size_of::<u32>() + self.pattern_ids.len(); } len } } impl<'a> fmt::Debug for State<'a> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let mut printed = false; for i in 0..(self.ntrans - 1) { let next = self.next_at(i); if next == DEAD { continue; } if printed { write!(f, ", ")?; } let (start, end) = self.range(i); if start == end { write!(f, "{:?} => {:?}", DebugByte(start), next.as_usize())?; } else { write!( f, "{:?}-{:?} => {:?}", DebugByte(start), DebugByte(end), next.as_usize(), )?; } printed = true; } let eoi = self.next_at(self.ntrans - 1); if eoi != DEAD { if printed { write!(f, ", ")?; } write!(f, "EOI => {:?}", eoi.as_usize())?; } Ok(()) } } /// A representation of a mutable sparse DFA state that can be cheaply /// materialized from a state identifier. #[cfg(feature = "dfa-build")] struct StateMut<'a> { /// The identifier of this state. id: StateID, /// Whether this is a match state or not. is_match: bool, /// The number of transitions in this state. ntrans: usize, /// Pairs of input ranges, where there is one pair for each transition. /// Each pair specifies an inclusive start and end byte range for the /// corresponding transition. input_ranges: &'a mut [u8], /// Transitions to the next state. This slice contains native endian /// encoded state identifiers, with `S` as the representation. Thus, there /// are `ntrans * size_of::<S>()` bytes in this slice. next: &'a mut [u8], /// If this is a match state, then this contains the pattern IDs that match /// when the DFA is in this state. /// /// This is a contiguous sequence of 32-bit native endian encoded integers. pattern_ids: &'a [u8], /// An accelerator for this state, if present. If this state has no /// accelerator, then this is an empty slice. When non-empty, this slice /// has length at most 3 and corresponds to the exhaustive set of bytes /// that must be seen in order to transition out of this state. accel: &'a mut [u8], } #[cfg(feature = "dfa-build")] impl<'a> StateMut<'a> { /// Sets the ith transition to the given state. fn set_next_at(&mut self, i: usize, next: StateID) { let start = i * StateID::SIZE; let end = start + StateID::SIZE; wire::write_state_id::<wire::NE>(next, &mut self.next[start..end]); } } #[cfg(feature = "dfa-build")] impl<'a> fmt::Debug for StateMut<'a> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let state = State { id: self.id, is_match: self.is_match, ntrans: self.ntrans, input_ranges: self.input_ranges, next: self.next, pattern_ids: self.pattern_ids, accel: self.accel, }; fmt::Debug::fmt(&state, f) } } /* /// A binary search routine specialized specifically to a sparse DFA state's /// transitions. Specifically, the transitions are defined as a set of pairs /// of input bytes that delineate an inclusive range of bytes. If the input /// byte is in the range, then the corresponding transition is a match. /// /// This binary search accepts a slice of these pairs and returns the position /// of the matching pair (the ith transition), or None if no matching pair /// could be found. /// /// Note that this routine is not currently used since it was observed to /// either decrease performance when searching ASCII, or did not provide enough /// of a boost on non-ASCII haystacks to be worth it. However, we leave it here /// for posterity in case we can find a way to use it. 
/// /// In theory, we could use the standard library's search routine if we could /// cast a `&[u8]` to a `&[(u8, u8)]`, but I don't believe this is currently /// guaranteed to be safe and is thus UB (since I don't think the in-memory /// representation of `(u8, u8)` has been nailed down). One could define a /// repr(C) type, but the casting doesn't seem justified. #[cfg_attr(feature = "perf-inline", inline(always))] fn binary_search_ranges(ranges: &[u8], needle: u8) -> Option<usize> { debug_assert!(ranges.len() % 2 == 0, "ranges must have even length"); debug_assert!(ranges.len() <= 512, "ranges should be short"); let (mut left, mut right) = (0, ranges.len() / 2); while left < right { let mid = (left + right) / 2; let (b1, b2) = (ranges[mid * 2], ranges[mid * 2 + 1]); if needle < b1 { right = mid; } else if needle > b2 { left = mid + 1; } else { return Some(mid); } } None } */ #[cfg(all(test, feature = "syntax", feature = "dfa-build"))] mod tests { use crate::{ dfa::{dense::DFA, Automaton}, nfa::thompson, Input, MatchError, }; // See the analogous test in src/hybrid/dfa.rs and src/dfa/dense.rs. #[test] fn heuristic_unicode_forward() { let dfa = DFA::builder() .configure(DFA::config().unicode_word_boundary(true)) .thompson(thompson::Config::new().reverse(true)) .build(r"\b[0-9]+\b") .unwrap() .to_sparse() .unwrap(); let input = Input::new("β123").range(2..); let expected = MatchError::quit(0xB2, 1); let got = dfa.try_search_fwd(&input); assert_eq!(Err(expected), got); let input = Input::new("123β").range(..3); let expected = MatchError::quit(0xCE, 3); let got = dfa.try_search_fwd(&input); assert_eq!(Err(expected), got); } // See the analogous test in src/hybrid/dfa.rs and src/dfa/dense.rs. #[test] fn heuristic_unicode_reverse() { let dfa = DFA::builder() .configure(DFA::config().unicode_word_boundary(true)) .thompson(thompson::Config::new().reverse(true)) .build(r"\b[0-9]+\b") .unwrap() .to_sparse() .unwrap(); let input = Input::new("β123").range(2..); let expected = MatchError::quit(0xB2, 1); let got = dfa.try_search_rev(&input); assert_eq!(Err(expected), got); let input = Input::new("123β").range(..3); let expected = MatchError::quit(0xCE, 3); let got = dfa.try_search_rev(&input); assert_eq!(Err(expected), got); } } <file_sep>/regex-lite/src/nfa.rs use core::{cell::RefCell, mem::size_of}; use alloc::{string::String, sync::Arc, vec, vec::Vec}; use crate::{ error::Error, hir::{self, Hir, HirKind}, int::U32, }; pub(crate) type StateID = u32; #[derive(Clone, Copy, Debug)] pub(crate) struct Config { pub(crate) size_limit: Option<usize>, } impl Default for Config { fn default() -> Config { Config { size_limit: Some(10 * (1 << 20)) } } } #[derive(Clone)] pub(crate) struct NFA { /// The pattern string this NFA was generated from. /// /// We put it here for lack of a better place to put it. ¯\_(ツ)_/¯ pattern: String, /// The states that make up this NFA. states: Vec<State>, /// The ID of the start state. start: StateID, /// Whether this NFA can only match at the beginning of a haystack. is_start_anchored: bool, /// Whether this NFA can match the empty string. is_match_empty: bool, /// If every match has the same number of matching capture groups, then /// this corresponds to the number of groups. static_explicit_captures_len: Option<usize>, /// A map from capture group name to its corresponding index. cap_name_to_index: CaptureNameMap, /// A map from capture group index to the corresponding name, if one /// exists. 
cap_index_to_name: Vec<Option<Arc<str>>>, /// Heap memory used indirectly by NFA states and other things (like the /// various capturing group representations above). Since each state /// might use a different amount of heap, we need to keep track of this /// incrementally. memory_extra: usize, } impl NFA { /// Creates a new NFA from the given configuration and HIR. pub(crate) fn new( config: Config, pattern: String, hir: &Hir, ) -> Result<NFA, Error> { Compiler::new(config, pattern).compile(hir) } /// Returns the pattern string used to construct this NFA. pub(crate) fn pattern(&self) -> &str { &self.pattern } /// Returns the state corresponding to the given ID. /// /// # Panics /// /// If the ID does not refer to a valid state, then this panics. pub(crate) fn state(&self, id: StateID) -> &State { &self.states[id.as_usize()] } /// Returns the total number of states in this NFA. pub(crate) fn len(&self) -> usize { self.states.len() } /// Returns the ID of the starting state for this NFA. pub(crate) fn start(&self) -> StateID { self.start } /// Returns the capture group index for the corresponding named group. /// If no such group with the given name exists, then `None` is returned. pub(crate) fn to_index(&self, name: &str) -> Option<usize> { self.cap_name_to_index.get(name).cloned().map(|i| i.as_usize()) } /* /// Returns the capture group name for the corresponding index. /// If no such group with the given index, then `None` is returned. pub(crate) fn to_name(&self, index: usize) -> Option<&str> { self.cap_index_to_name.get(index)?.as_deref() } */ /// Returns an iterator over all of the capture groups, along with their /// names if they exist, in this NFA. pub(crate) fn capture_names(&self) -> CaptureNames<'_> { CaptureNames { it: self.cap_index_to_name.iter() } } /// Returns the total number of capture groups, including the first and /// implicit group, in this NFA. pub(crate) fn group_len(&self) -> usize { self.cap_index_to_name.len() } /// Returns true if and only if this NFA can only match at the beginning of /// a haystack. pub(crate) fn is_start_anchored(&self) -> bool { self.is_start_anchored } /// If the pattern always reports the same number of matching capture groups /// for every match, then this returns the number of those groups. This /// doesn't include the implicit group found in every pattern. pub(crate) fn static_explicit_captures_len(&self) -> Option<usize> { self.static_explicit_captures_len } /// Returns the heap memory usage, in bytes, used by this NFA. fn memory_usage(&self) -> usize { (self.states.len() * size_of::<State>()) + (self.cap_index_to_name.len() * size_of::<Option<Arc<str>>>()) + self.memory_extra } } impl core::fmt::Debug for NFA { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { writeln!(f, "NFA(")?; writeln!(f, "pattern: {}", self.pattern)?; for (sid, state) in self.states.iter().enumerate() { writeln!(f, "{:07?}: {:?}", sid, state)?; } writeln!(f, ")")?; Ok(()) } } /// An iterator over all capture groups in an NFA. /// /// If a particular group has a name, then it is yielded. Otherwise, `None` /// is yielded. 
#[derive(Clone, Debug)] pub(crate) struct CaptureNames<'a> { it: core::slice::Iter<'a, Option<Arc<str>>>, } impl<'a> Iterator for CaptureNames<'a> { type Item = Option<&'a str>; fn next(&mut self) -> Option<Option<&'a str>> { self.it.next().map(|n| n.as_deref()) } } #[derive(Clone, Eq, PartialEq)] pub(crate) enum State { Char { target: StateID, ch: char }, Ranges { target: StateID, ranges: Vec<(char, char)> }, Splits { targets: Vec<StateID>, reverse: bool }, Goto { target: StateID, look: Option<hir::Look> }, Capture { target: StateID, slot: u32 }, Fail, Match, } impl State { /// Returns the heap memory usage of this NFA state in bytes. fn memory_usage(&self) -> usize { match *self { State::Char { .. } | State::Goto { .. } | State::Capture { .. } | State::Fail { .. } | State::Match => 0, State::Splits { ref targets, .. } => { targets.len() * size_of::<StateID>() } State::Ranges { ref ranges, .. } => { ranges.len() * size_of::<(char, char)>() } } } /// Returns an iterator over the given split targets. The order of the /// iterator yields elements in reverse when `reverse` is true. pub(crate) fn iter_splits<'a>( splits: &'a [StateID], reverse: bool, ) -> impl Iterator<Item = StateID> + 'a { let mut it = splits.iter(); core::iter::from_fn(move || { if reverse { it.next_back() } else { it.next() }.copied() }) } } impl core::fmt::Debug for State { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { match *self { State::Char { target, ch } => { write!(f, "{:?} => {:?}", ch, target) } State::Ranges { target, ref ranges } => { for (i, &(start, end)) in ranges.iter().enumerate() { if i > 0 { write!(f, ", ")?; } write!(f, "{:?}-{:?} => {:?}", start, end, target)?; } Ok(()) } State::Splits { ref targets, reverse } => { write!(f, "splits(")?; for (i, sid) in State::iter_splits(targets, reverse).enumerate() { if i > 0 { write!(f, ", ")?; } write!(f, "{:?}", sid)?; } write!(f, ")") } State::Goto { target, look: None } => { write!(f, "goto({:?})", target) } State::Goto { target, look: Some(look) } => { write!(f, "{:?} => {:?}", look, target) } State::Capture { target, slot } => { write!(f, "capture(slot={:?}) => {:?}", slot, target,) } State::Fail => write!(f, "FAIL"), State::Match => { write!(f, "MATCH") } } } } /// A map from capture group name to its corresponding capture group index. /// /// We define a type alias here so that we can transparently use a `HashMap` /// whenever it's available. We do so presumably because it's faster, although /// there are no benchmarks verifying this. 
#[cfg(feature = "std")] type CaptureNameMap = std::collections::HashMap<Arc<str>, u32>; #[cfg(not(feature = "std"))] type CaptureNameMap = alloc::collections::BTreeMap<Arc<str>, u32>; #[derive(Debug)] struct Compiler { config: Config, nfa: RefCell<NFA>, } impl Compiler { fn new(config: Config, pattern: String) -> Compiler { let nfa = RefCell::new(NFA { pattern, states: vec![], start: 0, is_start_anchored: false, is_match_empty: false, static_explicit_captures_len: None, cap_name_to_index: CaptureNameMap::default(), cap_index_to_name: vec![], memory_extra: 0, }); Compiler { config, nfa } } fn compile(self, hir: &Hir) -> Result<NFA, Error> { self.nfa.borrow_mut().is_start_anchored = hir.is_start_anchored(); self.nfa.borrow_mut().is_match_empty = hir.is_match_empty(); self.nfa.borrow_mut().static_explicit_captures_len = hir.static_explicit_captures_len(); let compiled = self.c_capture(0, None, hir)?; let mat = self.add(State::Match)?; self.patch(compiled.end, mat)?; self.nfa.borrow_mut().start = compiled.start; Ok(self.nfa.into_inner()) } fn c(&self, hir: &Hir) -> Result<ThompsonRef, Error> { match *hir.kind() { HirKind::Empty => self.c_empty(), HirKind::Char(ch) => self.c_char(ch), HirKind::Class(ref class) => self.c_class(class), HirKind::Look(ref look) => self.c_look(look), HirKind::Repetition(ref rep) => self.c_repetition(rep), HirKind::Capture(ref cap) => { self.c_capture(cap.index, cap.name.as_deref(), &cap.sub) } HirKind::Concat(ref subs) => { self.c_concat(subs.iter().map(|s| self.c(s))) } HirKind::Alternation(ref subs) => { self.c_alternation(subs.iter().map(|s| self.c(s))) } } } /// Compile a "fail" state that can never be transitioned out of. fn c_fail(&self) -> Result<ThompsonRef, Error> { let id = self.add(State::Fail)?; Ok(ThompsonRef { start: id, end: id }) } /// Compile an "empty" state with one unconditional epsilon transition. /// /// Both the `start` and `end` locations point to the state created. /// Callers will likely want to keep the `start`, but patch the `end` to /// point to some other state. fn c_empty(&self) -> Result<ThompsonRef, Error> { let id = self.add_empty()?; Ok(ThompsonRef { start: id, end: id }) } /// Compile the given literal char to an NFA. fn c_char(&self, ch: char) -> Result<ThompsonRef, Error> { let id = self.add(State::Char { target: 0, ch })?; Ok(ThompsonRef { start: id, end: id }) } /// Compile the given character class into an NFA. /// /// If the class is empty, then this compiles to a `Fail` state. fn c_class(&self, class: &hir::Class) -> Result<ThompsonRef, Error> { let id = if class.ranges.is_empty() { // Technically using an explicit fail state probably isn't // necessary. Because if you try to match against an empty Ranges, // then it should turn up with nothing regardless of input, and // thus "acts" like a Fail state. But it's better to be more // explicit, and there's no real cost to doing so. self.add(State::Fail) } else { let ranges = class.ranges.iter().map(|r| (r.start, r.end)).collect(); self.add(State::Ranges { target: 0, ranges }) }?; Ok(ThompsonRef { start: id, end: id }) } /// Compile the given HIR look-around assertion to an NFA look-around /// assertion. fn c_look(&self, look: &hir::Look) -> Result<ThompsonRef, Error> { let id = self.add(State::Goto { target: 0, look: Some(*look) })?; Ok(ThompsonRef { start: id, end: id }) } /// Compile the given repetition expression. This handles all types of /// repetitions and greediness. 
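    // For example, `a?` is handled by c_zero_or_one, `a{2,}` by c_at_least,
    // `a{3}` by c_exactly and `a{2,5}` by c_bounded, as dispatched by the
    // match on `(rep.min, rep.max)` below.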
fn c_repetition( &self, rep: &hir::Repetition, ) -> Result<ThompsonRef, Error> { match (rep.min, rep.max) { (0, Some(1)) => self.c_zero_or_one(&rep.sub, rep.greedy), (min, None) => self.c_at_least(&rep.sub, rep.greedy, min), (min, Some(max)) if min == max => self.c_exactly(&rep.sub, min), (min, Some(max)) => self.c_bounded(&rep.sub, rep.greedy, min, max), } } /// Compile the given expression such that it matches at least `min` times, /// but no more than `max` times. /// /// When `greedy` is true, then the preference is for the expression to /// match as much as possible. Otherwise, it will match as little as /// possible. fn c_bounded( &self, hir: &Hir, greedy: bool, min: u32, max: u32, ) -> Result<ThompsonRef, Error> { let prefix = self.c_exactly(hir, min)?; if min == max { return Ok(prefix); } // It is tempting here to compile the rest here as a concatenation // of zero-or-one matches. i.e., for `a{2,5}`, compile it as if it // were `aaa?a?a?`. The problem here is that it leads to this program: // // >000000: 61 => 01 // 000001: 61 => 02 // 000002: union(03, 04) // 000003: 61 => 04 // 000004: union(05, 06) // 000005: 61 => 06 // 000006: union(07, 08) // 000007: 61 => 08 // 000008: MATCH // // And effectively, once you hit state 2, the epsilon closure will // include states 3, 5, 6, 7 and 8, which is quite a bit. It is better // to instead compile it like so: // // >000000: 61 => 01 // 000001: 61 => 02 // 000002: union(03, 08) // 000003: 61 => 04 // 000004: union(05, 08) // 000005: 61 => 06 // 000006: union(07, 08) // 000007: 61 => 08 // 000008: MATCH // // So that the epsilon closure of state 2 is now just 3 and 8. let empty = self.add_empty()?; let mut prev_end = prefix.end; for _ in min..max { let splits = self.add(State::Splits { targets: vec![], reverse: !greedy })?; let compiled = self.c(hir)?; self.patch(prev_end, splits)?; self.patch(splits, compiled.start)?; self.patch(splits, empty)?; prev_end = compiled.end; } self.patch(prev_end, empty)?; Ok(ThompsonRef { start: prefix.start, end: empty }) } /// Compile the given expression such that it may be matched `n` or more /// times, where `n` can be any integer. (Although a particularly large /// integer is likely to run afoul of any configured size limits.) /// /// When `greedy` is true, then the preference is for the expression to /// match as much as possible. Otherwise, it will match as little as /// possible. fn c_at_least( &self, hir: &Hir, greedy: bool, n: u32, ) -> Result<ThompsonRef, Error> { if n == 0 { // When the expression cannot match the empty string, then we // can get away with something much simpler: just one 'alt' // instruction that optionally repeats itself. But if the expr // can match the empty string... see below. if !hir.is_match_empty() { let splits = self.add(State::Splits { targets: vec![], reverse: !greedy, })?; let compiled = self.c(hir)?; self.patch(splits, compiled.start)?; self.patch(compiled.end, splits)?; return Ok(ThompsonRef { start: splits, end: splits }); } // What's going on here? Shouldn't x* be simpler than this? It // turns out that when implementing leftmost-first (Perl-like) // match semantics, x* results in an incorrect preference order // when computing the transitive closure of states if and only if // 'x' can match the empty string. So instead, we compile x* as // (x+)?, which preserves the correct preference order. 
// // See: https://github.com/rust-lang/regex/issues/779 let compiled = self.c(hir)?; let plus = self.add(State::Splits { targets: vec![], reverse: !greedy })?; self.patch(compiled.end, plus)?; self.patch(plus, compiled.start)?; let question = self.add(State::Splits { targets: vec![], reverse: !greedy })?; let empty = self.add_empty()?; self.patch(question, compiled.start)?; self.patch(question, empty)?; self.patch(plus, empty)?; Ok(ThompsonRef { start: question, end: empty }) } else if n == 1 { let compiled = self.c(hir)?; let splits = self.add(State::Splits { targets: vec![], reverse: !greedy })?; self.patch(compiled.end, splits)?; self.patch(splits, compiled.start)?; Ok(ThompsonRef { start: compiled.start, end: splits }) } else { let prefix = self.c_exactly(hir, n - 1)?; let last = self.c(hir)?; let splits = self.add(State::Splits { targets: vec![], reverse: !greedy })?; self.patch(prefix.end, last.start)?; self.patch(last.end, splits)?; self.patch(splits, last.start)?; Ok(ThompsonRef { start: prefix.start, end: splits }) } } /// Compile the given expression such that it may be matched zero or one /// times. /// /// When `greedy` is true, then the preference is for the expression to /// match as much as possible. Otherwise, it will match as little as /// possible. fn c_zero_or_one( &self, hir: &Hir, greedy: bool, ) -> Result<ThompsonRef, Error> { let splits = self.add(State::Splits { targets: vec![], reverse: !greedy })?; let compiled = self.c(hir)?; let empty = self.add_empty()?; self.patch(splits, compiled.start)?; self.patch(splits, empty)?; self.patch(compiled.end, empty)?; Ok(ThompsonRef { start: splits, end: empty }) } /// Compile the given HIR expression exactly `n` times. fn c_exactly(&self, hir: &Hir, n: u32) -> Result<ThompsonRef, Error> { self.c_concat((0..n).map(|_| self.c(hir))) } /// Compile the given expression and insert capturing states at the /// beginning and end of it. The slot for the capture states is computed /// from the index. fn c_capture( &self, index: u32, name: Option<&str>, hir: &Hir, ) -> Result<ThompsonRef, Error> { // For discontiguous indices, push placeholders for earlier capture // groups that weren't explicitly added. This can happen, for example, // with patterns like '(a){0}(a)' where '(a){0}' is completely removed // from the pattern. let existing_groups_len = self.nfa.borrow().cap_index_to_name.len(); for _ in 0..(index.as_usize().saturating_sub(existing_groups_len)) { self.nfa.borrow_mut().cap_index_to_name.push(None); } if index.as_usize() >= existing_groups_len { if let Some(name) = name { let name = Arc::from(name); let mut nfa = self.nfa.borrow_mut(); nfa.cap_name_to_index.insert(Arc::clone(&name), index); nfa.cap_index_to_name.push(Some(Arc::clone(&name))); // This is an approximation. nfa.memory_extra += name.len() + size_of::<u32>(); } else { self.nfa.borrow_mut().cap_index_to_name.push(None); } } let Some(slot) = index.checked_mul(2) else { return Err(Error::new("capture group slots exhausted")); }; let start = self.add(State::Capture { target: 0, slot })?; let inner = self.c(hir)?; let Some(slot) = slot.checked_add(1) else { return Err(Error::new("capture group slots exhausted")); }; let end = self.add(State::Capture { target: 0, slot })?; self.patch(start, inner.start)?; self.patch(inner.end, end)?; Ok(ThompsonRef { start, end }) } /// Compile a concatenation of the sub-expressions yielded by the given /// iterator. If the iterator yields no elements, then this compiles down /// to an "empty" state that always matches. 
fn c_concat<I>(&self, mut it: I) -> Result<ThompsonRef, Error> where I: Iterator<Item = Result<ThompsonRef, Error>>, { let ThompsonRef { start, mut end } = match it.next() { Some(result) => result?, None => return self.c_empty(), }; for result in it { let compiled = result?; self.patch(end, compiled.start)?; end = compiled.end; } Ok(ThompsonRef { start, end }) } /// Compile an alternation, where each element yielded by the given /// iterator represents an item in the alternation. If the iterator yields /// no elements, then this compiles down to a "fail" state. /// /// In an alternation, expressions appearing earlier are "preferred" at /// match time over expressions appearing later. (This is currently always /// true, as this crate only supports leftmost-first semantics.) fn c_alternation<I>(&self, mut it: I) -> Result<ThompsonRef, Error> where I: Iterator<Item = Result<ThompsonRef, Error>>, { let first = match it.next() { None => return self.c_fail(), Some(result) => result?, }; let second = match it.next() { None => return Ok(first), Some(result) => result?, }; let splits = self.add(State::Splits { targets: vec![], reverse: false })?; let end = self.add_empty()?; self.patch(splits, first.start)?; self.patch(first.end, end)?; self.patch(splits, second.start)?; self.patch(second.end, end)?; for result in it { let compiled = result?; self.patch(splits, compiled.start)?; self.patch(compiled.end, end)?; } Ok(ThompsonRef { start: splits, end }) } /// A convenience routine for adding an empty state, also known as an /// unconditional epsilon transition. These are quite useful for making /// NFA construction simpler. /// /// (In the regex crate, we do a second pass to remove these, but don't /// bother with that here.) fn add_empty(&self) -> Result<StateID, Error> { self.add(State::Goto { target: 0, look: None }) } /// The common implementation of "add a state." It handles the common /// error cases of state ID exhausting (by owning state ID allocation) and /// whether the size limit has been exceeded. fn add(&self, state: State) -> Result<StateID, Error> { let id = u32::try_from(self.nfa.borrow().states.len()) .map_err(|_| Error::new("exhausted state IDs, too many states"))?; self.nfa.borrow_mut().memory_extra += state.memory_usage(); self.nfa.borrow_mut().states.push(state); self.check_size_limit()?; Ok(id) } /// Add a transition from one state to another. /// /// This routine is called "patch" since it is very common to add the /// states you want, typically with "dummy" state ID transitions, and then /// "patch" in the real state IDs later. This is because you don't always /// know all of the necessary state IDs to add because they might not /// exist yet. /// /// # Errors /// /// This may error if patching leads to an increase in heap usage beyond /// the configured size limit. Heap usage only grows when patching adds a /// new transition (as in the case of a "splits" state). fn patch(&self, from: StateID, to: StateID) -> Result<(), Error> { let mut new_memory_extra = self.nfa.borrow().memory_extra; match self.nfa.borrow_mut().states[from.as_usize()] { State::Char { ref mut target, .. } => { *target = to; } State::Ranges { ref mut target, .. } => { *target = to; } State::Splits { ref mut targets, .. } => { targets.push(to); new_memory_extra += size_of::<StateID>(); } State::Goto { ref mut target, .. } => { *target = to; } State::Capture { ref mut target, .. 
} => { *target = to; } State::Fail | State::Match => {} } if new_memory_extra != self.nfa.borrow().memory_extra { self.nfa.borrow_mut().memory_extra = new_memory_extra; self.check_size_limit()?; } Ok(()) } /// Checks that the current heap memory usage of the NFA being compiled /// doesn't exceed the configured size limit. If it does, an error is /// returned. fn check_size_limit(&self) -> Result<(), Error> { if let Some(limit) = self.config.size_limit { if self.nfa.borrow().memory_usage() > limit { return Err(Error::new("compiled regex exceeded size limit")); } } Ok(()) } } /// A value that represents the result of compiling a sub-expression of a /// regex's HIR. Specifically, this represents a sub-graph of the NFA that /// has an initial state at `start` and a final state at `end`. #[derive(Clone, Copy, Debug)] struct ThompsonRef { start: StateID, end: StateID, } <file_sep>/fuzz/fuzz_targets/ast_roundtrip.rs #![no_main] use { libfuzzer_sys::{fuzz_target, Corpus}, regex_syntax::ast::{ parse::Parser, visit, Ast, Flag, Group, GroupKind, SetFlags, Visitor, }, }; #[derive(Eq, PartialEq, arbitrary::Arbitrary)] struct FuzzData { ast: Ast, } impl std::fmt::Debug for FuzzData { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut builder = f.debug_struct("FuzzData"); builder.field("ast", &self.ast); builder.field("stringified", &format!("{}", self.ast)); builder.finish() } } struct VerboseVisitor; impl Visitor for VerboseVisitor { type Output = (); type Err = (); fn finish(self) -> Result<Self::Output, Self::Err> { Ok(()) } fn visit_pre(&mut self, ast: &Ast) -> Result<Self::Output, Self::Err> { match ast { Ast::Flags(SetFlags { flags, .. }) | Ast::Group(Group { kind: GroupKind::NonCapturing(flags), .. }) if flags .flag_state(Flag::IgnoreWhitespace) .unwrap_or(false) => { Err(()) } _ => Ok(()), } } } fuzz_target!(|data: FuzzData| -> Corpus { let _ = env_logger::try_init(); let pattern = format!("{}", data.ast); let Ok(ast) = Parser::new().parse(&pattern) else { return Corpus::Keep; }; if visit(&ast, VerboseVisitor).is_err() { return Corpus::Reject; } let ast2 = Parser::new().parse(&ast.to_string()).unwrap(); assert_eq!( ast, ast2, "Found difference:\ \nleft: {:?}\ \nright: {:?}\ \nIf these two match, then there was a parsing difference; \ maybe non-determinism?", ast.to_string(), ast2.to_string() ); Corpus::Keep }); <file_sep>/regex-automata/src/dfa/onepass.rs /*! A DFA that can return spans for matching capturing groups. This module is the home of a [one-pass DFA](DFA). This module also contains a [`Builder`] and a [`Config`] for building and configuring a one-pass DFA. */ // A note on naming and credit: // // As far as I know, <NAME> came up with the practical vision and // implementation of a "one-pass regex engine." He mentions and describes it // briefly in the third article of his regexp article series: // https://swtch.com/~rsc/regexp/regexp3.html // // Cox's implementation is in RE2, and the implementation below is most // heavily inspired by RE2's. The key thing they have in common is that // their transitions are defined over an alphabet of bytes. In contrast, // Go's regex engine also has a one-pass engine, but its transitions are // more firmly rooted on Unicode codepoints. The ideas are the same, but the // implementations are different. // // RE2 tends to call this a "one-pass NFA." Here, we call it a "one-pass DFA." // They're both true in their own ways: // // * The "one-pass" criterion is generally a property of the NFA itself. 
In // particular, it is said that an NFA is one-pass if, after each byte of input // during a search, there is at most one "VM thread" remaining to take for the // next byte of input. That is, there is never any ambiguity as to the path to // take through the NFA during a search. // // * On the other hand, once a one-pass NFA has its representation converted // to something where a constant number of instructions is used for each byte // of input, the implementation looks a lot more like a DFA. It's technically // more powerful than a DFA since it has side effects (storing offsets inside // of slots activated by a transition), but it is far closer to a DFA than an // NFA simulation. // // Thus, in this crate, we call it a one-pass DFA. use alloc::{vec, vec::Vec}; use crate::{ dfa::{remapper::Remapper, DEAD}, nfa::thompson::{self, NFA}, util::{ alphabet::ByteClasses, captures::Captures, escape::DebugByte, int::{Usize, U32, U64, U8}, look::{Look, LookSet, UnicodeWordBoundaryError}, primitives::{NonMaxUsize, PatternID, StateID}, search::{Anchored, Input, Match, MatchError, MatchKind, Span}, sparse_set::SparseSet, }, }; /// The configuration used for building a [one-pass DFA](DFA). /// /// A one-pass DFA configuration is a simple data object that is typically used /// with [`Builder::configure`]. It can be cheaply cloned. /// /// A default configuration can be created either with `Config::new`, or /// perhaps more conveniently, with [`DFA::config`]. #[derive(Clone, Debug, Default)] pub struct Config { match_kind: Option<MatchKind>, starts_for_each_pattern: Option<bool>, byte_classes: Option<bool>, size_limit: Option<Option<usize>>, } impl Config { /// Return a new default one-pass DFA configuration. pub fn new() -> Config { Config::default() } /// Set the desired match semantics. /// /// The default is [`MatchKind::LeftmostFirst`], which corresponds to the /// match semantics of Perl-like regex engines. That is, when multiple /// patterns would match at the same leftmost position, the pattern that /// appears first in the concrete syntax is chosen. /// /// Currently, the only other kind of match semantics supported is /// [`MatchKind::All`]. This corresponds to "classical DFA" construction /// where all possible matches are visited. /// /// When it comes to the one-pass DFA, it is rarer for preference order and /// "longest match" to actually disagree. Since if they did disagree, then /// the regex typically isn't one-pass. For example, searching `Samwise` /// for `Sam|Samwise` will report `Sam` for leftmost-first matching and /// `Samwise` for "longest match" or "all" matching. However, this regex is /// not one-pass if taken literally. The equivalent regex, `Sam(?:|wise)` /// is one-pass and `Sam|Samwise` may be optimized to it. /// /// The other main difference is that "all" match semantics don't support /// non-greedy matches. "All" match semantics always try to match as much /// as possible. pub fn match_kind(mut self, kind: MatchKind) -> Config { self.match_kind = Some(kind); self } /// Whether to compile a separate start state for each pattern in the /// one-pass DFA. /// /// When enabled, a separate **anchored** start state is added for each /// pattern in the DFA. When this start state is used, then the DFA will /// only search for matches for the pattern specified, even if there are /// other patterns in the DFA. /// /// The main downside of this option is that it can potentially increase /// the size of the DFA and/or increase the time it takes to build the DFA. 
/// /// You might want to enable this option when you want to both search for /// anchored matches of any pattern or to search for anchored matches of /// one particular pattern while using the same DFA. (Otherwise, you would /// need to compile a new DFA for each pattern.) /// /// By default this is disabled. /// /// # Example /// /// This example shows how to build a multi-regex and then search for /// matches for a any of the patterns or matches for a specific pattern. /// /// ``` /// use regex_automata::{ /// dfa::onepass::DFA, Anchored, Input, Match, PatternID, /// }; /// /// let re = DFA::builder() /// .configure(DFA::config().starts_for_each_pattern(true)) /// .build_many(&["[a-z]+", "[0-9]+"])?; /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); /// let haystack = "123abc"; /// let input = Input::new(haystack).anchored(Anchored::Yes); /// /// // A normal multi-pattern search will show pattern 1 matches. /// re.try_search(&mut cache, &input, &mut caps)?; /// assert_eq!(Some(Match::must(1, 0..3)), caps.get_match()); /// /// // If we only want to report pattern 0 matches, then we'll get no /// // match here. /// let input = input.anchored(Anchored::Pattern(PatternID::must(0))); /// re.try_search(&mut cache, &input, &mut caps)?; /// assert_eq!(None, caps.get_match()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn starts_for_each_pattern(mut self, yes: bool) -> Config { self.starts_for_each_pattern = Some(yes); self } /// Whether to attempt to shrink the size of the DFA's alphabet or not. /// /// This option is enabled by default and should never be disabled unless /// one is debugging a one-pass DFA. /// /// When enabled, the DFA will use a map from all possible bytes to their /// corresponding equivalence class. Each equivalence class represents a /// set of bytes that does not discriminate between a match and a non-match /// in the DFA. For example, the pattern `[ab]+` has at least two /// equivalence classes: a set containing `a` and `b` and a set containing /// every byte except for `a` and `b`. `a` and `b` are in the same /// equivalence class because they never discriminate between a match and a /// non-match. /// /// The advantage of this map is that the size of the transition table /// can be reduced drastically from (approximately) `#states * 256 * /// sizeof(StateID)` to `#states * k * sizeof(StateID)` where `k` is the /// number of equivalence classes (rounded up to the nearest power of 2). /// As a result, total space usage can decrease substantially. Moreover, /// since a smaller alphabet is used, DFA compilation becomes faster as /// well. /// /// **WARNING:** This is only useful for debugging DFAs. Disabling this /// does not yield any speed advantages. Namely, even when this is /// disabled, a byte class map is still used while searching. The only /// difference is that every byte will be forced into its own distinct /// equivalence class. This is useful for debugging the actual generated /// transitions because it lets one see the transitions defined on actual /// bytes instead of the equivalence classes. pub fn byte_classes(mut self, yes: bool) -> Config { self.byte_classes = Some(yes); self } /// Set a size limit on the total heap used by a one-pass DFA. /// /// This size limit is expressed in bytes and is applied during /// construction of a one-pass DFA. If the DFA's heap usage exceeds /// this configured limit, then construction is stopped and an error is /// returned. /// /// The default is no limit. 
/// /// # Example /// /// This example shows a one-pass DFA that fails to build because of /// a configured size limit. This particular example also serves as a /// cautionary tale demonstrating just how big DFAs with large Unicode /// character classes can get. /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{dfa::onepass::DFA, Match}; /// /// // 6MB isn't enough! /// DFA::builder() /// .configure(DFA::config().size_limit(Some(6_000_000))) /// .build(r"\w{20}") /// .unwrap_err(); /// /// // ... but 7MB probably is! /// // (Note that DFA sizes aren't necessarily stable between releases.) /// let re = DFA::builder() /// .configure(DFA::config().size_limit(Some(7_000_000))) /// .build(r"\w{20}")?; /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); /// let haystack = "A".repeat(20); /// re.captures(&mut cache, &haystack, &mut caps); /// assert_eq!(Some(Match::must(0, 0..20)), caps.get_match()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// While one needs a little more than 3MB to represent `\w{20}`, it /// turns out that you only need a little more than 4KB to represent /// `(?-u:\w{20})`. So only use Unicode if you need it! pub fn size_limit(mut self, limit: Option<usize>) -> Config { self.size_limit = Some(limit); self } /// Returns the match semantics set in this configuration. pub fn get_match_kind(&self) -> MatchKind { self.match_kind.unwrap_or(MatchKind::LeftmostFirst) } /// Returns whether this configuration has enabled anchored starting states /// for every pattern in the DFA. pub fn get_starts_for_each_pattern(&self) -> bool { self.starts_for_each_pattern.unwrap_or(false) } /// Returns whether this configuration has enabled byte classes or not. /// This is typically a debugging oriented option, as disabling it confers /// no speed benefit. pub fn get_byte_classes(&self) -> bool { self.byte_classes.unwrap_or(true) } /// Returns the DFA size limit of this configuration if one was set. /// The size limit is total number of bytes on the heap that a DFA is /// permitted to use. If the DFA exceeds this limit during construction, /// then construction is stopped and an error is returned. pub fn get_size_limit(&self) -> Option<usize> { self.size_limit.unwrap_or(None) } /// Overwrite the default configuration such that the options in `o` are /// always used. If an option in `o` is not set, then the corresponding /// option in `self` is used. If it's not set in `self` either, then it /// remains not set. pub(crate) fn overwrite(&self, o: Config) -> Config { Config { match_kind: o.match_kind.or(self.match_kind), starts_for_each_pattern: o .starts_for_each_pattern .or(self.starts_for_each_pattern), byte_classes: o.byte_classes.or(self.byte_classes), size_limit: o.size_limit.or(self.size_limit), } } } /// A builder for a [one-pass DFA](DFA). /// /// This builder permits configuring options for the syntax of a pattern, the /// NFA construction and the DFA construction. This builder is different from a /// general purpose regex builder in that it permits fine grain configuration /// of the construction process. The trade off for this is complexity, and /// the possibility of setting a configuration that might not make sense. For /// example, there are two different UTF-8 modes: /// /// * [`syntax::Config::utf8`](crate::util::syntax::Config::utf8) controls /// whether the pattern itself can contain sub-expressions that match invalid /// UTF-8. 
/// * [`thompson::Config::utf8`] controls whether empty matches that split a /// Unicode codepoint are reported or not. /// /// Generally speaking, callers will want to either enable all of these or /// disable all of these. /// /// # Example /// /// This example shows how to disable UTF-8 mode in the syntax and the NFA. /// This is generally what you want for matching on arbitrary bytes. /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{ /// dfa::onepass::DFA, /// nfa::thompson, /// util::syntax, /// Match, /// }; /// /// let re = DFA::builder() /// .syntax(syntax::Config::new().utf8(false)) /// .thompson(thompson::Config::new().utf8(false)) /// .build(r"foo(?-u:[^b])ar.*")?; /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); /// /// let haystack = b"foo\xFFarzz\xE2\x98\xFF\n"; /// re.captures(&mut cache, haystack, &mut caps); /// // Notice that `(?-u:[^b])` matches invalid UTF-8, /// // but the subsequent `.*` does not! Disabling UTF-8 /// // on the syntax permits this. /// // /// // N.B. This example does not show the impact of /// // disabling UTF-8 mode on a one-pass DFA Config, /// // since that only impacts regexes that can /// // produce matches of length 0. /// assert_eq!(Some(Match::must(0, 0..8)), caps.get_match()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[derive(Clone, Debug)] pub struct Builder { config: Config, #[cfg(feature = "syntax")] thompson: thompson::Compiler, } impl Builder { /// Create a new one-pass DFA builder with the default configuration. pub fn new() -> Builder { Builder { config: Config::default(), #[cfg(feature = "syntax")] thompson: thompson::Compiler::new(), } } /// Build a one-pass DFA from the given pattern. /// /// If there was a problem parsing or compiling the pattern, then an error /// is returned. #[cfg(feature = "syntax")] pub fn build(&self, pattern: &str) -> Result<DFA, BuildError> { self.build_many(&[pattern]) } /// Build a one-pass DFA from the given patterns. /// /// When matches are returned, the pattern ID corresponds to the index of /// the pattern in the slice given. #[cfg(feature = "syntax")] pub fn build_many<P: AsRef<str>>( &self, patterns: &[P], ) -> Result<DFA, BuildError> { let nfa = self.thompson.build_many(patterns).map_err(BuildError::nfa)?; self.build_from_nfa(nfa) } /// Build a DFA from the given NFA. /// /// # Example /// /// This example shows how to build a DFA if you already have an NFA in /// hand. /// /// ``` /// use regex_automata::{dfa::onepass::DFA, nfa::thompson::NFA, Match}; /// /// // This shows how to set non-default options for building an NFA. /// let nfa = NFA::compiler() /// .configure(NFA::config().shrink(true)) /// .build(r"[a-z0-9]+")?; /// let re = DFA::builder().build_from_nfa(nfa)?; /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); /// re.captures(&mut cache, "foo123bar", &mut caps); /// assert_eq!(Some(Match::must(0, 0..9)), caps.get_match()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn build_from_nfa(&self, nfa: NFA) -> Result<DFA, BuildError> { // Why take ownership if we're just going to pass a reference to the // NFA to our internal builder? Well, the first thing to note is that // an NFA uses reference counting internally, so either choice is going // to be cheap. So there isn't much cost either way. // // The real reason is that a one-pass DFA, semantically, shares // ownership of an NFA. 
This is unlike other DFAs that don't share // ownership of an NFA at all, primarily because they want to be // self-contained in order to support cheap (de)serialization. // // But then why pass a '&nfa' below if we want to share ownership? // Well, it turns out that using a '&NFA' in our internal builder // separates its lifetime from the DFA we're building, and this turns // out to make code a bit more composable. e.g., We can iterate over // things inside the NFA while borrowing the builder as mutable because // we know the NFA cannot be mutated. So TL;DR --- this weirdness is // "because borrow checker." InternalBuilder::new(self.config.clone(), &nfa).build() } /// Apply the given one-pass DFA configuration options to this builder. pub fn configure(&mut self, config: Config) -> &mut Builder { self.config = self.config.overwrite(config); self } /// Set the syntax configuration for this builder using /// [`syntax::Config`](crate::util::syntax::Config). /// /// This permits setting things like case insensitivity, Unicode and multi /// line mode. /// /// These settings only apply when constructing a one-pass DFA directly /// from a pattern. #[cfg(feature = "syntax")] pub fn syntax( &mut self, config: crate::util::syntax::Config, ) -> &mut Builder { self.thompson.syntax(config); self } /// Set the Thompson NFA configuration for this builder using /// [`nfa::thompson::Config`](crate::nfa::thompson::Config). /// /// This permits setting things like whether additional time should be /// spent shrinking the size of the NFA. /// /// These settings only apply when constructing a DFA directly from a /// pattern. #[cfg(feature = "syntax")] pub fn thompson(&mut self, config: thompson::Config) -> &mut Builder { self.thompson.configure(config); self } } /// An internal builder for encapsulating the state necessary to build a /// one-pass DFA. Typical use is just `InternalBuilder::new(..).build()`. /// /// There is no separate pass for determining whether the NFA is one-pass or /// not. We just try to build the DFA. If during construction we discover that /// it is not one-pass, we bail out. This is likely to lead to some undesirable /// expense in some cases, so it might make sense to try an identify common /// patterns in the NFA that make it definitively not one-pass. That way, we /// can avoid ever trying to build a one-pass DFA in the first place. For /// example, '\w*\s' is not one-pass, and since '\w' is Unicode-aware by /// default, it's probably not a trivial cost to try and build a one-pass DFA /// for it and then fail. /// /// Note that some (immutable) fields are duplicated here. For example, the /// 'nfa' and 'classes' fields are both in the 'DFA'. They are the same thing, /// but we duplicate them because it makes composition easier below. Otherwise, /// since the borrow checker can't see through method calls, the mutable borrow /// we use to mutate the DFA winds up preventing borrowing from any other part /// of the DFA, even though we aren't mutating those parts. We only do this /// because the duplication is cheap. #[derive(Debug)] struct InternalBuilder<'a> { /// The DFA we're building. dfa: DFA, /// An unordered collection of NFA state IDs that we haven't yet tried to /// build into a DFA state yet. /// /// This collection does not ultimately wind up including every NFA state /// ID. Instead, each ID represents a "start" state for a sub-graph of the /// NFA. 
The set of NFA states we then use to build a DFA state consists /// of that "start" state and all states reachable from it via epsilon /// transitions. uncompiled_nfa_ids: Vec<StateID>, /// A map from NFA state ID to DFA state ID. This is useful for easily /// determining whether an NFA state has been used as a "starting" point /// to build a DFA state yet. If it hasn't, then it is mapped to DEAD, /// and since DEAD is specially added and never corresponds to any NFA /// state, it follows that a mapping to DEAD implies the NFA state has /// no corresponding DFA state yet. nfa_to_dfa_id: Vec<StateID>, /// A stack used to traverse the NFA states that make up a single DFA /// state. Traversal occurs until the stack is empty, and we only push to /// the stack when the state ID isn't in 'seen'. Actually, even more than /// that, if we try to push something on to this stack that is already in /// 'seen', then we bail out on construction completely, since it implies /// that the NFA is not one-pass. stack: Vec<(StateID, Epsilons)>, /// The set of NFA states that we've visited via 'stack'. seen: SparseSet, /// Whether a match NFA state has been observed while constructing a /// one-pass DFA state. Once a match state is seen, assuming we are using /// leftmost-first match semantics, then we don't add any more transitions /// to the DFA state we're building. matched: bool, /// The config passed to the builder. /// /// This is duplicated in dfa.config. config: Config, /// The NFA we're building a one-pass DFA from. /// /// This is duplicated in dfa.nfa. nfa: &'a NFA, /// The equivalence classes that make up the alphabet for this DFA> /// /// This is duplicated in dfa.classes. classes: ByteClasses, } impl<'a> InternalBuilder<'a> { /// Create a new builder with an initial empty DFA. fn new(config: Config, nfa: &'a NFA) -> InternalBuilder { let classes = if !config.get_byte_classes() { // A one-pass DFA will always use the equivalence class map, but // enabling this option is useful for debugging. Namely, this will // cause all transitions to be defined over their actual bytes // instead of an opaque equivalence class identifier. The former is // much easier to grok as a human. ByteClasses::singletons() } else { nfa.byte_classes().clone() }; // Normally a DFA alphabet includes the EOI symbol, but we don't need // that in the one-pass DFA since we handle look-around explicitly // without encoding it into the DFA. Thus, we don't need to delay // matches by 1 byte. However, we reuse the space that *would* be used // by the EOI transition by putting match information there (like which // pattern matches and which look-around assertions need to hold). So // this means our real alphabet length is 1 fewer than what the byte // classes report, since we don't use EOI. let alphabet_len = classes.alphabet_len().checked_sub(1).unwrap(); let stride2 = classes.stride2(); let dfa = DFA { config: config.clone(), nfa: nfa.clone(), table: vec![], starts: vec![], // Since one-pass DFAs have a smaller state ID max than // StateID::MAX, it follows that StateID::MAX is a valid initial // value for min_match_id since no state ID can ever be greater // than it. In the case of a one-pass DFA with no match states, the // min_match_id will keep this sentinel value. min_match_id: StateID::MAX, classes: classes.clone(), alphabet_len, stride2, pateps_offset: alphabet_len, // OK because PatternID::MAX*2 is guaranteed not to overflow. 
explicit_slot_start: nfa.pattern_len().checked_mul(2).unwrap(), }; InternalBuilder { dfa, uncompiled_nfa_ids: vec![], nfa_to_dfa_id: vec![DEAD; nfa.states().len()], stack: vec![], seen: SparseSet::new(nfa.states().len()), matched: false, config, nfa, classes, } } /// Build the DFA from the NFA given to this builder. If the NFA is not /// one-pass, then return an error. An error may also be returned if a /// particular limit is exceeded. (Some limits, like the total heap memory /// used, are configurable. Others, like the total patterns or slots, are /// hard-coded based on representational limitations.) fn build(mut self) -> Result<DFA, BuildError> { self.nfa.look_set_any().available().map_err(BuildError::word)?; for look in self.nfa.look_set_any().iter() { // This is a future incompatibility check where if we add any // more look-around assertions, then the one-pass DFA either // needs to reject them (what we do here) or it needs to have its // Transition representation modified to be capable of storing the // new assertions. if look.as_repr() > Look::WordUnicodeNegate.as_repr() { return Err(BuildError::unsupported_look(look)); } } if self.nfa.pattern_len().as_u64() > PatternEpsilons::PATTERN_ID_LIMIT { return Err(BuildError::too_many_patterns( PatternEpsilons::PATTERN_ID_LIMIT, )); } if self.nfa.group_info().explicit_slot_len() > Slots::LIMIT { return Err(BuildError::not_one_pass( "too many explicit capturing groups (max is 16)", )); } assert_eq!(DEAD, self.add_empty_state()?); // This is where the explicit slots start. We care about this because // we only need to track explicit slots. The implicit slots---two for // each pattern---are tracked as part of the search routine itself. let explicit_slot_start = self.nfa.pattern_len() * 2; self.add_start_state(None, self.nfa.start_anchored())?; if self.config.get_starts_for_each_pattern() { for pid in self.nfa.patterns() { self.add_start_state( Some(pid), self.nfa.start_pattern(pid).unwrap(), )?; } } // NOTE: One wonders what the effects of treating 'uncompiled_nfa_ids' // as a stack are. It is really an unordered *set* of NFA state IDs. // If it, for example, in practice led to discovering whether a regex // was or wasn't one-pass later than if we processed NFA state IDs in // ascending order, then that would make this routine more costly in // the somewhat common case of a regex that isn't one-pass. while let Some(nfa_id) = self.uncompiled_nfa_ids.pop() { let dfa_id = self.nfa_to_dfa_id[nfa_id]; // Once we see a match, we keep going, but don't add any new // transitions. Normally we'd just stop, but we have to keep // going in order to verify that our regex is actually one-pass. self.matched = false; // The NFA states we've already explored for this DFA state. self.seen.clear(); // The NFA states to explore via epsilon transitions. If we ever // try to push an NFA state that we've already seen, then the NFA // is not one-pass because it implies there are multiple epsilon // transition paths that lead to the same NFA state. In other // words, there is ambiguity. 
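            //
            // Seed the traversal with this sub-graph's root NFA state and an
            // empty set of pending slots and look-around obligations.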
self.stack_push(nfa_id, Epsilons::empty())?; while let Some((id, epsilons)) = self.stack.pop() { match *self.nfa.state(id) { thompson::State::ByteRange { ref trans } => { self.compile_transition(dfa_id, trans, epsilons)?; } thompson::State::Sparse(ref sparse) => { for trans in sparse.transitions.iter() { self.compile_transition(dfa_id, trans, epsilons)?; } } thompson::State::Dense(ref dense) => { for trans in dense.iter() { self.compile_transition(dfa_id, &trans, epsilons)?; } } thompson::State::Look { look, next } => { let looks = epsilons.looks().insert(look); self.stack_push(next, epsilons.set_looks(looks))?; } thompson::State::Union { ref alternates } => { for &sid in alternates.iter().rev() { self.stack_push(sid, epsilons)?; } } thompson::State::BinaryUnion { alt1, alt2 } => { self.stack_push(alt2, epsilons)?; self.stack_push(alt1, epsilons)?; } thompson::State::Capture { next, slot, .. } => { let slot = slot.as_usize(); let epsilons = if slot < explicit_slot_start { // If this is an implicit slot, we don't care // about it, since we handle implicit slots in // the search routine. We can get away with that // because there are 2 implicit slots for every // pattern. epsilons } else { // Offset our explicit slots so that they start // at index 0. let offset = slot - explicit_slot_start; epsilons.set_slots(epsilons.slots().insert(offset)) }; self.stack_push(next, epsilons)?; } thompson::State::Fail => { continue; } thompson::State::Match { pattern_id } => { // If we found two different paths to a match state // for the same DFA state, then we have ambiguity. // Thus, it's not one-pass. if self.matched { return Err(BuildError::not_one_pass( "multiple epsilon transitions to match state", )); } self.matched = true; // Shove the matching pattern ID and the 'epsilons' // into the current DFA state's pattern epsilons. The // 'epsilons' includes the slots we need to capture // before reporting the match and also the conditional // epsilon transitions we need to check before we can // report a match. self.dfa.set_pattern_epsilons( dfa_id, PatternEpsilons::empty() .set_pattern_id(pattern_id) .set_epsilons(epsilons), ); // N.B. It is tempting to just bail out here when // compiling a leftmost-first DFA, since we will never // compile any more transitions in that case. But we // actually need to keep going in order to verify that // we actually have a one-pass regex. e.g., We might // see more Match states (e.g., for other patterns) // that imply that we don't have a one-pass regex. // So instead, we mark that we've found a match and // continue on. When we go to compile a new DFA state, // we just skip that part. But otherwise check that the // one-pass property is upheld. } } } } self.shuffle_states(); Ok(self.dfa) } /// Shuffle all match states to the end of the transition table and set /// 'min_match_id' to the ID of the first such match state. /// /// The point of this is to make it extremely cheap to determine whether /// a state is a match state or not. We need to check on this on every /// transition during a search, so it being cheap is important. This /// permits us to check it by simply comparing two state identifiers, as /// opposed to looking for the pattern ID in the state's `PatternEpsilons`. /// (Which requires a memory load and some light arithmetic.) 
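    ///
    /// As a rough illustration: if the only match states among IDs `0..=6`
    /// are `2` and `5`, they are swapped to the highest IDs and
    /// `min_match_id` becomes the smaller of the two new IDs, so a search
    /// can detect a match state with a single `sid >= min_match_id`
    /// comparison.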
fn shuffle_states(&mut self) { let mut remapper = Remapper::new(&self.dfa); let mut next_dest = self.dfa.last_state_id(); for i in (0..self.dfa.state_len()).rev() { let id = StateID::must(i); let is_match = self.dfa.pattern_epsilons(id).pattern_id().is_some(); if !is_match { continue; } remapper.swap(&mut self.dfa, next_dest, id); self.dfa.min_match_id = next_dest; next_dest = self.dfa.prev_state_id(next_dest).expect( "match states should be a proper subset of all states", ); } remapper.remap(&mut self.dfa); } /// Compile the given NFA transition into the DFA state given. /// /// 'Epsilons' corresponds to any conditional epsilon transitions that need /// to be satisfied to follow this transition, and any slots that need to /// be saved if the transition is followed. /// /// If this transition indicates that the NFA is not one-pass, then /// this returns an error. (This occurs, for example, if the DFA state /// already has a transition defined for the same input symbols as the /// given transition, *and* the result of the old and new transitions is /// different.) fn compile_transition( &mut self, dfa_id: StateID, trans: &thompson::Transition, epsilons: Epsilons, ) -> Result<(), BuildError> { let next_dfa_id = self.add_dfa_state_for_nfa_state(trans.next)?; for byte in self .classes .representatives(trans.start..=trans.end) .filter_map(|r| r.as_u8()) { let oldtrans = self.dfa.transition(dfa_id, byte); let newtrans = Transition::new(self.matched, next_dfa_id, epsilons); // If the old transition points to the DEAD state, then we know // 'byte' has not been mapped to any transition for this DFA state // yet. So set it unconditionally. Otherwise, we require that the // old and new transitions are equivalent. Otherwise, there is // ambiguity and thus the regex is not one-pass. if oldtrans.state_id() == DEAD { self.dfa.set_transition(dfa_id, byte, newtrans); } else if oldtrans != newtrans { return Err(BuildError::not_one_pass( "conflicting transition", )); } } Ok(()) } /// Add a start state to the DFA corresponding to the given NFA starting /// state ID. /// /// If adding a state would blow any limits (configured or hard-coded), /// then an error is returned. /// /// If the starting state is an anchored state for a particular pattern, /// then callers must provide the pattern ID for that starting state. /// Callers must also ensure that the first starting state added is the /// start state for all patterns, and then each anchored starting state for /// each pattern (if necessary) added in order. Otherwise, this panics. fn add_start_state( &mut self, pid: Option<PatternID>, nfa_id: StateID, ) -> Result<StateID, BuildError> { match pid { // With no pid, this should be the start state for all patterns // and thus be the first one. None => assert!(self.dfa.starts.is_empty()), // With a pid, we want it to be at self.dfa.starts[pid+1]. Some(pid) => assert!(self.dfa.starts.len() == pid.one_more()), } let dfa_id = self.add_dfa_state_for_nfa_state(nfa_id)?; self.dfa.starts.push(dfa_id); Ok(dfa_id) } /// Add a new DFA state corresponding to the given NFA state. If adding a /// state would blow any limits (configured or hard-coded), then an error /// is returned. If a DFA state already exists for the given NFA state, /// then that DFA state's ID is returned and no new states are added. /// /// It is not expected that this routine is called for every NFA state. 
/// Instead, an NFA state ID will usually correspond to the "start" state /// for a sub-graph of the NFA, where all states in the sub-graph are /// reachable via epsilon transitions (conditional or unconditional). That /// sub-graph of NFA states is ultimately what produces a single DFA state. fn add_dfa_state_for_nfa_state( &mut self, nfa_id: StateID, ) -> Result<StateID, BuildError> { // If we've already built a DFA state for the given NFA state, then // just return that. We definitely do not want to have more than one // DFA state in existence for the same NFA state, since all but one of // them will likely become unreachable. And at least some of them are // likely to wind up being incomplete. let existing_dfa_id = self.nfa_to_dfa_id[nfa_id]; if existing_dfa_id != DEAD { return Ok(existing_dfa_id); } // If we don't have any DFA state yet, add it and then add the given // NFA state to the list of states to explore. let dfa_id = self.add_empty_state()?; self.nfa_to_dfa_id[nfa_id] = dfa_id; self.uncompiled_nfa_ids.push(nfa_id); Ok(dfa_id) } /// Unconditionally add a new empty DFA state. If adding it would exceed /// any limits (configured or hard-coded), then an error is returned. The /// ID of the new state is returned on success. /// /// The added state is *not* a match state. fn add_empty_state(&mut self) -> Result<StateID, BuildError> { let state_limit = Transition::STATE_ID_LIMIT; // Note that unlike dense and lazy DFAs, we specifically do NOT // premultiply our state IDs here. The reason is that we want to pack // our state IDs into 64-bit transitions with other info, so the fewer // the bits we use for state IDs the better. If we premultiply, then // our state ID space shrinks. We justify this by the assumption that // a one-pass DFA is just already doing a fair bit more work than a // normal DFA anyway, so an extra multiplication to compute a state // transition doesn't seem like a huge deal. let next_id = self.dfa.table.len() >> self.dfa.stride2(); let id = StateID::new(next_id) .map_err(|_| BuildError::too_many_states(state_limit))?; if id.as_u64() > Transition::STATE_ID_LIMIT { return Err(BuildError::too_many_states(state_limit)); } self.dfa .table .extend(core::iter::repeat(Transition(0)).take(self.dfa.stride())); // The default empty value for 'PatternEpsilons' is sadly not all // zeroes. Instead, a special sentinel is used to indicate that there // is no pattern. So we need to explicitly set the pattern epsilons to // the correct "empty" PatternEpsilons. self.dfa.set_pattern_epsilons(id, PatternEpsilons::empty()); if let Some(size_limit) = self.config.get_size_limit() { if self.dfa.memory_usage() > size_limit { return Err(BuildError::exceeded_size_limit(size_limit)); } } Ok(id) } /// Push the given NFA state ID and its corresponding epsilons (slots and /// conditional epsilon transitions) on to a stack for use in a depth first /// traversal of a sub-graph of the NFA. /// /// If the given NFA state ID has already been pushed on to the stack, then /// it indicates the regex is not one-pass and this correspondingly returns /// an error. fn stack_push( &mut self, nfa_id: StateID, epsilons: Epsilons, ) -> Result<(), BuildError> { // If we already have seen a match and we are compiling a leftmost // first DFA, then we shouldn't add any more states to look at. This is // effectively how preference order and non-greediness is implemented. 
// if !self.config.get_match_kind().continue_past_first_match() // && self.matched // { // return Ok(()); // } if !self.seen.insert(nfa_id) { return Err(BuildError::not_one_pass( "multiple epsilon transitions to same state", )); } self.stack.push((nfa_id, epsilons)); Ok(()) } } /// A one-pass DFA for executing a subset of anchored regex searches while /// resolving capturing groups. /// /// A one-pass DFA can be built from an NFA that is one-pass. An NFA is /// one-pass when there is never any ambiguity about how to continue a search. /// For example, `a*a` is not one-pass becuase during a search, it's not /// possible to know whether to continue matching the `a*` or to move on to /// the single `a`. However, `a*b` is one-pass, because for every byte in the /// input, it's always clear when to move on from `a*` to `b`. /// /// # Only anchored searches are supported /// /// In this crate, especially for DFAs, unanchored searches are implemented by /// treating the pattern as if it had a `(?s-u:.)*?` prefix. While the prefix /// is one-pass on its own, adding anything after it, e.g., `(?s-u:.)*?a` will /// make the overall pattern not one-pass. Why? Because the `(?s-u:.)` matches /// any byte, and there is therefore ambiguity as to when the prefix should /// stop matching and something else should start matching. /// /// Therefore, one-pass DFAs do not support unanchored searches. In addition /// to many regexes simply not being one-pass, it implies that one-pass DFAs /// have limited utility. With that said, when a one-pass DFA can be used, it /// can potentially provide a dramatic speed up over alternatives like the /// [`BoundedBacktracker`](crate::nfa::thompson::backtrack::BoundedBacktracker) /// and the [`PikeVM`](crate::nfa::thompson::pikevm::PikeVM). In particular, /// a one-pass DFA is the only DFA capable of reporting the spans of matching /// capturing groups. /// /// To clarify, when we say that unanchored searches are not supported, what /// that actually means is: /// /// * The high level routines, [`DFA::is_match`] and [`DFA::captures`], always /// do anchored searches. /// * Since iterators are most useful in the context of unanchored searches, /// there is no `DFA::captures_iter` method. /// * For lower level routines like [`DFA::try_search`], an error will be /// returned if the given [`Input`] is configured to do an unanchored search or /// search for an invalid pattern ID. (Note that an [`Input`] is configured to /// do an unanchored search by default, so just giving a `Input::new` is /// guaranteed to return an error.) /// /// # Other limitations /// /// In addition to the [configurable heap limit](Config::size_limit) and /// the requirement that a regex pattern be one-pass, there are some other /// limitations: /// /// * There is an internal limit on the total number of explicit capturing /// groups that appear across all patterns. It is somewhat small and there is /// no way to configure it. If your pattern(s) exceed this limit, then building /// a one-pass DFA will fail. /// * If the number of patterns exceeds an internal unconfigurable limit, then /// building a one-pass DFA will fail. This limit is quite large and you're /// unlikely to hit it. /// * If the total number of states exceeds an internal unconfigurable limit, /// then building a one-pass DFA will fail. This limit is quite large and /// you're unlikely to hit it. 
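///
/// # Example: a regex that is not one-pass
///
/// As an illustrative sketch (the exact error value is not important here),
/// building a pattern with ambiguity fails, while an unambiguous variant
/// succeeds:
///
/// ```
/// use regex_automata::dfa::onepass::DFA;
///
/// // `a*a` is ambiguous: when looking at an `a`, it is not clear whether
/// // to stay in `a*` or move on to the final `a`, so construction fails.
/// assert!(DFA::new(r"a*a").is_err());
///
/// // `a*b` has no such ambiguity, so construction succeeds.
/// assert!(DFA::new(r"a*b").is_ok());
/// ```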
/// /// # Other examples of regexes that aren't one-pass /// /// One particularly unfortunate example is that enabling Unicode can cause /// regexes that were one-pass to no longer be one-pass. Consider the regex /// `(?-u)\w*\s` for example. It is one-pass because there is exactly no /// overlap between the ASCII definitions of `\w` and `\s`. But `\w*\s` /// (i.e., with Unicode enabled) is *not* one-pass because `\w` and `\s` get /// translated to UTF-8 automatons. And while the *codepoints* in `\w` and `\s` /// do not overlap, the underlying UTF-8 encodings do. Indeed, because of the /// overlap between UTF-8 automata, the use of Unicode character classes will /// tend to vastly increase the likelihood of a regex not being one-pass. /// /// # How does one know if a regex is one-pass or not? /// /// At the time of writing, the only way to know is to try and build a one-pass /// DFA. The one-pass property is checked while constructing the DFA. /// /// This does mean that you might potentially waste some CPU cycles and memory /// by optimistically trying to build a one-pass DFA. But this is currently the /// only way. In the future, building a one-pass DFA might be able to use some /// heuristics to detect common violations of the one-pass property and bail /// more quickly. /// /// # Resource usage /// /// Unlike a general DFA, a one-pass DFA has stricter bounds on its resource /// usage. Namely, construction of a one-pass DFA has a time and space /// complexity of `O(n)`, where `n ~ nfa.states().len()`. (A general DFA's time /// and space complexity is `O(2^n)`.) This smaller time bound is achieved /// because there is at most one DFA state created for each NFA state. If /// additional DFA states would be required, then the pattern is not one-pass /// and construction will fail. /// /// Note though that currently, this DFA uses a fully dense representation. /// This means that while its space complexity is no worse than an NFA, it may /// in practice use more memory because of higher constant factors. The reason /// for this trade off is two-fold. Firstly, a dense representation makes the /// search faster. Secondly, the bigger an NFA, the more unlikely it is to be /// one-pass. Therefore, most one-pass DFAs are usually pretty small. /// /// # Example /// /// This example shows that the one-pass DFA implements Unicode word boundaries /// correctly while simultaneously reporting spans for capturing groups that /// participate in a match. (This is the only DFA that implements full support /// for Unicode word boundaries.) /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{dfa::onepass::DFA, Match, Span}; /// /// let re = DFA::new(r"\b(?P<first>\w+)[[:space:]]+(?P<last>\w+)\b")?; /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); /// /// re.captures(&mut cache, "<NAME>", &mut caps); /// assert_eq!(Some(Match::must(0, 0..23)), caps.get_match()); /// assert_eq!(Some(Span::from(0..12)), caps.get_group_by_name("first")); /// assert_eq!(Some(Span::from(13..23)), caps.get_group_by_name("last")); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// # Example: iteration /// /// Unlike other regex engines in this crate, this one does not provide /// iterator search functions. This is because a one-pass DFA only supports /// anchored searches, and so iterator functions are generally not applicable. /// /// However, if you know that all of your matches are /// directly adjacent, then an iterator can be used. 
The /// [`util::iter::Searcher`](crate::util::iter::Searcher) type can be used for /// this purpose: /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{ /// dfa::onepass::DFA, /// util::iter::Searcher, /// Anchored, Input, Span, /// }; /// /// let re = DFA::new(r"\w(\d)\w")?; /// let (mut cache, caps) = (re.create_cache(), re.create_captures()); /// let input = Input::new("a1zb2yc3x").anchored(Anchored::Yes); /// /// let mut it = Searcher::new(input).into_captures_iter(caps, |input, caps| { /// Ok(re.try_search(&mut cache, input, caps)?) /// }).infallible(); /// let caps0 = it.next().unwrap(); /// assert_eq!(Some(Span::from(1..2)), caps0.get_group(1)); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[derive(Clone)] pub struct DFA { /// The configuration provided by the caller. config: Config, /// The NFA used to build this DFA. /// /// NOTE: We probably don't need to store the NFA here, but we use enough /// bits from it that it's convenient to do so. And there really isn't much /// cost to doing so either, since an NFA is reference counted internally. nfa: NFA, /// The transition table. Given a state ID 's' and a byte of haystack 'b', /// the next state is `table[sid + classes[byte]]`. /// /// The stride of this table (i.e., the number of columns) is always /// a power of 2, even if the alphabet length is smaller. This makes /// converting between state IDs and state indices very cheap. /// /// Note that the stride always includes room for one extra "transition" /// that isn't actually a transition. It is a 'PatternEpsilons' that is /// used for match states only. Because of this, the maximum number of /// active columns in the transition table is 257, which means the maximum /// stride is 512 (the next power of 2 greater than or equal to 257). table: Vec<Transition>, /// The DFA state IDs of the starting states. /// /// `starts[0]` is always present and corresponds to the starting state /// when searching for matches of any pattern in the DFA. /// /// `starts[i]` where i>0 corresponds to the starting state for the pattern /// ID 'i-1'. These starting states are optional. starts: Vec<StateID>, /// Every state ID >= this value corresponds to a match state. /// /// This is what a search uses to detect whether a state is a match state /// or not. It requires only a simple comparison instead of bit-unpacking /// the PatternEpsilons from every state. min_match_id: StateID, /// The alphabet of this DFA, split into equivalence classes. Bytes in the /// same equivalence class can never discriminate between a match and a /// non-match. classes: ByteClasses, /// The number of elements in each state in the transition table. This may /// be less than the stride, since the stride is always a power of 2 and /// the alphabet length can be anything up to and including 256. alphabet_len: usize, /// The number of columns in the transition table, expressed as a power of /// 2. stride2: usize, /// The offset at which the PatternEpsilons for a match state is stored in /// the transition table. /// /// PERF: One wonders whether it would be better to put this in a separate /// allocation, since only match states have a non-empty PatternEpsilons /// and the number of match states tends be dwarfed by the number of /// non-match states. So this would save '8*len(non_match_states)' for each /// DFA. The question is whether moving this to a different allocation will /// lead to a perf hit during searches. 
You might think dealing with match /// states is rare, but some regexes spend a lot of time in match states /// gobbling up input. But... match state handling is already somewhat /// expensive, so maybe this wouldn't do much? Either way, it's worth /// experimenting. pateps_offset: usize, /// The first explicit slot index. This refers to the first slot appearing /// immediately after the last implicit slot. It is always 'patterns.len() /// * 2'. /// /// We record this because we only store the explicit slots in our DFA /// transition table that need to be saved. Implicit slots are handled /// automatically as part of the search. explicit_slot_start: usize, } impl DFA { /// Parse the given regular expression using the default configuration and /// return the corresponding one-pass DFA. /// /// If you want a non-default configuration, then use the [`Builder`] to /// set your own configuration. /// /// # Example /// /// ``` /// use regex_automata::{dfa::onepass::DFA, Match}; /// /// let re = DFA::new("foo[0-9]+bar")?; /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); /// /// re.captures(&mut cache, "foo12345barzzz", &mut caps); /// assert_eq!(Some(Match::must(0, 0..11)), caps.get_match()); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[cfg(feature = "syntax")] #[inline] pub fn new(pattern: &str) -> Result<DFA, BuildError> { DFA::builder().build(pattern) } /// Like `new`, but parses multiple patterns into a single "multi regex." /// This similarly uses the default regex configuration. /// /// # Example /// /// ``` /// use regex_automata::{dfa::onepass::DFA, Match}; /// /// let re = DFA::new_many(&["[a-z]+", "[0-9]+"])?; /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); /// /// re.captures(&mut cache, "abc123", &mut caps); /// assert_eq!(Some(Match::must(0, 0..3)), caps.get_match()); /// /// re.captures(&mut cache, "123abc", &mut caps); /// assert_eq!(Some(Match::must(1, 0..3)), caps.get_match()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[cfg(feature = "syntax")] #[inline] pub fn new_many<P: AsRef<str>>(patterns: &[P]) -> Result<DFA, BuildError> { DFA::builder().build_many(patterns) } /// Like `new`, but builds a one-pass DFA directly from an NFA. This is /// useful if you already have an NFA, or even if you hand-assembled the /// NFA. /// /// # Example /// /// This shows how to hand assemble a regular expression via its HIR, /// compile an NFA from it and build a one-pass DFA from the NFA. /// /// ``` /// use regex_automata::{ /// dfa::onepass::DFA, /// nfa::thompson::NFA, /// Match, /// }; /// use regex_syntax::hir::{Hir, Class, ClassBytes, ClassBytesRange}; /// /// let hir = Hir::class(Class::Bytes(ClassBytes::new(vec![ /// ClassBytesRange::new(b'0', b'9'), /// ClassBytesRange::new(b'A', b'Z'), /// ClassBytesRange::new(b'_', b'_'), /// ClassBytesRange::new(b'a', b'z'), /// ]))); /// /// let config = NFA::config().nfa_size_limit(Some(1_000)); /// let nfa = NFA::compiler().configure(config).build_from_hir(&hir)?; /// /// let re = DFA::new_from_nfa(nfa)?; /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); /// let expected = Some(Match::must(0, 0..1)); /// re.captures(&mut cache, "A", &mut caps); /// assert_eq!(expected, caps.get_match()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn new_from_nfa(nfa: NFA) -> Result<DFA, BuildError> { DFA::builder().build_from_nfa(nfa) } /// Create a new one-pass DFA that matches every input. 
/// /// # Example /// /// ``` /// use regex_automata::{dfa::onepass::DFA, Match}; /// /// let dfa = DFA::always_match()?; /// let mut cache = dfa.create_cache(); /// let mut caps = dfa.create_captures(); /// /// let expected = Match::must(0, 0..0); /// dfa.captures(&mut cache, "", &mut caps); /// assert_eq!(Some(expected), caps.get_match()); /// dfa.captures(&mut cache, "foo", &mut caps); /// assert_eq!(Some(expected), caps.get_match()); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn always_match() -> Result<DFA, BuildError> { let nfa = thompson::NFA::always_match(); Builder::new().build_from_nfa(nfa) } /// Create a new one-pass DFA that never matches any input. /// /// # Example /// /// ``` /// use regex_automata::dfa::onepass::DFA; /// /// let dfa = DFA::never_match()?; /// let mut cache = dfa.create_cache(); /// let mut caps = dfa.create_captures(); /// /// dfa.captures(&mut cache, "", &mut caps); /// assert_eq!(None, caps.get_match()); /// dfa.captures(&mut cache, "foo", &mut caps); /// assert_eq!(None, caps.get_match()); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn never_match() -> Result<DFA, BuildError> { let nfa = thompson::NFA::never_match(); Builder::new().build_from_nfa(nfa) } /// Return a default configuration for a DFA. /// /// This is a convenience routine to avoid needing to import the `Config` /// type when customizing the construction of a DFA. /// /// # Example /// /// This example shows how to change the match semantics of this DFA from /// its default "leftmost first" to "all." When using "all," non-greediness /// doesn't apply and neither does preference order matching. Instead, the /// longest match possible is always returned. (Although, by construction, /// it's impossible for a one-pass DFA to have a different answer for /// "preference order" vs "longest match.") /// /// ``` /// use regex_automata::{dfa::onepass::DFA, Match, MatchKind}; /// /// let re = DFA::builder() /// .configure(DFA::config().match_kind(MatchKind::All)) /// .build(r"(abc)+?")?; /// let mut cache = re.create_cache(); /// let mut caps = re.create_captures(); /// /// re.captures(&mut cache, "abcabc", &mut caps); /// // Normally, the non-greedy repetition would give us a 0..3 match. /// assert_eq!(Some(Match::must(0, 0..6)), caps.get_match()); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn config() -> Config { Config::new() } /// Return a builder for configuring the construction of a DFA. /// /// This is a convenience routine to avoid needing to import the /// [`Builder`] type in common cases. /// /// # Example /// /// This example shows how to use the builder to disable UTF-8 mode. /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{ /// dfa::onepass::DFA, /// nfa::thompson, /// util::syntax, /// Match, /// }; /// /// let re = DFA::builder() /// .syntax(syntax::Config::new().utf8(false)) /// .thompson(thompson::Config::new().utf8(false)) /// .build(r"foo(?-u:[^b])ar.*")?; /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); /// /// let haystack = b"foo\xFFarzz\xE2\x98\xFF\n"; /// let expected = Some(Match::must(0, 0..8)); /// re.captures(&mut cache, haystack, &mut caps); /// assert_eq!(expected, caps.get_match()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn builder() -> Builder { Builder::new() } /// Create a new empty set of capturing groups that is guaranteed to be /// valid for the search APIs on this DFA. 
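    ///
    /// For instance (an illustrative sketch using a made-up pattern and
    /// haystack), the returned value can be handed straight to a search:
    ///
    /// ```
    /// use regex_automata::{dfa::onepass::DFA, Span};
    ///
    /// let re = DFA::new(r"([0-9]{4})-([0-9]{2})")?;
    /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures());
    ///
    /// re.captures(&mut cache, "2023-08", &mut caps);
    /// assert_eq!(Some(Span::from(0..4)), caps.get_group(1));
    /// assert_eq!(Some(Span::from(5..7)), caps.get_group(2));
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```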
    ///
    /// A `Captures` value created for a specific DFA cannot be used with any
    /// other DFA.
    ///
    /// This is a convenience function for [`Captures::all`]. See the
    /// [`Captures`] documentation for an explanation of its alternative
    /// constructors that permit the DFA to do less work during a search, and
    /// thus might make it faster.
    #[inline]
    pub fn create_captures(&self) -> Captures {
        Captures::all(self.nfa.group_info().clone())
    }

    /// Create a new cache for this DFA.
    ///
    /// The cache returned should only be used for searches for this
    /// DFA. If you want to reuse the cache for another DFA, then you
    /// must call [`Cache::reset`] with that DFA (or, equivalently,
    /// [`DFA::reset_cache`]).
    #[inline]
    pub fn create_cache(&self) -> Cache {
        Cache::new(self)
    }

    /// Reset the given cache such that it can be used for searching with
    /// this DFA (and only this DFA).
    ///
    /// A cache reset permits reusing memory already allocated in this cache
    /// with a different DFA.
    ///
    /// # Example
    ///
    /// This shows how to re-purpose a cache for use with a different DFA.
    ///
    /// ```
    /// # if cfg!(miri) { return Ok(()); } // miri takes too long
    /// use regex_automata::{dfa::onepass::DFA, Match};
    ///
    /// let re1 = DFA::new(r"\w")?;
    /// let re2 = DFA::new(r"\W")?;
    /// let mut caps1 = re1.create_captures();
    /// let mut caps2 = re2.create_captures();
    ///
    /// let mut cache = re1.create_cache();
    /// assert_eq!(
    ///     Some(Match::must(0, 0..2)),
    ///     { re1.captures(&mut cache, "Δ", &mut caps1); caps1.get_match() },
    /// );
    ///
    /// // Using 'cache' with re2 is not allowed. It may result in panics or
    /// // incorrect results. In order to re-purpose the cache, we must reset
    /// // it with the one-pass DFA we'd like to use it with.
    /// //
    /// // Similarly, after this reset, using the cache with 're1' is also not
    /// // allowed.
    /// re2.reset_cache(&mut cache);
    /// assert_eq!(
    ///     Some(Match::must(0, 0..3)),
    ///     { re2.captures(&mut cache, "☃", &mut caps2); caps2.get_match() },
    /// );
    ///
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```
    #[inline]
    pub fn reset_cache(&self, cache: &mut Cache) {
        cache.reset(self);
    }

    /// Return the config for this one-pass DFA.
    #[inline]
    pub fn get_config(&self) -> &Config {
        &self.config
    }

    /// Returns a reference to the underlying NFA.
    #[inline]
    pub fn get_nfa(&self) -> &NFA {
        &self.nfa
    }

    /// Returns the total number of patterns compiled into this DFA.
    ///
    /// In the case of a DFA that contains no patterns, this returns `0`.
    #[inline]
    pub fn pattern_len(&self) -> usize {
        self.get_nfa().pattern_len()
    }

    /// Returns the total number of states in this one-pass DFA.
    ///
    /// Note that unlike dense or sparse DFAs, a one-pass DFA does not expose
    /// a low level DFA API. Therefore, this routine has little use other than
    /// being informational.
    #[inline]
    pub fn state_len(&self) -> usize {
        self.table.len() >> self.stride2()
    }

    /// Returns the total number of elements in the alphabet for this DFA.
    ///
    /// That is, this returns the total number of transitions that each
    /// state in this DFA must have. The maximum alphabet size is 256, which
    /// corresponds to each possible byte value.
    ///
    /// The alphabet size may be less than 256 though, and unless
    /// [`Config::byte_classes`] is disabled, it is typically much less than
    /// 256. Namely, bytes are grouped into equivalence classes such that no
    /// two bytes in the same class can distinguish a match from a non-match.
    /// For example, in the regex `^[a-z]+$`, the ASCII bytes `a-z` could
    /// all be in the same equivalence class. This leads to a massive space
    /// savings.
    ///
    /// Note though that the alphabet length does _not_ necessarily equal the
    /// total stride space taken up by a single DFA state in the transition
    /// table. Namely, for performance reasons, the stride is always the
    /// smallest power of two that is greater than or equal to the alphabet
    /// length. For this reason, [`DFA::stride`] or [`DFA::stride2`] are
    /// often more useful. The alphabet length is typically useful only for
    /// informational purposes.
    ///
    /// Note also that unlike dense or sparse DFAs, a one-pass DFA does
    /// not have a special end-of-input (EOI) transition. This is because
    /// a one-pass DFA handles look-around assertions explicitly (like the
    /// [`PikeVM`](crate::nfa::thompson::pikevm::PikeVM)) and does not build
    /// them into the transitions of the DFA.
    #[inline]
    pub fn alphabet_len(&self) -> usize {
        self.alphabet_len
    }

    /// Returns the total stride for every state in this DFA, expressed as the
    /// exponent of a power of 2. The stride is the amount of space each state
    /// takes up in the transition table, expressed as a number of transitions.
    /// (Unused transitions map to dead states.)
    ///
    /// The stride of a DFA is always equivalent to the smallest power of
    /// 2 that is greater than or equal to the DFA's alphabet length. This
    /// definition uses extra space, but possibly permits faster translation
    /// between state identifiers and their corresponding offsets in this DFA's
    /// transition table.
    ///
    /// For example, if the DFA's stride is 16 transitions, then its `stride2`
    /// is `4` since `2^4 = 16`.
    ///
    /// The minimum `stride2` value is `1` (corresponding to a stride of `2`)
    /// while the maximum `stride2` value is `9` (corresponding to a stride
    /// of `512`). The maximum in theory should be `8`, but because of some
    /// implementation quirks that may be relaxed in the future, it is one more
    /// than `8`. (Do note that a maximal stride is incredibly rare, as it
    /// would imply that there is almost no redundancy in the regex pattern.)
    ///
    /// Note that unlike dense or sparse DFAs, a one-pass DFA does not expose
    /// a low level DFA API. Therefore, this routine has little use other than
    /// being informational.
    #[inline]
    pub fn stride2(&self) -> usize {
        self.stride2
    }

    /// Returns the total stride for every state in this DFA. This corresponds
    /// to the total number of transitions used by each state in this DFA's
    /// transition table.
    ///
    /// Please see [`DFA::stride2`] for more information. In particular, this
    /// returns the stride as the number of transitions, where as `stride2`
    /// returns it as the exponent of a power of 2.
    ///
    /// Note that unlike dense or sparse DFAs, a one-pass DFA does not expose
    /// a low level DFA API. Therefore, this routine has little use other than
    /// being informational.
    #[inline]
    pub fn stride(&self) -> usize {
        1 << self.stride2()
    }

    /// Returns the memory usage, in bytes, of this DFA.
    ///
    /// The memory usage is computed based on the number of bytes used to
    /// represent this DFA.
    ///
    /// This does **not** include the stack size used up by this DFA. To
    /// compute that, use `std::mem::size_of::<onepass::DFA>()`.
    #[inline]
    pub fn memory_usage(&self) -> usize {
        use core::mem::size_of;

        self.table.len() * size_of::<Transition>()
            + self.starts.len() * size_of::<StateID>()
    }
}

impl DFA {
    /// Executes an anchored leftmost forward search, and returns true if and
    /// only if this one-pass DFA matches the given haystack.
/// /// This routine may short circuit if it knows that scanning future /// input will never lead to a different result. In particular, if the /// underlying DFA enters a match state, then this routine will return /// `true` immediately without inspecting any future input. (Consider how /// this might make a difference given the regex `a+` on the haystack /// `aaaaaaaaaaaaaaa`. This routine can stop after it sees the first `a`, /// but routines like `find` need to continue searching because `+` is /// greedy by default.) /// /// The given `Input` is forcefully set to use [`Anchored::Yes`] if the /// given configuration was [`Anchored::No`] (which is the default). /// /// # Panics /// /// This routine panics if the search could not complete. This can occur /// in the following circumstances: /// /// * When the provided `Input` configuration is not supported. For /// example, by providing an unsupported anchor mode. Concretely, /// this occurs when using [`Anchored::Pattern`] without enabling /// [`Config::starts_for_each_pattern`]. /// /// When a search panics, callers cannot know whether a match exists or /// not. /// /// Use [`DFA::try_search`] if you want to handle these panics as error /// values instead. /// /// # Example /// /// This shows basic usage: /// /// ``` /// use regex_automata::dfa::onepass::DFA; /// /// let re = DFA::new("foo[0-9]+bar")?; /// let mut cache = re.create_cache(); /// /// assert!(re.is_match(&mut cache, "foo12345bar")); /// assert!(!re.is_match(&mut cache, "foobar")); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// # Example: consistency with search APIs /// /// `is_match` is guaranteed to return `true` whenever `captures` returns /// a match. This includes searches that are executed entirely within a /// codepoint: /// /// ``` /// use regex_automata::{dfa::onepass::DFA, Input}; /// /// let re = DFA::new("a*")?; /// let mut cache = re.create_cache(); /// /// assert!(!re.is_match(&mut cache, Input::new("☃").span(1..2))); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// Notice that when UTF-8 mode is disabled, then the above reports a /// match because the restriction against zero-width matches that split a /// codepoint has been lifted: /// /// ``` /// use regex_automata::{dfa::onepass::DFA, nfa::thompson::NFA, Input}; /// /// let re = DFA::builder() /// .thompson(NFA::config().utf8(false)) /// .build("a*")?; /// let mut cache = re.create_cache(); /// /// assert!(re.is_match(&mut cache, Input::new("☃").span(1..2))); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn is_match<'h, I: Into<Input<'h>>>( &self, cache: &mut Cache, input: I, ) -> bool { let mut input = input.into().earliest(true); if matches!(input.get_anchored(), Anchored::No) { input.set_anchored(Anchored::Yes); } self.try_search_slots(cache, &input, &mut []).unwrap().is_some() } /// Executes an anchored leftmost forward search, and returns a `Match` if /// and only if this one-pass DFA matches the given haystack. /// /// This routine only includes the overall match span. To get access to the /// individual spans of each capturing group, use [`DFA::captures`]. /// /// The given `Input` is forcefully set to use [`Anchored::Yes`] if the /// given configuration was [`Anchored::No`] (which is the default). /// /// # Panics /// /// This routine panics if the search could not complete. This can occur /// in the following circumstances: /// /// * When the provided `Input` configuration is not supported. 
For /// example, by providing an unsupported anchor mode. Concretely, /// this occurs when using [`Anchored::Pattern`] without enabling /// [`Config::starts_for_each_pattern`]. /// /// When a search panics, callers cannot know whether a match exists or /// not. /// /// Use [`DFA::try_search`] if you want to handle these panics as error /// values instead. /// /// # Example /// /// Leftmost first match semantics corresponds to the match with the /// smallest starting offset, but where the end offset is determined by /// preferring earlier branches in the original regular expression. For /// example, `Sam|Samwise` will match `Sam` in `Samwise`, but `Samwise|Sam` /// will match `Samwise` in `Samwise`. /// /// Generally speaking, the "leftmost first" match is how most backtracking /// regular expressions tend to work. This is in contrast to POSIX-style /// regular expressions that yield "leftmost longest" matches. Namely, /// both `Sam|Samwise` and `Samwise|Sam` match `Samwise` when using /// leftmost longest semantics. (This crate does not currently support /// leftmost longest semantics.) /// /// ``` /// use regex_automata::{dfa::onepass::DFA, Match}; /// /// let re = DFA::new("foo[0-9]+")?; /// let mut cache = re.create_cache(); /// let expected = Match::must(0, 0..8); /// assert_eq!(Some(expected), re.find(&mut cache, "foo12345")); /// /// // Even though a match is found after reading the first byte (`a`), /// // the leftmost first match semantics demand that we find the earliest /// // match that prefers earlier parts of the pattern over later parts. /// let re = DFA::new("abc|a")?; /// let mut cache = re.create_cache(); /// let expected = Match::must(0, 0..3); /// assert_eq!(Some(expected), re.find(&mut cache, "abc")); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn find<'h, I: Into<Input<'h>>>( &self, cache: &mut Cache, input: I, ) -> Option<Match> { let mut input = input.into(); if matches!(input.get_anchored(), Anchored::No) { input.set_anchored(Anchored::Yes); } if self.get_nfa().pattern_len() == 1 { let mut slots = [None, None]; let pid = self.try_search_slots(cache, &input, &mut slots).unwrap()?; let start = slots[0].unwrap().get(); let end = slots[1].unwrap().get(); return Some(Match::new(pid, Span { start, end })); } let ginfo = self.get_nfa().group_info(); let slots_len = ginfo.implicit_slot_len(); let mut slots = vec![None; slots_len]; let pid = self.try_search_slots(cache, &input, &mut slots).unwrap()?; let start = slots[pid.as_usize() * 2].unwrap().get(); let end = slots[pid.as_usize() * 2 + 1].unwrap().get(); Some(Match::new(pid, Span { start, end })) } /// Executes an anchored leftmost forward search and writes the spans /// of capturing groups that participated in a match into the provided /// [`Captures`] value. If no match was found, then [`Captures::is_match`] /// is guaranteed to return `false`. /// /// The given `Input` is forcefully set to use [`Anchored::Yes`] if the /// given configuration was [`Anchored::No`] (which is the default). /// /// # Panics /// /// This routine panics if the search could not complete. This can occur /// in the following circumstances: /// /// * When the provided `Input` configuration is not supported. For /// example, by providing an unsupported anchor mode. Concretely, /// this occurs when using [`Anchored::Pattern`] without enabling /// [`Config::starts_for_each_pattern`]. /// /// When a search panics, callers cannot know whether a match exists or /// not. 
    ///
    /// Use [`DFA::try_search`] if you want to handle these panics as error
    /// values instead.
    ///
    /// # Example
    ///
    /// This shows a simple example of a one-pass regex that extracts
    /// capturing group spans.
    ///
    /// ```
    /// use regex_automata::{dfa::onepass::DFA, Match, Span};
    ///
    /// let re = DFA::new(
    ///     // Notice that we use ASCII here. The corresponding Unicode regex
    ///     // is sadly not one-pass.
    ///     "(?P<first>[[:alpha:]]+)[[:space:]]+(?P<last>[[:alpha:]]+)",
    /// )?;
    /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures());
    ///
    /// re.captures(&mut cache, "Bruce Springsteen", &mut caps);
    /// assert_eq!(Some(Match::must(0, 0..17)), caps.get_match());
    /// assert_eq!(Some(Span::from(0..5)), caps.get_group(1));
    /// assert_eq!(Some(Span::from(6..17)), caps.get_group_by_name("last"));
    ///
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```
    #[inline]
    pub fn captures<'h, I: Into<Input<'h>>>(
        &self,
        cache: &mut Cache,
        input: I,
        caps: &mut Captures,
    ) {
        let mut input = input.into();
        if matches!(input.get_anchored(), Anchored::No) {
            input.set_anchored(Anchored::Yes);
        }
        self.try_search(cache, &input, caps).unwrap();
    }

    /// Executes an anchored leftmost forward search and writes the spans
    /// of capturing groups that participated in a match into the provided
    /// [`Captures`] value. If no match was found, then [`Captures::is_match`]
    /// is guaranteed to return `false`.
    ///
    /// The differences with [`DFA::captures`] are:
    ///
    /// 1. This returns an error instead of panicking if the search fails.
    /// 2. Accepts an `&Input` instead of a `Into<Input>`. This permits reusing
    /// the same input for multiple searches, which _may_ be important for
    /// latency.
    /// 3. This does not automatically change the [`Anchored`] mode from `No`
    /// to `Yes`. Instead, if [`Input::anchored`] is `Anchored::No`, then an
    /// error is returned.
    ///
    /// # Errors
    ///
    /// This routine errors if the search could not complete. This can occur
    /// in the following circumstances:
    ///
    /// * When the provided `Input` configuration is not supported. For
    /// example, by providing an unsupported anchor mode. Concretely,
    /// this occurs when using [`Anchored::Pattern`] without enabling
    /// [`Config::starts_for_each_pattern`].
    ///
    /// When a search returns an error, callers cannot know whether a match
    /// exists or not.
    ///
    /// # Example: specific pattern search
    ///
    /// This example shows how to build a multi-regex that permits searching
    /// for specific patterns. Note that this is somewhat less useful than
    /// in other regex engines, since a one-pass DFA by definition has no
    /// ambiguity about which pattern can match at a position. That is, if it
    /// were possible for two different patterns to match at the same starting
    /// position, then the multi-regex would not be one-pass and construction
    /// would have failed.
    ///
    /// Nevertheless, this can still be useful if you only care about matches
    /// for a specific pattern, and want the DFA to report "no match" even if
    /// some other pattern would have matched.
    ///
    /// Note that in order to make use of this functionality,
    /// [`Config::starts_for_each_pattern`] must be enabled. It is disabled
    /// by default since it may result in higher memory usage.
/// /// ``` /// use regex_automata::{ /// dfa::onepass::DFA, Anchored, Input, Match, PatternID, /// }; /// /// let re = DFA::builder() /// .configure(DFA::config().starts_for_each_pattern(true)) /// .build_many(&["[a-z]+", "[0-9]+"])?; /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); /// let haystack = "123abc"; /// let input = Input::new(haystack).anchored(Anchored::Yes); /// /// // A normal multi-pattern search will show pattern 1 matches. /// re.try_search(&mut cache, &input, &mut caps)?; /// assert_eq!(Some(Match::must(1, 0..3)), caps.get_match()); /// /// // If we only want to report pattern 0 matches, then we'll get no /// // match here. /// let input = input.anchored(Anchored::Pattern(PatternID::must(0))); /// re.try_search(&mut cache, &input, &mut caps)?; /// assert_eq!(None, caps.get_match()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// # Example: specifying the bounds of a search /// /// This example shows how providing the bounds of a search can produce /// different results than simply sub-slicing the haystack. /// /// ``` /// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{dfa::onepass::DFA, Anchored, Input, Match}; /// /// // one-pass DFAs fully support Unicode word boundaries! /// // A sad joke is that a Unicode aware regex like \w+\s is not one-pass. /// // :-( /// let re = DFA::new(r"\b[0-9]{3}\b")?; /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); /// let haystack = "foo123bar"; /// /// // Since we sub-slice the haystack, the search doesn't know about /// // the larger context and assumes that `123` is surrounded by word /// // boundaries. And of course, the match position is reported relative /// // to the sub-slice as well, which means we get `0..3` instead of /// // `3..6`. /// let expected = Some(Match::must(0, 0..3)); /// let input = Input::new(&haystack[3..6]).anchored(Anchored::Yes); /// re.try_search(&mut cache, &input, &mut caps)?; /// assert_eq!(expected, caps.get_match()); /// /// // But if we provide the bounds of the search within the context of the /// // entire haystack, then the search can take the surrounding context /// // into account. (And if we did find a match, it would be reported /// // as a valid offset into `haystack` instead of its sub-slice.) /// let expected = None; /// let input = Input::new(haystack).range(3..6).anchored(Anchored::Yes); /// re.try_search(&mut cache, &input, &mut caps)?; /// assert_eq!(expected, caps.get_match()); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn try_search( &self, cache: &mut Cache, input: &Input<'_>, caps: &mut Captures, ) -> Result<(), MatchError> { let pid = self.try_search_slots(cache, input, caps.slots_mut())?; caps.set_pattern(pid); Ok(()) } /// Executes an anchored leftmost forward search and writes the spans /// of capturing groups that participated in a match into the provided /// `slots`, and returns the matching pattern ID. The contents of the /// slots for patterns other than the matching pattern are unspecified. If /// no match was found, then `None` is returned and the contents of all /// `slots` is unspecified. /// /// This is like [`DFA::try_search`], but it accepts a raw slots slice /// instead of a `Captures` value. This is useful in contexts where you /// don't want or need to allocate a `Captures`. /// /// It is legal to pass _any_ number of slots to this routine. 
If the regex /// engine would otherwise write a slot offset that doesn't fit in the /// provided slice, then it is simply skipped. In general though, there are /// usually three slice lengths you might want to use: /// /// * An empty slice, if you only care about which pattern matched. /// * A slice with /// [`pattern_len() * 2`](crate::dfa::onepass::DFA::pattern_len) /// slots, if you only care about the overall match spans for each matching /// pattern. /// * A slice with /// [`slot_len()`](crate::util::captures::GroupInfo::slot_len) slots, which /// permits recording match offsets for every capturing group in every /// pattern. /// /// # Errors /// /// This routine errors if the search could not complete. This can occur /// in the following circumstances: /// /// * When the provided `Input` configuration is not supported. For /// example, by providing an unsupported anchor mode. Concretely, /// this occurs when using [`Anchored::Pattern`] without enabling /// [`Config::starts_for_each_pattern`]. /// /// When a search returns an error, callers cannot know whether a match /// exists or not. /// /// # Example /// /// This example shows how to find the overall match offsets in a /// multi-pattern search without allocating a `Captures` value. Indeed, we /// can put our slots right on the stack. /// /// ``` /// use regex_automata::{dfa::onepass::DFA, Anchored, Input, PatternID}; /// /// let re = DFA::new_many(&[ /// r"[a-zA-Z]+", /// r"[0-9]+", /// ])?; /// let mut cache = re.create_cache(); /// let input = Input::new("123").anchored(Anchored::Yes); /// /// // We only care about the overall match offsets here, so we just /// // allocate two slots for each pattern. Each slot records the start /// // and end of the match. /// let mut slots = [None; 4]; /// let pid = re.try_search_slots(&mut cache, &input, &mut slots)?; /// assert_eq!(Some(PatternID::must(1)), pid); /// /// // The overall match offsets are always at 'pid * 2' and 'pid * 2 + 1'. /// // See 'GroupInfo' for more details on the mapping between groups and /// // slot indices. /// let slot_start = pid.unwrap().as_usize() * 2; /// let slot_end = slot_start + 1; /// assert_eq!(Some(0), slots[slot_start].map(|s| s.get())); /// assert_eq!(Some(3), slots[slot_end].map(|s| s.get())); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[inline] pub fn try_search_slots( &self, cache: &mut Cache, input: &Input<'_>, slots: &mut [Option<NonMaxUsize>], ) -> Result<Option<PatternID>, MatchError> { let utf8empty = self.get_nfa().has_empty() && self.get_nfa().is_utf8(); if !utf8empty { return self.try_search_slots_imp(cache, input, slots); } // See PikeVM::try_search_slots for why we do this. let min = self.get_nfa().group_info().implicit_slot_len(); if slots.len() >= min { return self.try_search_slots_imp(cache, input, slots); } if self.get_nfa().pattern_len() == 1 { let mut enough = [None, None]; let got = self.try_search_slots_imp(cache, input, &mut enough)?; // This is OK because we know `enough_slots` is strictly bigger // than `slots`, otherwise this special case isn't reached. slots.copy_from_slice(&enough[..slots.len()]); return Ok(got); } let mut enough = vec![None; min]; let got = self.try_search_slots_imp(cache, input, &mut enough)?; // This is OK because we know `enough_slots` is strictly bigger than // `slots`, otherwise this special case isn't reached. 
        slots.copy_from_slice(&enough[..slots.len()]);
        Ok(got)
    }

    #[inline(never)]
    fn try_search_slots_imp(
        &self,
        cache: &mut Cache,
        input: &Input<'_>,
        slots: &mut [Option<NonMaxUsize>],
    ) -> Result<Option<PatternID>, MatchError> {
        let utf8empty = self.get_nfa().has_empty() && self.get_nfa().is_utf8();
        match self.search_imp(cache, input, slots)? {
            None => return Ok(None),
            Some(pid) if !utf8empty => return Ok(Some(pid)),
            Some(pid) => {
                // These slot indices are always correct because we know our
                // 'pid' is valid and thus we know that the slot indices for it
                // are valid.
                let slot_start = pid.as_usize().wrapping_mul(2);
                let slot_end = slot_start.wrapping_add(1);
                // OK because we know we have a match and we know our caller
                // provided slots are big enough (which we make true above if
                // the caller didn't). Namely, we're only here when 'utf8empty'
                // is true, and when that's true, we require slots for every
                // pattern.
                let start = slots[slot_start].unwrap().get();
                let end = slots[slot_end].unwrap().get();
                // If our match splits a codepoint, then we cannot report it
                // as a match. And since one-pass DFAs only support anchored
                // searches, we don't try to skip ahead to find the next match.
                // We can just quit with nothing.
                if start == end && !input.is_char_boundary(start) {
                    return Ok(None);
                }
                Ok(Some(pid))
            }
        }
    }
}

impl DFA {
    fn search_imp(
        &self,
        cache: &mut Cache,
        input: &Input<'_>,
        slots: &mut [Option<NonMaxUsize>],
    ) -> Result<Option<PatternID>, MatchError> {
        // PERF: Some ideas. I ran out of steam after my initial impl to try
        // many of these.
        //
        // 1) Try doing more state shuffling. Right now, all we do is push
        // match states to the end of the transition table so that we can do
        // 'if sid >= self.min_match_id' to know whether we're in a match
        // state or not. But what about doing something like dense DFAs and
        // pushing dead, match and states with captures/looks all toward the
        // beginning of the transition table. Then we could do 'if sid <=
        // self.max_special_id', in which case, we need to do some special
        // handling of some sort. Otherwise, we get the happy path, just
        // like in a DFA search. The main argument against this is that the
        // one-pass DFA is likely to be used most often with capturing groups
        // and if capturing groups are common, then this might wind up being a
        // pessimization.
        //
        // 2) Consider moving 'PatternEpsilons' out of the transition table.
        // It is only needed for match states and usually a small minority of
        // states are match states. Therefore, we're using an extra 'u64' for
        // most states.
        //
        // 3) I played around with the match state handling and it seems like
        // there is probably a lot left on the table for improvement. The
        // key tension is that the 'find_match' routine is a giant mess, but
        // splitting it out into a non-inlineable function is a non-starter
        // because the match state might consume input, so 'find_match' COULD
        // be called quite a lot, and a function call at that point would trash
        // perf. In theory, we could detect whether a match state consumes
        // input and then specialize our search routine based on that. In that
        // case, maybe an extra function call is OK, but even then, it might be
        // too much of a latency hit. Another idea is to just try and figure
        // out how to reduce the code size of 'find_match'. RE2 has a trick
        // here where the match handling isn't done if we know the next byte of
        // input yields a match too. Maybe we adopt that?
        //
        // This just might be a tricky DFA to optimize.
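        //
        // In broad strokes, the search below works like this: set up the
        // cache and clear all slots, record the implicit "start" slot for
        // every pattern, pick a start state based on the anchored mode and
        // then walk the haystack one byte at a time. Each step follows a
        // single transition, checks any conditional look-around assertions,
        // applies any slot saves and, when the current state is a match
        // state, records the match (returning early for "earliest" searches
        // or when a "match wins" transition is taken under leftmost-first
        // semantics).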
if input.is_done() { return Ok(None); } // We unfortunately have a bit of book-keeping to do to set things // up. We do have to setup our cache and clear all of our slots. In // particular, clearing the slots is necessary for the case where we // report a match, but one of the capturing groups didn't participate // in the match but had a span set from a previous search. That would // be bad. In theory, we could avoid all this slot clearing if we knew // that every slot was always activated for every match. Then we would // know they would always be overwritten when a match is found. let explicit_slots_len = core::cmp::min( Slots::LIMIT, slots.len().saturating_sub(self.explicit_slot_start), ); cache.setup_search(explicit_slots_len); for slot in cache.explicit_slots() { *slot = None; } for slot in slots.iter_mut() { *slot = None; } // We set the starting slots for every pattern up front. This does // increase our latency somewhat, but it avoids having to do it every // time we see a match state (which could be many times in a single // search if the match state consumes input). for pid in self.nfa.patterns() { let i = pid.as_usize() * 2; if i >= slots.len() { break; } slots[i] = NonMaxUsize::new(input.start()); } let mut pid = None; let mut next_sid = match input.get_anchored() { Anchored::Yes => self.start(), Anchored::Pattern(pid) => self.start_pattern(pid)?, Anchored::No => { // If the regex is itself always anchored, then we're fine, // even if the search is configured to be unanchored. if !self.nfa.is_always_start_anchored() { return Err(MatchError::unsupported_anchored( Anchored::No, )); } self.start() } }; let leftmost_first = matches!(self.config.get_match_kind(), MatchKind::LeftmostFirst); for at in input.start()..input.end() { let sid = next_sid; let trans = self.transition(sid, input.haystack()[at]); next_sid = trans.state_id(); let epsilons = trans.epsilons(); if sid >= self.min_match_id { if self.find_match(cache, input, at, sid, slots, &mut pid) { if input.get_earliest() || (leftmost_first && trans.match_wins()) { return Ok(pid); } } } if sid == DEAD || (!epsilons.looks().is_empty() && !self.nfa.look_matcher().matches_set_inline( epsilons.looks(), input.haystack(), at, )) { return Ok(pid); } epsilons.slots().apply(at, cache.explicit_slots()); } if next_sid >= self.min_match_id { self.find_match( cache, input, input.end(), next_sid, slots, &mut pid, ); } Ok(pid) } /// Assumes 'sid' is a match state and looks for whether a match can /// be reported. If so, appropriate offsets are written to 'slots' and /// 'matched_pid' is set to the matching pattern ID. /// /// Even when 'sid' is a match state, it's possible that a match won't /// be reported. For example, when the conditional epsilon transitions /// leading to the match state aren't satisfied at the given position in /// the haystack. #[cfg_attr(feature = "perf-inline", inline(always))] fn find_match( &self, cache: &mut Cache, input: &Input<'_>, at: usize, sid: StateID, slots: &mut [Option<NonMaxUsize>], matched_pid: &mut Option<PatternID>, ) -> bool { debug_assert!(sid >= self.min_match_id); let pateps = self.pattern_epsilons(sid); let epsilons = pateps.epsilons(); if !epsilons.looks().is_empty() && !self.nfa.look_matcher().matches_set_inline( epsilons.looks(), input.haystack(), at, ) { return false; } let pid = pateps.pattern_id_unchecked(); // This calculation is always correct because we know our 'pid' is // valid and thus we know that the slot indices for it are valid. 
        let slot_end = pid.as_usize().wrapping_mul(2).wrapping_add(1);
        // Set the implicit 'end' slot for the matching pattern. (The 'start'
        // slot was set at the beginning of the search.)
        if slot_end < slots.len() {
            slots[slot_end] = NonMaxUsize::new(at);
        }
        // If the caller provided enough room, copy the previously recorded
        // explicit slots from our scratch space to the caller provided slots.
        // We *also* need to set any explicit slots that are active as part of
        // the path to the match state.
        if self.explicit_slot_start < slots.len() {
            // NOTE: The 'cache.explicit_slots()' slice is setup at the
            // beginning of every search such that it is guaranteed to return a
            // slice of length equivalent to 'slots[explicit_slot_start..]'.
            slots[self.explicit_slot_start..]
                .copy_from_slice(cache.explicit_slots());
            epsilons.slots().apply(at, &mut slots[self.explicit_slot_start..]);
        }
        *matched_pid = Some(pid);
        true
    }
}

impl DFA {
    /// Returns the anchored start state for matching any pattern in this DFA.
    fn start(&self) -> StateID {
        self.starts[0]
    }

    /// Returns the anchored start state for matching the given pattern. If
    /// 'starts_for_each_pattern'
    /// was not enabled, then this returns an error. If the given pattern is
    /// not in this DFA, then the DEAD state is returned, which can never
    /// lead to a match.
    fn start_pattern(&self, pid: PatternID) -> Result<StateID, MatchError> {
        if !self.config.get_starts_for_each_pattern() {
            return Err(MatchError::unsupported_anchored(Anchored::Pattern(
                pid,
            )));
        }
        // 'starts' always has non-zero length. The first entry is always the
        // anchored starting state for all patterns, and the following entries
        // are optional and correspond to the anchored starting states for
        // patterns at pid+1. Thus, starts.len()-1 corresponds to the total
        // number of patterns that one can explicitly search for. (And it may
        // be zero.)
        Ok(self.starts.get(pid.one_more()).copied().unwrap_or(DEAD))
    }

    /// Returns the transition from the given state ID and byte of input. The
    /// transition includes the next state ID, the slots that should be saved
    /// and any conditional epsilon transitions that must be satisfied in order
    /// to take this transition.
    fn transition(&self, sid: StateID, byte: u8) -> Transition {
        let offset = sid.as_usize() << self.stride2();
        let class = self.classes.get(byte).as_usize();
        self.table[offset + class]
    }

    /// Set the transition from the given state ID and byte of input to the
    /// transition given.
    fn set_transition(&mut self, sid: StateID, byte: u8, to: Transition) {
        let offset = sid.as_usize() << self.stride2();
        let class = self.classes.get(byte).as_usize();
        self.table[offset + class] = to;
    }

    /// Return an iterator of "sparse" transitions for the given state ID.
    /// "sparse" in this context means that consecutive transitions that are
    /// equivalent are returned as one group, and transitions to the DEAD state
    /// are ignored.
    ///
    /// This winds up being useful for debug printing, since it's much terser
    /// to display runs of equivalent transitions than the transition for every
    /// possible byte value. Indeed, in practice, it's very common for runs
    /// of equivalent transitions to appear.
    fn sparse_transitions(&self, sid: StateID) -> SparseTransitionIter<'_> {
        let start = sid.as_usize() << self.stride2();
        let end = start + self.alphabet_len();
        SparseTransitionIter {
            it: self.table[start..end].iter().enumerate(),
            cur: None,
        }
    }

    /// Return the pattern epsilons for the given state ID.
    ///
    /// If the given state ID does not correspond to a match state ID, then the
    /// pattern epsilons returned is empty.
fn pattern_epsilons(&self, sid: StateID) -> PatternEpsilons { let offset = sid.as_usize() << self.stride2(); PatternEpsilons(self.table[offset + self.pateps_offset].0) } /// Set the pattern epsilons for the given state ID. fn set_pattern_epsilons(&mut self, sid: StateID, pateps: PatternEpsilons) { let offset = sid.as_usize() << self.stride2(); self.table[offset + self.pateps_offset] = Transition(pateps.0); } /// Returns the state ID prior to the one given. This returns None if the /// given ID is the first DFA state. fn prev_state_id(&self, id: StateID) -> Option<StateID> { if id == DEAD { None } else { // CORRECTNESS: Since 'id' is not the first state, subtracting 1 // is always valid. Some(StateID::new_unchecked(id.as_usize().checked_sub(1).unwrap())) } } /// Returns the state ID of the last state in this DFA's transition table. /// "last" in this context means the last state to appear in memory, i.e., /// the one with the greatest ID. fn last_state_id(&self) -> StateID { // CORRECTNESS: A DFA table is always non-empty since it always at // least contains a DEAD state. Since every state has the same stride, // we can just compute what the "next" state ID would have been and // then subtract 1 from it. StateID::new_unchecked( (self.table.len() >> self.stride2()).checked_sub(1).unwrap(), ) } /// Move the transitions from 'id1' to 'id2' and vice versa. /// /// WARNING: This does not update the rest of the transition table to have /// transitions to 'id1' changed to 'id2' and vice versa. This merely moves /// the states in memory. pub(super) fn swap_states(&mut self, id1: StateID, id2: StateID) { let o1 = id1.as_usize() << self.stride2(); let o2 = id2.as_usize() << self.stride2(); for b in 0..self.stride() { self.table.swap(o1 + b, o2 + b); } } /// Map all state IDs in this DFA (transition table + start states) /// according to the closure given. 
pub(super) fn remap(&mut self, map: impl Fn(StateID) -> StateID) { for i in 0..self.state_len() { let offset = i << self.stride2(); for b in 0..self.alphabet_len() { let next = self.table[offset + b].state_id(); self.table[offset + b].set_state_id(map(next)); } } for i in 0..self.starts.len() { self.starts[i] = map(self.starts[i]); } } } impl core::fmt::Debug for DFA { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { fn debug_state_transitions( f: &mut core::fmt::Formatter, dfa: &DFA, sid: StateID, ) -> core::fmt::Result { for (i, (start, end, trans)) in dfa.sparse_transitions(sid).enumerate() { let next = trans.state_id(); if i > 0 { write!(f, ", ")?; } if start == end { write!( f, "{:?} => {:?}", DebugByte(start), next.as_usize(), )?; } else { write!( f, "{:?}-{:?} => {:?}", DebugByte(start), DebugByte(end), next.as_usize(), )?; } if trans.match_wins() { write!(f, " (MW)")?; } if !trans.epsilons().is_empty() { write!(f, " ({:?})", trans.epsilons())?; } } Ok(()) } writeln!(f, "onepass::DFA(")?; for index in 0..self.state_len() { let sid = StateID::must(index); let pateps = self.pattern_epsilons(sid); if sid == DEAD { write!(f, "D ")?; } else if pateps.pattern_id().is_some() { write!(f, "* ")?; } else { write!(f, " ")?; } write!(f, "{:06?}", sid.as_usize())?; if !pateps.is_empty() { write!(f, " ({:?})", pateps)?; } write!(f, ": ")?; debug_state_transitions(f, self, sid)?; write!(f, "\n")?; } writeln!(f, "")?; for (i, &sid) in self.starts.iter().enumerate() { if i == 0 { writeln!(f, "START(ALL): {:?}", sid.as_usize())?; } else { writeln!( f, "START(pattern: {:?}): {:?}", i - 1, sid.as_usize(), )?; } } writeln!(f, "state length: {:?}", self.state_len())?; writeln!(f, "pattern length: {:?}", self.pattern_len())?; writeln!(f, ")")?; Ok(()) } } /// An iterator over groups of consecutive equivalent transitions in a single /// state. #[derive(Debug)] struct SparseTransitionIter<'a> { it: core::iter::Enumerate<core::slice::Iter<'a, Transition>>, cur: Option<(u8, u8, Transition)>, } impl<'a> Iterator for SparseTransitionIter<'a> { type Item = (u8, u8, Transition); fn next(&mut self) -> Option<(u8, u8, Transition)> { while let Some((b, &trans)) = self.it.next() { // Fine because we'll never have more than u8::MAX transitions in // one state. let b = b.as_u8(); let (prev_start, prev_end, prev_trans) = match self.cur { Some(t) => t, None => { self.cur = Some((b, b, trans)); continue; } }; if prev_trans == trans { self.cur = Some((prev_start, b, prev_trans)); } else { self.cur = Some((b, b, trans)); if prev_trans.state_id() != DEAD { return Some((prev_start, prev_end, prev_trans)); } } } if let Some((start, end, trans)) = self.cur.take() { if trans.state_id() != DEAD { return Some((start, end, trans)); } } None } } /// A cache represents mutable state that a one-pass [`DFA`] requires during a /// search. /// /// For a given one-pass DFA, its corresponding cache may be created either via /// [`DFA::create_cache`], or via [`Cache::new`]. They are equivalent in every /// way, except the former does not require explicitly importing `Cache`. /// /// A particular `Cache` is coupled with the one-pass DFA from which it was /// created. It may only be used with that one-pass DFA. A cache and its /// allocations may be re-purposed via [`Cache::reset`], in which case, it can /// only be used with the new one-pass DFA (and not the old one). #[derive(Clone, Debug)] pub struct Cache { /// Scratch space used to store slots during a search. 
    /// Basically, we use the caller provided slots to store slots known
    /// when a match occurs. But after a match occurs, we might continue a
    /// search but ultimately fail to extend the match. When continuing the
    /// search, we need some place to store candidate capture offsets without
    /// overwriting the slot offsets recorded for the most recently seen
    /// match.
    explicit_slots: Vec<Option<NonMaxUsize>>,
    /// The number of slots in the caller-provided 'Captures' value for the
    /// current search. This is always at most 'explicit_slots.len()', but
    /// might be less than it, if the caller provided fewer slots to fill.
    explicit_slot_len: usize,
}

impl Cache {
    /// Create a new [`onepass::DFA`](DFA) cache.
    ///
    /// A potentially more convenient routine to create a cache is
    /// [`DFA::create_cache`], as it does not require also importing the
    /// `Cache` type.
    ///
    /// If you want to reuse the returned `Cache` with some other one-pass DFA,
    /// then you must call [`Cache::reset`] with the desired one-pass DFA.
    pub fn new(re: &DFA) -> Cache {
        let mut cache = Cache { explicit_slots: vec![], explicit_slot_len: 0 };
        cache.reset(re);
        cache
    }

    /// Reset this cache such that it can be used for searching with a
    /// different [`onepass::DFA`](DFA).
    ///
    /// A cache reset permits reusing memory already allocated in this cache
    /// with a different one-pass DFA.
    ///
    /// # Example
    ///
    /// This shows how to re-purpose a cache for use with a different one-pass
    /// DFA.
    ///
    /// ```
    /// # if cfg!(miri) { return Ok(()); } // miri takes too long
    /// use regex_automata::{dfa::onepass::DFA, Match};
    ///
    /// let re1 = DFA::new(r"\w")?;
    /// let re2 = DFA::new(r"\W")?;
    /// let mut caps1 = re1.create_captures();
    /// let mut caps2 = re2.create_captures();
    ///
    /// let mut cache = re1.create_cache();
    /// assert_eq!(
    ///     Some(Match::must(0, 0..2)),
    ///     { re1.captures(&mut cache, "Δ", &mut caps1); caps1.get_match() },
    /// );
    ///
    /// // Using 'cache' with re2 is not allowed. It may result in panics or
    /// // incorrect results. In order to re-purpose the cache, we must reset
    /// // it with the one-pass DFA we'd like to use it with.
    /// //
    /// // Similarly, after this reset, using the cache with 're1' is also not
    /// // allowed.
    /// re2.reset_cache(&mut cache);
    /// assert_eq!(
    ///     Some(Match::must(0, 0..3)),
    ///     { re2.captures(&mut cache, "☃", &mut caps2); caps2.get_match() },
    /// );
    ///
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```
    pub fn reset(&mut self, re: &DFA) {
        let explicit_slot_len = re.get_nfa().group_info().explicit_slot_len();
        self.explicit_slots.resize(explicit_slot_len, None);
        self.explicit_slot_len = explicit_slot_len;
    }

    /// Returns the heap memory usage, in bytes, of this cache.
    ///
    /// This does **not** include the stack size used up by this cache. To
    /// compute that, use `std::mem::size_of::<Cache>()`.
    pub fn memory_usage(&self) -> usize {
        self.explicit_slots.len()
            * core::mem::size_of::<Option<NonMaxUsize>>()
    }

    fn explicit_slots(&mut self) -> &mut [Option<NonMaxUsize>] {
        &mut self.explicit_slots[..self.explicit_slot_len]
    }

    fn setup_search(&mut self, explicit_slot_len: usize) {
        self.explicit_slot_len = explicit_slot_len;
    }
}

/// Represents a single transition in a one-pass DFA.
///
/// The high 21 bits corresponds to the state ID. The bit following
/// corresponds to the special "match wins" flag. The remaining low 42 bits
/// corresponds to the transition epsilons, which contains the slots that
/// should be saved when this transition is followed and the conditional
/// epsilon transitions that must be satisfied in order to follow this
/// transition.
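///
/// A rough sketch of the packing implied by the constants below (bit 63 on
/// the left):
///
/// ```text
/// 63            43  42  41                                        0
/// +---------------+---+--------------------------------------------+
/// |   state ID    | W |                  epsilons                  |
/// +---------------+---+--------------------------------------------+
/// ```
///
/// where `W` is the "match wins" flag.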
#[derive(Clone, Copy, Eq, PartialEq)] struct Transition(u64); impl Transition { const STATE_ID_BITS: u64 = 21; const STATE_ID_SHIFT: u64 = 64 - Transition::STATE_ID_BITS; const STATE_ID_LIMIT: u64 = 1 << Transition::STATE_ID_BITS; const MATCH_WINS_SHIFT: u64 = 64 - (Transition::STATE_ID_BITS + 1); const INFO_MASK: u64 = 0x000003FF_FFFFFFFF; /// Return a new transition to the given state ID with the given epsilons. fn new(match_wins: bool, sid: StateID, epsilons: Epsilons) -> Transition { let match_wins = if match_wins { 1 << Transition::MATCH_WINS_SHIFT } else { 0 }; let sid = sid.as_u64() << Transition::STATE_ID_SHIFT; Transition(sid | match_wins | epsilons.0) } /// Returns true if and only if this transition points to the DEAD state. fn is_dead(self) -> bool { self.state_id() == DEAD } /// Return whether this transition has a "match wins" property. /// /// When a transition has this property, it means that if a match has been /// found and the search uses leftmost-first semantics, then that match /// should be returned immediately instead of continuing on. /// /// The "match wins" name comes from RE2, which uses a pretty much /// identical mechanism for implementing leftmost-first semantics. fn match_wins(&self) -> bool { (self.0 >> Transition::MATCH_WINS_SHIFT & 1) == 1 } /// Return the "next" state ID that this transition points to. fn state_id(&self) -> StateID { // OK because a Transition has a valid StateID in its upper bits by // construction. The cast to usize is also correct, even on 16-bit // targets because, again, we know the upper bits is a valid StateID, // which can never overflow usize on any supported target. StateID::new_unchecked( (self.0 >> Transition::STATE_ID_SHIFT).as_usize(), ) } /// Set the "next" state ID in this transition. fn set_state_id(&mut self, sid: StateID) { *self = Transition::new(self.match_wins(), sid, self.epsilons()); } /// Return the epsilons embedded in this transition. fn epsilons(&self) -> Epsilons { Epsilons(self.0 & Transition::INFO_MASK) } } impl core::fmt::Debug for Transition { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { if self.is_dead() { return write!(f, "0"); } write!(f, "{}", self.state_id().as_usize())?; if self.match_wins() { write!(f, "-MW")?; } if !self.epsilons().is_empty() { write!(f, "-{:?}", self.epsilons())?; } Ok(()) } } /// A representation of a match state's pattern ID along with the epsilons for /// when a match occurs. /// /// A match state in a one-pass DFA, unlike in a more general DFA, has exactly /// one pattern ID. If it had more, then the original NFA would not have been /// one-pass. /// /// The "epsilons" part of this corresponds to what was found in the epsilon /// transitions between the transition taken in the last byte of input and the /// ultimate match state. This might include saving slots and/or conditional /// epsilon transitions that must be satisfied before one can report the match. /// /// Technically, every state has room for a 'PatternEpsilons', but it is only /// ever non-empty for match states. #[derive(Clone, Copy)] struct PatternEpsilons(u64); impl PatternEpsilons { const PATTERN_ID_BITS: u64 = 22; const PATTERN_ID_SHIFT: u64 = 64 - PatternEpsilons::PATTERN_ID_BITS; // A sentinel value indicating that this is not a match state. We don't // use 0 since 0 is a valid pattern ID. 
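    // Instead, the sentinel is the all-ones value of the 22 pattern ID bits
    // (the maximum encodable value), which is also why it doubles as
    // PATTERN_ID_LIMIT below.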
const PATTERN_ID_NONE: u64 = 0x00000000_003FFFFF; const PATTERN_ID_LIMIT: u64 = PatternEpsilons::PATTERN_ID_NONE; const PATTERN_ID_MASK: u64 = 0xFFFFFC00_00000000; const EPSILONS_MASK: u64 = 0x000003FF_FFFFFFFF; /// Return a new empty pattern epsilons that has no pattern ID and has no /// epsilons. This is suitable for non-match states. fn empty() -> PatternEpsilons { PatternEpsilons( PatternEpsilons::PATTERN_ID_NONE << PatternEpsilons::PATTERN_ID_SHIFT, ) } /// Whether this pattern epsilons is empty or not. It's empty when it has /// no pattern ID and an empty epsilons. fn is_empty(self) -> bool { self.pattern_id().is_none() && self.epsilons().is_empty() } /// Return the pattern ID in this pattern epsilons if one exists. fn pattern_id(self) -> Option<PatternID> { let pid = self.0 >> PatternEpsilons::PATTERN_ID_SHIFT; if pid == PatternEpsilons::PATTERN_ID_LIMIT { None } else { Some(PatternID::new_unchecked(pid.as_usize())) } } /// Returns the pattern ID without checking whether it's valid. If this is /// called and there is no pattern ID in this `PatternEpsilons`, then this /// will likely produce an incorrect result or possibly even a panic or /// an overflow. But safety will not be violated. /// /// This is useful when you know a particular state is a match state. If /// it's a match state, then it must have a pattern ID. fn pattern_id_unchecked(self) -> PatternID { let pid = self.0 >> PatternEpsilons::PATTERN_ID_SHIFT; PatternID::new_unchecked(pid.as_usize()) } /// Return a new pattern epsilons with the given pattern ID, but the same /// epsilons. fn set_pattern_id(self, pid: PatternID) -> PatternEpsilons { PatternEpsilons( (pid.as_u64() << PatternEpsilons::PATTERN_ID_SHIFT) | (self.0 & PatternEpsilons::EPSILONS_MASK), ) } /// Return the epsilons part of this pattern epsilons. fn epsilons(self) -> Epsilons { Epsilons(self.0 & PatternEpsilons::EPSILONS_MASK) } /// Return a new pattern epsilons with the given epsilons, but the same /// pattern ID. fn set_epsilons(self, epsilons: Epsilons) -> PatternEpsilons { PatternEpsilons( (self.0 & PatternEpsilons::PATTERN_ID_MASK) | u64::from(epsilons.0), ) } } impl core::fmt::Debug for PatternEpsilons { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { if self.is_empty() { return write!(f, "N/A"); } if let Some(pid) = self.pattern_id() { write!(f, "{}", pid.as_usize())?; } if !self.epsilons().is_empty() { if self.pattern_id().is_some() { write!(f, "/")?; } write!(f, "{:?}", self.epsilons())?; } Ok(()) } } /// Epsilons represents all of the NFA epsilons transitions that went into a /// single transition in a single DFA state. In this case, it only represents /// the epsilon transitions that have some kind of non-consuming side effect: /// either the transition requires storing the current position of the search /// into a slot, or the transition is conditional and requires the current /// position in the input to satisfy an assertion before the transition may be /// taken. /// /// This folds the cumulative effect of a group of NFA states (all connected /// by epsilon transitions) down into a single set of bits. While these bits /// can represent all possible conditional epsilon transitions, it only permits /// storing up to a somewhat small number of slots. /// /// Epsilons is represented as a 42-bit integer. For example, it is packed into /// the lower 42 bits of a `Transition`. (Where the high 22 bits contains a /// `StateID` and a special "match wins" property.) 
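///
/// A rough sketch of how those 42 bits are divided, per the constants below
/// (bit 41 on the left):
///
/// ```text
/// 41                            10  9        0
/// +-------------------------------+----------+
/// |        slots (32 bits)        |  looks   |
/// +-------------------------------+----------+
/// ```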
#[derive(Clone, Copy)] struct Epsilons(u64); impl Epsilons { const SLOT_MASK: u64 = 0x000003FF_FFFFFC00; const SLOT_SHIFT: u64 = 10; const LOOK_MASK: u64 = 0x00000000_000003FF; /// Create a new empty epsilons. It has no slots and no assertions that /// need to be satisfied. fn empty() -> Epsilons { Epsilons(0) } /// Returns true if this epsilons contains no slots and no assertions. fn is_empty(self) -> bool { self.0 == 0 } /// Returns the slot epsilon transitions. fn slots(self) -> Slots { Slots((self.0 >> Epsilons::SLOT_SHIFT).low_u32()) } /// Set the slot epsilon transitions. fn set_slots(self, slots: Slots) -> Epsilons { Epsilons( (u64::from(slots.0) << Epsilons::SLOT_SHIFT) | (self.0 & Epsilons::LOOK_MASK), ) } /// Return the set of look-around assertions in these epsilon transitions. fn looks(self) -> LookSet { LookSet { bits: (self.0 & Epsilons::LOOK_MASK).low_u16() } } /// Set the look-around assertions on these epsilon transitions. fn set_looks(self, look_set: LookSet) -> Epsilons { Epsilons((self.0 & Epsilons::SLOT_MASK) | u64::from(look_set.bits)) } } impl core::fmt::Debug for Epsilons { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { let mut wrote = false; if !self.slots().is_empty() { write!(f, "{:?}", self.slots())?; wrote = true; } if !self.looks().is_empty() { if wrote { write!(f, "/")?; } write!(f, "{:?}", self.looks())?; wrote = true; } if !wrote { write!(f, "N/A")?; } Ok(()) } } /// The set of epsilon transitions indicating that the current position in a /// search should be saved to a slot. /// /// This *only* represents explicit slots. So for example, the pattern /// `[a-z]+([0-9]+)([a-z]+)` has: /// /// * 3 capturing groups, thus 6 slots. /// * 1 implicit capturing group, thus 2 implicit slots. /// * 2 explicit capturing groups, thus 4 explicit slots. /// /// While implicit slots are represented by epsilon transitions in an NFA, we /// do not explicitly represent them here. Instead, implicit slots are assumed /// to be present and handled automatically in the search code. Therefore, /// that means we only need to represent explicit slots in our epsilon /// transitions. /// /// Its representation is a bit set. The bit 'i' is set if and only if there /// exists an explicit slot at index 'c', where 'c = (#patterns * 2) + i'. That /// is, the bit 'i' corresponds to the first explicit slot and the first /// explicit slot appears immediately following the last implicit slot. (If /// this is confusing, see `GroupInfo` for more details on how slots works.) /// /// A single `Slots` represents all the active slots in a sub-graph of an NFA, /// where all the states are connected by epsilon transitions. In effect, when /// traversing the one-pass DFA during a search, all slots set in a particular /// transition must be captured by recording the current search position. /// /// The API of `Slots` requires the caller to handle the explicit slot offset. /// That is, a `Slots` doesn't know where the explicit slots start for a /// particular NFA. Thus, if the callers see's the bit 'i' is set, then they /// need to do the arithmetic above to find 'c', which is the real actual slot /// index in the corresponding NFA. #[derive(Clone, Copy)] struct Slots(u32); impl Slots { const LIMIT: usize = 32; /// Insert the slot at the given bit index. fn insert(self, slot: usize) -> Slots { debug_assert!(slot < Slots::LIMIT); Slots(self.0 | (1 << slot.as_u32())) } /// Remove the slot at the given bit index. 
fn remove(self, slot: usize) -> Slots { debug_assert!(slot < Slots::LIMIT); Slots(self.0 & !(1 << slot.as_u32())) } /// Returns true if and only if this set contains no slots. fn is_empty(self) -> bool { self.0 == 0 } /// Returns an iterator over all of the set bits in this set. fn iter(self) -> SlotsIter { SlotsIter { slots: self } } /// For the position `at` in the current haystack, copy it to /// `caller_explicit_slots` for all slots that are in this set. /// /// Callers may pass a slice of any length. Slots in this set bigger than /// the length of the given explicit slots are simply skipped. /// /// The slice *must* correspond only to the explicit slots and the first /// element of the slice must always correspond to the first explicit slot /// in the corresponding NFA. fn apply( self, at: usize, caller_explicit_slots: &mut [Option<NonMaxUsize>], ) { if self.is_empty() { return; } let at = NonMaxUsize::new(at); for slot in self.iter() { if slot >= caller_explicit_slots.len() { break; } caller_explicit_slots[slot] = at; } } } impl core::fmt::Debug for Slots { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { write!(f, "S")?; for slot in self.iter() { write!(f, "-{:?}", slot)?; } Ok(()) } } /// An iterator over all of the bits set in a slot set. /// /// This returns the bit index that is set, so callers may need to offset it /// to get the actual NFA slot index. #[derive(Debug)] struct SlotsIter { slots: Slots, } impl Iterator for SlotsIter { type Item = usize; fn next(&mut self) -> Option<usize> { // Number of zeroes here is always <= u8::MAX, and so fits in a usize. let slot = self.slots.0.trailing_zeros().as_usize(); if slot >= Slots::LIMIT { return None; } self.slots = self.slots.remove(slot); Some(slot) } } /// An error that occurred during the construction of a one-pass DFA. /// /// This error does not provide many introspection capabilities. There are /// generally only two things you can do with it: /// /// * Obtain a human readable message via its `std::fmt::Display` impl. /// * Access an underlying [`thompson::BuildError`] type from its `source` /// method via the `std::error::Error` trait. This error only occurs when using /// convenience routines for building a one-pass DFA directly from a pattern /// string. /// /// When the `std` feature is enabled, this implements the `std::error::Error` /// trait. #[derive(Clone, Debug)] pub struct BuildError { kind: BuildErrorKind, } /// The kind of error that occurred during the construction of a one-pass DFA. 
#[derive(Clone, Debug)] enum BuildErrorKind { NFA(crate::nfa::thompson::BuildError), Word(UnicodeWordBoundaryError), TooManyStates { limit: u64 }, TooManyPatterns { limit: u64 }, UnsupportedLook { look: Look }, ExceededSizeLimit { limit: usize }, NotOnePass { msg: &'static str }, } impl BuildError { fn nfa(err: crate::nfa::thompson::BuildError) -> BuildError { BuildError { kind: BuildErrorKind::NFA(err) } } fn word(err: UnicodeWordBoundaryError) -> BuildError { BuildError { kind: BuildErrorKind::Word(err) } } fn too_many_states(limit: u64) -> BuildError { BuildError { kind: BuildErrorKind::TooManyStates { limit } } } fn too_many_patterns(limit: u64) -> BuildError { BuildError { kind: BuildErrorKind::TooManyPatterns { limit } } } fn unsupported_look(look: Look) -> BuildError { BuildError { kind: BuildErrorKind::UnsupportedLook { look } } } fn exceeded_size_limit(limit: usize) -> BuildError { BuildError { kind: BuildErrorKind::ExceededSizeLimit { limit } } } fn not_one_pass(msg: &'static str) -> BuildError { BuildError { kind: BuildErrorKind::NotOnePass { msg } } } } #[cfg(feature = "std")] impl std::error::Error for BuildError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { use self::BuildErrorKind::*; match self.kind { NFA(ref err) => Some(err), Word(ref err) => Some(err), _ => None, } } } impl core::fmt::Display for BuildError { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { use self::BuildErrorKind::*; match self.kind { NFA(_) => write!(f, "error building NFA"), Word(_) => write!(f, "NFA contains Unicode word boundary"), TooManyStates { limit } => write!( f, "one-pass DFA exceeded a limit of {:?} for number of states", limit, ), TooManyPatterns { limit } => write!( f, "one-pass DFA exceeded a limit of {:?} for number of patterns", limit, ), UnsupportedLook { look } => write!( f, "one-pass DFA does not support the {:?} assertion", look, ), ExceededSizeLimit { limit } => write!( f, "one-pass DFA exceeded size limit of {:?} during building", limit, ), NotOnePass { msg } => write!( f, "one-pass DFA could not be built because \ pattern is not one-pass: {}", msg, ), } } } #[cfg(all(test, feature = "syntax"))] mod tests { use alloc::string::ToString; use super::*; #[test] fn fail_conflicting_transition() { let predicate = |err: &str| err.contains("conflicting transition"); let err = DFA::new(r"a*[ab]").unwrap_err().to_string(); assert!(predicate(&err), "{}", err); } #[test] fn fail_multiple_epsilon() { let predicate = |err: &str| { err.contains("multiple epsilon transitions to same state") }; let err = DFA::new(r"(^|$)a").unwrap_err().to_string(); assert!(predicate(&err), "{}", err); } #[test] fn fail_multiple_match() { let predicate = |err: &str| { err.contains("multiple epsilon transitions to match state") }; let err = DFA::new_many(&[r"^", r"$"]).unwrap_err().to_string(); assert!(predicate(&err), "{}", err); } // This test is meant to build a one-pass regex with the maximum number of // possible slots. // // NOTE: Remember that the slot limit only applies to explicit capturing // groups. Any number of implicit capturing groups is supported (up to the // maximum number of supported patterns), since implicit groups are handled // by the search loop itself. #[test] fn max_slots() { // One too many... let pat = r"(a)(b)(c)(d)(e)(f)(g)(h)(i)(j)(k)(l)(m)(n)(o)(p)(q)"; assert!(DFA::new(pat).is_err()); // Just right. 
let pat = r"(a)(b)(c)(d)(e)(f)(g)(h)(i)(j)(k)(l)(m)(n)(o)(p)"; assert!(DFA::new(pat).is_ok()); } // This test ensures that the one-pass DFA works with all look-around // assertions that we expect it to work with. // // The utility of this test is that each one-pass transition has a small // amount of space to store look-around assertions. Currently, there is // logic in the one-pass constructor to ensure there aren't more than ten // possible assertions. And indeed, there are only ten possible assertions // (at time of writing), so this is okay. But conceivably, more assertions // could be added. So we check that things at least work with what we // expect them to work with. #[test] fn assertions() { // haystack anchors assert!(DFA::new(r"^").is_ok()); assert!(DFA::new(r"$").is_ok()); // line anchors assert!(DFA::new(r"(?m)^").is_ok()); assert!(DFA::new(r"(?m)$").is_ok()); assert!(DFA::new(r"(?Rm)^").is_ok()); assert!(DFA::new(r"(?Rm)$").is_ok()); // word boundaries if cfg!(feature = "unicode-word-boundary") { assert!(DFA::new(r"\b").is_ok()); assert!(DFA::new(r"\B").is_ok()); } assert!(DFA::new(r"(?-u)\b").is_ok()); assert!(DFA::new(r"(?-u)\B").is_ok()); } #[cfg(not(miri))] // takes too long on miri #[test] fn is_one_pass() { use crate::util::syntax; assert!(DFA::new(r"a*b").is_ok()); if cfg!(feature = "unicode-perl") { assert!(DFA::new(r"\w").is_ok()); } assert!(DFA::new(r"(?-u)\w*\s").is_ok()); assert!(DFA::new(r"(?s:.)*?").is_ok()); assert!(DFA::builder() .syntax(syntax::Config::new().utf8(false)) .build(r"(?s-u:.)*?") .is_ok()); } #[test] fn is_not_one_pass() { assert!(DFA::new(r"a*a").is_err()); assert!(DFA::new(r"(?s-u:.)*?").is_err()); assert!(DFA::new(r"(?s:.)*?a").is_err()); } #[cfg(not(miri))] #[test] fn is_not_one_pass_bigger() { assert!(DFA::new(r"\w*\s").is_err()); } } <file_sep>/regex-automata/src/nfa/thompson/literal_trie.rs use core::mem; use alloc::{vec, vec::Vec}; use crate::{ nfa::thompson::{self, compiler::ThompsonRef, BuildError, Builder}, util::primitives::{IteratorIndexExt, StateID}, }; /// A trie that preserves leftmost-first match semantics. /// /// This is a purpose-built data structure for optimizing 'lit1|lit2|..|litN' /// patterns. It can *only* handle alternations of literals, which makes it /// somewhat restricted in its scope, but literal alternations are fairly /// common. /// /// At a 5,000 foot level, the main idea of this trie is make an alternation of /// literals look more like a DFA than an NFA via epsilon removal. /// /// More precisely, the main issue is in how alternations are compiled into /// a Thompson NFA. Namely, each alternation gets a single NFA "union" state /// with an epsilon transition for every branch of the alternation pointing to /// an NFA state corresponding to the start of that branch. The main problem /// with this representation is the cost of computing an epsilon closure. Once /// you hit the alternation's start state, it acts as a sort of "clog" that /// requires you to traverse all of the epsilon transitions to compute the full /// closure. /// /// While fixing such clogs in the general case is pretty tricky without going /// to a DFA (or perhaps a Glushkov NFA, but that comes with other problems). /// But at least in the case of an alternation of literals, we can convert /// that to a prefix trie without too much cost. In theory, that's all you /// really need to do: build the trie and then compile it to a Thompson NFA. 
/// For example, if you have the pattern 'bar|baz|foo', then using a trie, it /// is transformed to something like 'b(a(r|z))|f'. This reduces the clog by /// reducing the number of epsilon transitions out of the alternation's start /// state from 3 to 2 (it actually gets down to 1 when you use a sparse state, /// which we do below). It's a small effect here, but when your alternation is /// huge, the savings is also huge. /// /// And that is... essentially what a LiteralTrie does. But there is one /// hiccup. Consider a regex like 'sam|samwise'. How does a prefix trie compile /// that when leftmost-first semantics are used? If 'sam|samwise' was the /// entire regex, then you could just drop the 'samwise' branch entirely since /// it is impossible to match ('sam' will always take priority, and since it /// is a prefix of 'samwise', 'samwise' will never match). But what about the /// regex '\b(sam|samwise)\b'? In that case, you can't remove 'samwise' because /// it might match when 'sam' doesn't fall on a word boundary. /// /// The main idea is that 'sam|samwise' can be translated to 'sam(?:|wise)', /// which is a precisely equivalent regex that also gets rid of the clog. /// /// Another example is 'zapper|z|zap'. That gets translated to /// 'z(?:apper||ap)'. /// /// We accomplish this by giving each state in the trie multiple "chunks" of /// transitions. Each chunk barrier represents a match. The idea is that once /// you know a match occurs, none of the transitions after the match can be /// re-ordered and mixed in with the transitions before the match. Otherwise, /// the match semantics could be changed. /// /// See the 'State' data type for a bit more detail. /// /// Future work: /// /// * In theory, it would be nice to generalize the idea of removing clogs and /// apply it to the NFA graph itself. Then this could in theory work for /// case insensitive alternations of literals, or even just alternations where /// each branch starts with a non-epsilon transition. /// * Could we instead use the Aho-Corasick algorithm here? The aho-corasick /// crate deals with leftmost-first matches correctly, but I think this implies /// encoding failure transitions into a Thompson NFA somehow. Which seems fine, /// because failure transitions are just unconditional epsilon transitions? /// * Or perhaps even better, could we use an aho_corasick::AhoCorasick /// directly? At time of writing, 0.7 is the current version of the /// aho-corasick crate, and that definitely cannot be used as-is. But if we /// expose the underlying finite state machine API, then could we use it? That /// would be super. If we could figure that out, it might also lend itself to /// more general composition of finite state machines. #[derive(Clone)] pub(crate) struct LiteralTrie { /// The set of trie states. Each state contains one or more chunks, where /// each chunk is a sparse set of transitions to other states. A leaf state /// is always a match state that contains only empty chunks (i.e., no /// transitions). states: Vec<State>, /// Whether to add literals in reverse to the trie. Useful when building /// a reverse NFA automaton. rev: bool, } impl LiteralTrie { /// Create a new literal trie that adds literals in the forward direction. pub(crate) fn forward() -> LiteralTrie { let root = State::default(); LiteralTrie { states: vec![root], rev: false } } /// Create a new literal trie that adds literals in reverse. 
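    ///
    /// A minimal sketch of intended usage (crate-internal, so shown as plain
    /// text rather than a doctest); bytes are inserted back-to-front:
    ///
    /// ```text
    /// let mut trie = LiteralTrie::reverse();
    /// trie.add(b"abc")?; // stored along the path 'c' -> 'b' -> 'a'
    /// ```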
pub(crate) fn reverse() -> LiteralTrie { let root = State::default(); LiteralTrie { states: vec![root], rev: true } } /// Add the given literal to this trie. /// /// If the literal could not be added because the `StateID` space was /// exhausted, then an error is returned. If an error returns, the trie /// is in an unspecified state. pub(crate) fn add(&mut self, bytes: &[u8]) -> Result<(), BuildError> { let mut prev = StateID::ZERO; let mut it = bytes.iter().copied(); while let Some(b) = if self.rev { it.next_back() } else { it.next() } { prev = self.get_or_add_state(prev, b)?; } self.states[prev].add_match(); Ok(()) } /// If the given transition is defined, then return the next state ID. /// Otherwise, add the transition to `from` and point it to a new state. /// /// If a new state ID could not be allocated, then an error is returned. fn get_or_add_state( &mut self, from: StateID, byte: u8, ) -> Result<StateID, BuildError> { let active = self.states[from].active_chunk(); match active.binary_search_by_key(&byte, |t| t.byte) { Ok(i) => Ok(active[i].next), Err(i) => { // Add a new state and get its ID. let next = StateID::new(self.states.len()).map_err(|_| { BuildError::too_many_states(self.states.len()) })?; self.states.push(State::default()); // Offset our position to account for all transitions and not // just the ones in the active chunk. let i = self.states[from].active_chunk_start() + i; let t = Transition { byte, next }; self.states[from].transitions.insert(i, t); Ok(next) } } } /// Compile this literal trie to the NFA builder given. /// /// This forwards any errors that may occur while using the given builder. pub(crate) fn compile( &self, builder: &mut Builder, ) -> Result<ThompsonRef, BuildError> { // Compilation proceeds via depth-first traversal of the trie. // // This is overall pretty brutal. The recursive version of this is // deliciously simple. (See 'compile_to_hir' below for what it might // look like.) But recursion on a trie means your call stack grows // in accordance with the longest literal, which just does not seem // appropriate. So we push the call stack to the heap. But as a result, // the trie traversal becomes pretty brutal because we essentially // have to encode the state of a double for-loop into an explicit call // frame. If someone can simplify this without using recursion, that'd // be great. // 'end' is our match state for this trie, but represented in the the // NFA. Any time we see a match in the trie, we insert a transition // from the current state we're in to 'end'. let end = builder.add_empty()?; let mut stack = vec![]; let mut f = Frame::new(&self.states[StateID::ZERO]); loop { if let Some(t) = f.transitions.next() { if self.states[t.next].is_leaf() { f.sparse.push(thompson::Transition { start: t.byte, end: t.byte, next: end, }); } else { f.sparse.push(thompson::Transition { start: t.byte, end: t.byte, // This is a little funny, but when the frame we create // below completes, it will pop this parent frame off // and modify this transition to point to the correct // state. next: StateID::ZERO, }); stack.push(f); f = Frame::new(&self.states[t.next]); } continue; } // At this point, we have visited all transitions in f.chunk, so // add it as a sparse NFA state. Unless the chunk was empty, in // which case, we don't do anything. if !f.sparse.is_empty() { let chunk_id = if f.sparse.len() == 1 { builder.add_range(f.sparse.pop().unwrap())? } else { let sparse = mem::replace(&mut f.sparse, vec![]); builder.add_sparse(sparse)? 
}; f.union.push(chunk_id); } // Now we need to look to see if there are other chunks to visit. if let Some(chunk) = f.chunks.next() { // If we're here, it means we're on the second (or greater) // chunk, which implies there is a match at this point. So // connect this state to the final end state. f.union.push(end); // Advance to the next chunk. f.transitions = chunk.iter(); continue; } // Now that we are out of chunks, we have completely visited // this state. So turn our union of chunks into an NFA union // state, and add that union state to the parent state's current // sparse state. (If there is no parent, we're done.) let start = builder.add_union(f.union)?; match stack.pop() { None => { return Ok(ThompsonRef { start, end }); } Some(mut parent) => { // OK because the only way a frame gets pushed on to the // stack (aside from the root) is when a transition has // been added to 'sparse'. parent.sparse.last_mut().unwrap().next = start; f = parent; } } } } /// Converts this trie to an equivalent HIR expression. /// /// We don't actually use this, but it's useful for tests. In particular, /// it provides a (somewhat) human readable representation of the trie /// itself. #[cfg(test)] fn compile_to_hir(&self) -> regex_syntax::hir::Hir { self.compile_state_to_hir(StateID::ZERO) } /// The recursive implementation of 'to_hir'. /// /// Notice how simple this is compared to 'compile' above. 'compile' could /// be similarly simple, but we opt to not use recursion in order to avoid /// overflowing the stack in the case of a longer literal. #[cfg(test)] fn compile_state_to_hir(&self, sid: StateID) -> regex_syntax::hir::Hir { use regex_syntax::hir::Hir; let mut alt = vec![]; for (i, chunk) in self.states[sid].chunks().enumerate() { if i > 0 { alt.push(Hir::empty()); } if chunk.is_empty() { continue; } let mut chunk_alt = vec![]; for t in chunk.iter() { chunk_alt.push(Hir::concat(vec![ Hir::literal(vec![t.byte]), self.compile_state_to_hir(t.next), ])); } alt.push(Hir::alternation(chunk_alt)); } Hir::alternation(alt) } } impl core::fmt::Debug for LiteralTrie { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { writeln!(f, "LiteralTrie(")?; for (sid, state) in self.states.iter().with_state_ids() { writeln!(f, "{:06?}: {:?}", sid.as_usize(), state)?; } writeln!(f, ")")?; Ok(()) } } /// An explicit stack frame used for traversing the trie without using /// recursion. /// /// Each frame is tied to the traversal of a single trie state. The frame is /// dropped once the entire state (and all of its children) have been visited. /// The "output" of compiling a state is the 'union' vector, which is turn /// converted to a NFA union state. Each branch of the union corresponds to a /// chunk in the trie state. /// /// 'sparse' corresponds to the set of transitions for a particular chunk in a /// trie state. It is ultimately converted to an NFA sparse state. The 'sparse' /// field, after being converted to a sparse NFA state, is reused for any /// subsequent chunks in the trie state, if any exist. #[derive(Debug)] struct Frame<'a> { /// The remaining chunks to visit for a trie state. chunks: StateChunksIter<'a>, /// The transitions of the current chunk that we're iterating over. Since /// every trie state has at least one chunk, every frame is initialized /// with the first chunk's transitions ready to be consumed. transitions: core::slice::Iter<'a, Transition>, /// The NFA state IDs pointing to the start of each chunk compiled by /// this trie state. 
This ultimately gets converted to an NFA union once /// the entire trie state (and all of its children) have been compiled. /// The order of these matters for leftmost-first match semantics, since /// earlier matches in the union are preferred over later ones. union: Vec<StateID>, /// The actual NFA transitions for a single chunk in a trie state. This /// gets converted to an NFA sparse state, and its corresponding NFA state /// ID should get added to 'union'. sparse: Vec<thompson::Transition>, } impl<'a> Frame<'a> { /// Create a new stack frame for trie traversal. This initializes the /// 'transitions' iterator to the transitions for the first chunk, with the /// 'chunks' iterator being every chunk after the first one. fn new(state: &'a State) -> Frame<'a> { let mut chunks = state.chunks(); // every state has at least 1 chunk let chunk = chunks.next().unwrap(); let transitions = chunk.iter(); Frame { chunks, transitions, union: vec![], sparse: vec![] } } } /// A state in a trie. /// /// This uses a sparse representation. Since we don't use literal tries /// for searching, and ultimately (and compilation requires visiting every /// transition anyway), we use a sparse representation for transitions. This /// means we save on memory, at the expense of 'LiteralTrie::add' being perhaps /// a bit slower. /// /// While 'transitions' is pretty standard as far as tries goes, the 'chunks' /// piece here is more unusual. In effect, 'chunks' defines a partitioning /// of 'transitions', where each chunk corresponds to a distinct set of /// transitions. The key invariant is that a transition in one chunk cannot /// be moved to another chunk. This is the secret sauce that preserve /// leftmost-first match semantics. /// /// A new chunk is added whenever we mark a state as a match state. Once a /// new chunk is added, the old active chunk is frozen and is never mutated /// again. The new chunk becomes the active chunk, which is defined as /// '&transitions[chunks.last().map_or(0, |c| c.1)..]'. Thus, a state where /// 'chunks' is empty actually contains one chunk. Thus, every state contains /// at least one (possibly empty) chunk. /// /// A "leaf" state is a state that has no outgoing transitions (so /// 'transitions' is empty). Note that there is no way for a leaf state to be a /// non-matching state. (Although while building the trie, within 'add', a leaf /// state may exist while not containing any matches. But this invariant is /// only broken within 'add'. Once 'add' returns, the invariant is upheld.) #[derive(Clone, Default)] struct State { transitions: Vec<Transition>, chunks: Vec<(usize, usize)>, } impl State { /// Mark this state as a match state and freeze the active chunk such that /// it can not be further mutated. fn add_match(&mut self) { // This is not strictly necessary, but there's no point in recording // another match by adding another chunk if the state has no // transitions. Note though that we only skip this if we already know // this is a match state, which is only true if 'chunks' is not empty. // Basically, if we didn't do this, nothing semantically would change, // but we'd end up pushing another chunk and potentially triggering an // alloc. if self.transitions.is_empty() && !self.chunks.is_empty() { return; } let chunk_start = self.active_chunk_start(); let chunk_end = self.transitions.len(); self.chunks.push((chunk_start, chunk_end)); } /// Returns true if and only if this state is a leaf state. That is, a /// state that has no outgoing transitions. 
fn is_leaf(&self) -> bool { self.transitions.is_empty() } /// Returns an iterator over all of the chunks (including the currently /// active chunk) in this state. Since the active chunk is included, the /// iterator is guaranteed to always yield at least one chunk (although the /// chunk may be empty). fn chunks(&self) -> StateChunksIter<'_> { StateChunksIter { transitions: &*self.transitions, chunks: self.chunks.iter(), active: Some(self.active_chunk()), } } /// Returns the active chunk as a slice of transitions. fn active_chunk(&self) -> &[Transition] { let start = self.active_chunk_start(); &self.transitions[start..] } /// Returns the index into 'transitions' where the active chunk starts. fn active_chunk_start(&self) -> usize { self.chunks.last().map_or(0, |&(_, end)| end) } } impl core::fmt::Debug for State { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { let mut spacing = " "; for (i, chunk) in self.chunks().enumerate() { if i > 0 { write!(f, "{}MATCH", spacing)?; } spacing = ""; for (j, t) in chunk.iter().enumerate() { spacing = " "; if j == 0 && i > 0 { write!(f, " ")?; } else if j > 0 { write!(f, ", ")?; } write!(f, "{:?}", t)?; } } Ok(()) } } /// An iterator over all of the chunks in a state, including the active chunk. /// /// This iterator is created by `State::chunks`. We name this iterator so that /// we can include it in the `Frame` type for non-recursive trie traversal. #[derive(Debug)] struct StateChunksIter<'a> { transitions: &'a [Transition], chunks: core::slice::Iter<'a, (usize, usize)>, active: Option<&'a [Transition]>, } impl<'a> Iterator for StateChunksIter<'a> { type Item = &'a [Transition]; fn next(&mut self) -> Option<&'a [Transition]> { if let Some(&(start, end)) = self.chunks.next() { return Some(&self.transitions[start..end]); } if let Some(chunk) = self.active.take() { return Some(chunk); } None } } /// A single transition in a trie to another state. #[derive(Clone, Copy)] struct Transition { byte: u8, next: StateID, } impl core::fmt::Debug for Transition { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { write!( f, "{:?} => {}", crate::util::escape::DebugByte(self.byte), self.next.as_usize() ) } } #[cfg(test)] mod tests { use bstr::B; use regex_syntax::hir::Hir; use super::*; #[test] fn zap() { let mut trie = LiteralTrie::forward(); trie.add(b"zapper").unwrap(); trie.add(b"z").unwrap(); trie.add(b"zap").unwrap(); let got = trie.compile_to_hir(); let expected = Hir::concat(vec![ Hir::literal(B("z")), Hir::alternation(vec![ Hir::literal(B("apper")), Hir::empty(), Hir::literal(B("ap")), ]), ]); assert_eq!(expected, got); } #[test] fn maker() { let mut trie = LiteralTrie::forward(); trie.add(b"make").unwrap(); trie.add(b"maple").unwrap(); trie.add(b"maker").unwrap(); let got = trie.compile_to_hir(); let expected = Hir::concat(vec![ Hir::literal(B("ma")), Hir::alternation(vec![ Hir::concat(vec![ Hir::literal(B("ke")), Hir::alternation(vec![Hir::empty(), Hir::literal(B("r"))]), ]), Hir::literal(B("ple")), ]), ]); assert_eq!(expected, got); } } <file_sep>/src/regexset/string.rs use alloc::string::String; use regex_automata::{meta, Input, PatternID, PatternSet, PatternSetIter}; use crate::{Error, RegexSetBuilder}; /// Match multiple, possibly overlapping, regexes in a single search. /// /// A regex set corresponds to the union of zero or more regular expressions. /// That is, a regex set will match a haystack when at least one of its /// constituent regexes matches. 
A regex set as it's formulated here provides a /// touch more power: it will also report *which* regular expressions in the /// set match. Indeed, this is the key difference between regex sets and a /// single `Regex` with many alternates, since only one alternate can match at /// a time. /// /// For example, consider regular expressions to match email addresses and /// domains: `[a-z]+@[a-z]+\.(com|org|net)` and `[a-z]+\.(com|org|net)`. If a /// regex set is constructed from those regexes, then searching the haystack /// `<EMAIL>` will report both regexes as matching. Of course, one /// could accomplish this by compiling each regex on its own and doing two /// searches over the haystack. The key advantage of using a regex set is /// that it will report the matching regexes using a *single pass through the /// haystack*. If one has hundreds or thousands of regexes to match repeatedly /// (like a URL router for a complex web application or a user agent matcher), /// then a regex set *can* realize huge performance gains. /// /// # Limitations /// /// Regex sets are limited to answering the following two questions: /// /// 1. Does any regex in the set match? /// 2. If so, which regexes in the set match? /// /// As with the main [`Regex`][crate::Regex] type, it is cheaper to ask (1) /// instead of (2) since the matching engines can stop after the first match /// is found. /// /// You cannot directly extract [`Match`][crate::Match] or /// [`Captures`][crate::Captures] objects from a regex set. If you need these /// operations, the recommended approach is to compile each pattern in the set /// independently and scan the exact same haystack a second time with those /// independently compiled patterns: /// /// ``` /// use regex::{Regex, RegexSet}; /// /// let patterns = ["foo", "bar"]; /// // Both patterns will match different ranges of this string. /// let hay = "barfoo"; /// /// // Compile a set matching any of our patterns. /// let set = RegexSet::new(patterns).unwrap(); /// // Compile each pattern independently. /// let regexes: Vec<_> = set /// .patterns() /// .iter() /// .map(|pat| Regex::new(pat).unwrap()) /// .collect(); /// /// // Match against the whole set first and identify the individual /// // matching patterns. /// let matches: Vec<&str> = set /// .matches(hay) /// .into_iter() /// // Dereference the match index to get the corresponding /// // compiled pattern. /// .map(|index| &regexes[index]) /// // To get match locations or any other info, we then have to search the /// // exact same haystack again, using our separately-compiled pattern. /// .map(|re| re.find(hay).unwrap().as_str()) /// .collect(); /// /// // Matches arrive in the order the constituent patterns were declared, /// // not the order they appear in the haystack. /// assert_eq!(vec!["foo", "bar"], matches); /// ``` /// /// # Performance /// /// A `RegexSet` has the same performance characteristics as `Regex`. Namely, /// search takes `O(m * n)` time, where `m` is proportional to the size of the /// regex set and `n` is proportional to the length of the haystack. /// /// # Trait implementations /// /// The `Default` trait is implemented for `RegexSet`. The default value /// is an empty set. An empty set can also be explicitly constructed via /// [`RegexSet::empty`].
/// /// # Example /// /// This shows how the above two regexes (for matching email addresses and /// domains) might work: /// /// ``` /// use regex::RegexSet; /// /// let set = RegexSet::new(&[ /// r"[a-z]+@[a-z]+\.(com|org|net)", /// r"[a-z]+\.(com|org|net)", /// ]).unwrap(); /// /// // Ask whether any regexes in the set match. /// assert!(set.is_match("<EMAIL>")); /// /// // Identify which regexes in the set match. /// let matches: Vec<_> = set.matches("<EMAIL>").into_iter().collect(); /// assert_eq!(vec![0, 1], matches); /// /// // Try again, but with a haystack that only matches one of the regexes. /// let matches: Vec<_> = set.matches("example.com").into_iter().collect(); /// assert_eq!(vec![1], matches); /// /// // Try again, but with a haystack that doesn't match any regex in the set. /// let matches: Vec<_> = set.matches("example").into_iter().collect(); /// assert!(matches.is_empty()); /// ``` /// /// Note that it would be possible to adapt the above example to using `Regex` /// with an expression like: /// /// ```text /// (?P<email>[a-z]+@(?P<email_domain>[a-z]+[.](com|org|net)))|(?P<domain>[a-z]+[.](com|org|net)) /// ``` /// /// After a match, one could then inspect the capture groups to figure out /// which alternates matched. The problem is that it is hard to make this /// approach scale when there are many regexes since the overlap between each /// alternate isn't always obvious to reason about. #[derive(Clone)] pub struct RegexSet { pub(crate) meta: meta::Regex, pub(crate) patterns: alloc::sync::Arc<[String]>, } impl RegexSet { /// Create a new regex set with the given regular expressions. /// /// This takes an iterator of `S`, where `S` is something that can produce /// a `&str`. If any of the strings in the iterator are not valid regular /// expressions, then an error is returned. /// /// # Example /// /// Create a new regex set from an iterator of strings: /// /// ``` /// use regex::RegexSet; /// /// let set = RegexSet::new([r"\w+", r"\d+"]).unwrap(); /// assert!(set.is_match("foo")); /// ``` pub fn new<I, S>(exprs: I) -> Result<RegexSet, Error> where S: AsRef<str>, I: IntoIterator<Item = S>, { RegexSetBuilder::new(exprs).build() } /// Create a new empty regex set. /// /// An empty regex never matches anything. /// /// This is a convenience function for `RegexSet::new([])`, but doesn't /// require one to specify the type of the input. /// /// # Example /// /// ``` /// use regex::RegexSet; /// /// let set = RegexSet::empty(); /// assert!(set.is_empty()); /// // an empty set matches nothing /// assert!(!set.is_match("")); /// ``` pub fn empty() -> RegexSet { let empty: [&str; 0] = []; RegexSetBuilder::new(empty).build().unwrap() } /// Returns true if and only if one of the regexes in this set matches /// the haystack given. /// /// This method should be preferred if you only need to test whether any /// of the regexes in the set should match, but don't care about *which* /// regexes matched. This is because the underlying matching engine will /// quit immediately after seeing the first match instead of continuing to /// find all matches. /// /// Note that as with searches using [`Regex`](crate::Regex), the /// expression is unanchored by default. That is, if the regex does not /// start with `^` or `\A`, or end with `$` or `\z`, then it is permitted /// to match anywhere in the haystack. 
/// /// # Example /// /// Tests whether a set matches somewhere in a haystack: /// /// ``` /// use regex::RegexSet; /// /// let set = RegexSet::new([r"\w+", r"\d+"]).unwrap(); /// assert!(set.is_match("foo")); /// assert!(!set.is_match("☃")); /// ``` #[inline] pub fn is_match(&self, haystack: &str) -> bool { self.is_match_at(haystack, 0) } /// Returns true if and only if one of the regexes in this set matches the /// haystack given, with the search starting at the offset given. /// /// The significance of the starting point is that it takes the surrounding /// context into consideration. For example, the `\A` anchor can only /// match when `start == 0`. /// /// # Panics /// /// This panics when `start >= haystack.len() + 1`. /// /// # Example /// /// This example shows the significance of `start`. Namely, consider a /// haystack `foobar` and a desire to execute a search starting at offset /// `3`. You could search a substring explicitly, but then the look-around /// assertions won't work correctly. Instead, you can use this method to /// specify the start position of a search. /// /// ``` /// use regex::RegexSet; /// /// let set = RegexSet::new([r"\bbar\b", r"(?m)^bar$"]).unwrap(); /// let hay = "foobar"; /// // We get a match here, but it's probably not intended. /// assert!(set.is_match(&hay[3..])); /// // No match because the assertions take the context into account. /// assert!(!set.is_match_at(hay, 3)); /// ``` #[inline] pub fn is_match_at(&self, haystack: &str, start: usize) -> bool { self.meta.is_match(Input::new(haystack).span(start..haystack.len())) } /// Returns the set of regexes that match in the given haystack. /// /// The set returned contains the index of each regex that matches in /// the given haystack. The index is in correspondence with the order of /// regular expressions given to `RegexSet`'s constructor. /// /// The set can also be used to iterate over the matched indices. The order /// of iteration is always ascending with respect to the matching indices. /// /// Note that as with searches using [`Regex`](crate::Regex), the /// expression is unanchored by default. That is, if the regex does not /// start with `^` or `\A`, or end with `$` or `\z`, then it is permitted /// to match anywhere in the haystack. /// /// # Example /// /// Tests which regular expressions match the given haystack: /// /// ``` /// use regex::RegexSet; /// /// let set = RegexSet::new([ /// r"\w+", /// r"\d+", /// r"\pL+", /// r"foo", /// r"bar", /// r"barfoo", /// r"foobar", /// ]).unwrap(); /// let matches: Vec<_> = set.matches("foobar").into_iter().collect(); /// assert_eq!(matches, vec![0, 2, 3, 4, 6]); /// /// // You can also test whether a particular regex matched: /// let matches = set.matches("foobar"); /// assert!(!matches.matched(5)); /// assert!(matches.matched(6)); /// ``` #[inline] pub fn matches(&self, haystack: &str) -> SetMatches { self.matches_at(haystack, 0) } /// Returns the set of regexes that match in the given haystack. /// /// The set returned contains the index of each regex that matches in /// the given haystack. The index is in correspondence with the order of /// regular expressions given to `RegexSet`'s constructor. /// /// The set can also be used to iterate over the matched indices. The order /// of iteration is always ascending with respect to the matching indices. /// /// The significance of the starting point is that it takes the surrounding /// context into consideration. For example, the `\A` anchor can only /// match when `start == 0`. 
/// /// # Panics /// /// This panics when `start >= haystack.len() + 1`. /// /// # Example /// /// Tests which regular expressions match the given haystack: /// /// ``` /// use regex::RegexSet; /// /// let set = RegexSet::new([r"\bbar\b", r"(?m)^bar$"]).unwrap(); /// let hay = "foobar"; /// // We get matches here, but it's probably not intended. /// let matches: Vec<_> = set.matches(&hay[3..]).into_iter().collect(); /// assert_eq!(matches, vec![0, 1]); /// // No matches because the assertions take the context into account. /// let matches: Vec<_> = set.matches_at(hay, 3).into_iter().collect(); /// assert_eq!(matches, vec![]); /// ``` #[inline] pub fn matches_at(&self, haystack: &str, start: usize) -> SetMatches { let input = Input::new(haystack).span(start..haystack.len()); let mut patset = PatternSet::new(self.meta.pattern_len()); self.meta.which_overlapping_matches(&input, &mut patset); SetMatches(patset) } /// Returns the same as matches, but starts the search at the given /// offset and stores the matches into the slice given. /// /// The significance of the starting point is that it takes the surrounding /// context into consideration. For example, the `\A` anchor can only /// match when `start == 0`. /// /// `matches` must have a length that is at least the number of regexes /// in this set. /// /// This method returns true if and only if at least one member of /// `matches` is true after executing the set against `haystack`. #[doc(hidden)] #[inline] pub fn matches_read_at( &self, matches: &mut [bool], haystack: &str, start: usize, ) -> bool { // This is pretty dumb. We should try to fix this, but the // regex-automata API doesn't provide a way to store matches in an // arbitrary &mut [bool]. Thankfully, this API is is doc(hidden) and // thus not public... But regex-capi currently uses it. We should // fix regex-capi to use a PatternSet, maybe? Not sure... PatternSet // is in regex-automata, not regex. So maybe we should just accept a // 'SetMatches', which is basically just a newtype around PatternSet. let mut patset = PatternSet::new(self.meta.pattern_len()); let mut input = Input::new(haystack); input.set_start(start); self.meta.which_overlapping_matches(&input, &mut patset); for pid in patset.iter() { matches[pid] = true; } !patset.is_empty() } /// An alias for `matches_read_at` to preserve backward compatibility. /// /// The `regex-capi` crate used this method, so to avoid breaking that /// crate, we continue to export it as an undocumented API. #[doc(hidden)] #[inline] pub fn read_matches_at( &self, matches: &mut [bool], haystack: &str, start: usize, ) -> bool { self.matches_read_at(matches, haystack, start) } /// Returns the total number of regexes in this set. /// /// # Example /// /// ``` /// use regex::RegexSet; /// /// assert_eq!(0, RegexSet::empty().len()); /// assert_eq!(1, RegexSet::new([r"[0-9]"]).unwrap().len()); /// assert_eq!(2, RegexSet::new([r"[0-9]", r"[a-z]"]).unwrap().len()); /// ``` #[inline] pub fn len(&self) -> usize { self.meta.pattern_len() } /// Returns `true` if this set contains no regexes. /// /// # Example /// /// ``` /// use regex::RegexSet; /// /// assert!(RegexSet::empty().is_empty()); /// assert!(!RegexSet::new([r"[0-9]"]).unwrap().is_empty()); /// ``` #[inline] pub fn is_empty(&self) -> bool { self.meta.pattern_len() == 0 } /// Returns the regex patterns that this regex set was constructed from. /// /// This function can be used to determine the pattern for a match. 
The /// slice returned has exactly as many patterns as were given to this regex set, /// and the order of the slice is the same as the order of the patterns /// provided to the set. /// /// # Example /// /// ``` /// use regex::RegexSet; /// /// let set = RegexSet::new(&[ /// r"\w+", /// r"\d+", /// r"\pL+", /// r"foo", /// r"bar", /// r"barfoo", /// r"foobar", /// ]).unwrap(); /// let matches: Vec<_> = set /// .matches("foobar") /// .into_iter() /// .map(|index| &set.patterns()[index]) /// .collect(); /// assert_eq!(matches, vec![r"\w+", r"\pL+", r"foo", r"bar", r"foobar"]); /// ``` #[inline] pub fn patterns(&self) -> &[String] { &self.patterns } } impl Default for RegexSet { fn default() -> Self { RegexSet::empty() } } /// A set of matches returned by a regex set. /// /// Values of this type are constructed by [`RegexSet::matches`]. #[derive(Clone, Debug)] pub struct SetMatches(PatternSet); impl SetMatches { /// Whether this set contains any matches. /// /// # Example /// /// ``` /// use regex::RegexSet; /// /// let set = RegexSet::new(&[ /// r"[a-z]+@[a-z]+\.(com|org|net)", /// r"[a-z]+\.(com|org|net)", /// ]).unwrap(); /// let matches = set.matches("<EMAIL>"); /// assert!(matches.matched_any()); /// ``` #[inline] pub fn matched_any(&self) -> bool { !self.0.is_empty() } /// Whether the regex at the given index matched. /// /// The index for a regex is determined by its insertion order upon the /// initial construction of a `RegexSet`, starting at `0`. /// /// # Panics /// /// If `index` is greater than or equal to the number of regexes in the /// original set that produced these matches. Equivalently, when `index` /// is greater than or equal to [`SetMatches::len`]. /// /// # Example /// /// ``` /// use regex::RegexSet; /// /// let set = RegexSet::new([ /// r"[a-z]+@[a-z]+\.(com|org|net)", /// r"[a-z]+\.(com|org|net)", /// ]).unwrap(); /// let matches = set.matches("example.com"); /// assert!(!matches.matched(0)); /// assert!(matches.matched(1)); /// ``` #[inline] pub fn matched(&self, index: usize) -> bool { self.0.contains(PatternID::new_unchecked(index)) } /// The total number of regexes in the set that created these matches. /// /// **WARNING:** This always returns the same value as [`RegexSet::len`]. /// In particular, it does *not* return the number of elements yielded by /// [`SetMatches::iter`]. The only way to determine the total number of /// matched regexes is to iterate over them. /// /// # Example /// /// Notice that this method returns the total number of regexes in the /// original set, and *not* the total number of regexes that matched. /// /// ``` /// use regex::RegexSet; /// /// let set = RegexSet::new([ /// r"[a-z]+@[a-z]+\.(com|org|net)", /// r"[a-z]+\.(com|org|net)", /// ]).unwrap(); /// let matches = set.matches("example.com"); /// // Total number of patterns that matched. /// assert_eq!(1, matches.iter().count()); /// // Total number of patterns in the set. /// assert_eq!(2, matches.len()); /// ``` #[inline] pub fn len(&self) -> usize { self.0.capacity() } /// Returns an iterator over the indices of the regexes that matched. /// /// This will always produce matches in ascending order, where the index /// yielded corresponds to the index of the regex that matched with respect /// to its position when initially building the set.
/// /// # Example /// /// ``` /// use regex::RegexSet; /// /// let set = RegexSet::new([ /// r"[0-9]", /// r"[a-z]", /// r"[A-Z]", /// r"\p{Greek}", /// ]).unwrap(); /// let hay = "βa1"; /// let matches: Vec<_> = set.matches(hay).iter().collect(); /// assert_eq!(matches, vec![0, 1, 3]); /// ``` /// /// Note that `SetMatches` also implements the `IntoIterator` trait, so /// this method is not always needed. For example: /// /// ``` /// use regex::RegexSet; /// /// let set = RegexSet::new([ /// r"[0-9]", /// r"[a-z]", /// r"[A-Z]", /// r"\p{Greek}", /// ]).unwrap(); /// let hay = "βa1"; /// let mut matches = vec![]; /// for index in set.matches(hay) { /// matches.push(index); /// } /// assert_eq!(matches, vec![0, 1, 3]); /// ``` #[inline] pub fn iter(&self) -> SetMatchesIter<'_> { SetMatchesIter(self.0.iter()) } } impl IntoIterator for SetMatches { type IntoIter = SetMatchesIntoIter; type Item = usize; fn into_iter(self) -> Self::IntoIter { let it = 0..self.0.capacity(); SetMatchesIntoIter { patset: self.0, it } } } impl<'a> IntoIterator for &'a SetMatches { type IntoIter = SetMatchesIter<'a>; type Item = usize; fn into_iter(self) -> Self::IntoIter { self.iter() } } /// An owned iterator over the set of matches from a regex set. /// /// This will always produce matches in ascending order of index, where the /// index corresponds to the index of the regex that matched with respect to /// its position when initially building the set. /// /// This iterator is created by calling `SetMatches::into_iter` via the /// `IntoIterator` trait. This is automatically done in `for` loops. /// /// # Example /// /// ``` /// use regex::RegexSet; /// /// let set = RegexSet::new([ /// r"[0-9]", /// r"[a-z]", /// r"[A-Z]", /// r"\p{Greek}", /// ]).unwrap(); /// let hay = "βa1"; /// let mut matches = vec![]; /// for index in set.matches(hay) { /// matches.push(index); /// } /// assert_eq!(matches, vec![0, 1, 3]); /// ``` #[derive(Debug)] pub struct SetMatchesIntoIter { patset: PatternSet, it: core::ops::Range<usize>, } impl Iterator for SetMatchesIntoIter { type Item = usize; fn next(&mut self) -> Option<usize> { loop { let id = self.it.next()?; if self.patset.contains(PatternID::new_unchecked(id)) { return Some(id); } } } fn size_hint(&self) -> (usize, Option<usize>) { self.it.size_hint() } } impl DoubleEndedIterator for SetMatchesIntoIter { fn next_back(&mut self) -> Option<usize> { loop { let id = self.it.next_back()?; if self.patset.contains(PatternID::new_unchecked(id)) { return Some(id); } } } } impl core::iter::FusedIterator for SetMatchesIntoIter {} /// A borrowed iterator over the set of matches from a regex set. /// /// The lifetime `'a` refers to the lifetime of the [`SetMatches`] value that /// created this iterator. /// /// This will always produce matches in ascending order, where the index /// corresponds to the index of the regex that matched with respect to its /// position when initially building the set. /// /// This iterator is created by the [`SetMatches::iter`] method.
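///
/// # Example
///
/// A short usage sketch that just collects the indices yielded by the
/// iterator returned from [`SetMatches::iter`]:
///
/// ```
/// use regex::RegexSet;
///
/// let set = RegexSet::new([r"[0-9]", r"[a-z]"]).unwrap();
/// let matches = set.matches("a1");
/// let got: Vec<usize> = matches.iter().collect();
/// assert_eq!(got, vec![0, 1]);
/// ```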
#[derive(Clone, Debug)] pub struct SetMatchesIter<'a>(PatternSetIter<'a>); impl<'a> Iterator for SetMatchesIter<'a> { type Item = usize; fn next(&mut self) -> Option<usize> { self.0.next().map(|pid| pid.as_usize()) } fn size_hint(&self) -> (usize, Option<usize>) { self.0.size_hint() } } impl<'a> DoubleEndedIterator for SetMatchesIter<'a> { fn next_back(&mut self) -> Option<usize> { self.0.next_back().map(|pid| pid.as_usize()) } } impl<'a> core::iter::FusedIterator for SetMatchesIter<'a> {} impl core::fmt::Debug for RegexSet { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { write!(f, "RegexSet({:?})", self.patterns()) } } <file_sep>/.github/ISSUE_TEMPLATE/bug_report.md --- name: Bug report about: An issue with the current behavior of the regex crate. title: '' labels: '' assignees: '' --- #### What version of regex are you using? If it isn't the latest version, then please upgrade and check whether the bug is still present. #### Describe the bug at a high level. Give a brief description of the actual problem you're observing. #### What are the steps to reproduce the behavior? This section should almost always provide a COMPLETE Rust program that others can compile and run with Cargo. It should be as small as possible. If providing a small and simple reproduction is not easy, please explain why and the maintainers will help you figure out next steps. #### What is the actual behavior? If you provide a Rust program in the previous section, then this should be the output of that program. #### What is the expected behavior? What do you expect the output to be? <file_sep>/regex-cli/args/pikevm.rs use { anyhow::Context, lexopt::{Arg, Parser}, regex_automata::nfa::thompson::{pikevm, NFA}, }; use crate::args::{self, flags, Configurable, Usage}; /// This exposes the configuration knobs for a `PikeVM`. #[derive(Debug, Default)] pub struct Config { pikevm: pikevm::Config, } impl Config { /// Return a `pikevm::Config` object from this configuration. pub fn pikevm(&self) -> anyhow::Result<pikevm::Config> { Ok(self.pikevm.clone()) } /// Builds a `PikeVM` regex engine from the NFA given. pub fn from_nfa(&self, nfa: &NFA) -> anyhow::Result<pikevm::PikeVM> { pikevm::Builder::new() .configure(self.pikevm()?) .build_from_nfa(nfa.clone()) .context("failed to build PikeVM matcher") } } impl Configurable for Config { fn configure( &mut self, p: &mut Parser, arg: &mut Arg, ) -> anyhow::Result<bool> { match *arg { Arg::Short('k') | Arg::Long("match-kind") => { let kind: flags::MatchKind = args::parse(p, "-k/--match-kind")?; self.pikevm = self.pikevm.clone().match_kind(kind.kind); } _ => return Ok(false), } Ok(true) } fn usage(&self) -> &[Usage] { const USAGES: &'static [Usage] = &[flags::MatchKind::USAGE]; USAGES } } <file_sep>/regex-syntax/src/hir/print.rs /*! This module provides a regular expression printer for `Hir`. */ use core::fmt; use crate::{ hir::{ self, visitor::{self, Visitor}, Hir, HirKind, }, is_meta_character, }; /// A builder for constructing a printer. /// /// Note that since a printer doesn't have any configuration knobs, this type /// remains unexported. #[derive(Clone, Debug)] struct PrinterBuilder { _priv: (), } impl Default for PrinterBuilder { fn default() -> PrinterBuilder { PrinterBuilder::new() } } impl PrinterBuilder { fn new() -> PrinterBuilder { PrinterBuilder { _priv: () } } fn build(&self) -> Printer { Printer { _priv: () } } } /// A printer for a regular expression's high-level intermediate /// representation. 
/// /// A printer converts a high-level intermediate representation (HIR) to a /// regular expression pattern string. This particular printer uses constant /// stack space and heap space proportional to the size of the HIR. /// /// Since this printer is only using the HIR, the pattern it prints will likely /// not resemble the original pattern at all. For example, a pattern like /// `\pL` will have its entire class written out. /// /// The purpose of this printer is to provide a means to mutate an HIR and then /// build a regular expression from the result of that mutation. (A regex /// library could provide a constructor from this HIR explicitly, but that /// creates an unnecessary public coupling between the regex library and this /// specific HIR representation.) #[derive(Debug)] pub struct Printer { _priv: (), } impl Printer { /// Create a new printer. pub fn new() -> Printer { PrinterBuilder::new().build() } /// Print the given `Hir` to the given writer. The writer must implement /// `fmt::Write`. Typical implementations of `fmt::Write` that can be used /// here are a `fmt::Formatter` (which is available in `fmt::Display` /// implementations) or a `&mut String`. pub fn print<W: fmt::Write>(&mut self, hir: &Hir, wtr: W) -> fmt::Result { visitor::visit(hir, Writer { wtr }) } } #[derive(Debug)] struct Writer<W> { wtr: W, } impl<W: fmt::Write> Visitor for Writer<W> { type Output = (); type Err = fmt::Error; fn finish(self) -> fmt::Result { Ok(()) } fn visit_pre(&mut self, hir: &Hir) -> fmt::Result { match *hir.kind() { HirKind::Empty => { // Technically an empty sub-expression could be "printed" by // just ignoring it, but in practice, you could have a // repetition operator attached to an empty expression, and you // really need something in the concrete syntax to make that // work as you'd expect. self.wtr.write_str(r"(?:)")?; } // Repetition operators are strictly suffix oriented. HirKind::Repetition(_) => {} HirKind::Literal(hir::Literal(ref bytes)) => { // See the comment on the 'Concat' and 'Alternation' case below // for why we put parens here. Literals are, conceptually, // a special case of concatenation where each element is a // character. The HIR flattens this into a Box<[u8]>, but we // still need to treat it like a concatenation for correct // printing. As a special case, we don't write parens if there // is only one character. One character means there is no // concat so we don't need parens. Adding parens would still be // correct, but we drop them here because it tends to create // rather noisy regexes even in simple cases.
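                // For example, the multi-character literal `abc` is written
                // out as `(?:abc)`, while the single-character literal `a` is
                // written as just `a`.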
let result = core::str::from_utf8(bytes); let len = result.map_or(bytes.len(), |s| s.chars().count()); if len > 1 { self.wtr.write_str(r"(?:")?; } match result { Ok(string) => { for c in string.chars() { self.write_literal_char(c)?; } } Err(_) => { for &b in bytes.iter() { self.write_literal_byte(b)?; } } } if len > 1 { self.wtr.write_str(r")")?; } } HirKind::Class(hir::Class::Unicode(ref cls)) => { if cls.ranges().is_empty() { return self.wtr.write_str("[a&&b]"); } self.wtr.write_str("[")?; for range in cls.iter() { if range.start() == range.end() { self.write_literal_char(range.start())?; } else if u32::from(range.start()) + 1 == u32::from(range.end()) { self.write_literal_char(range.start())?; self.write_literal_char(range.end())?; } else { self.write_literal_char(range.start())?; self.wtr.write_str("-")?; self.write_literal_char(range.end())?; } } self.wtr.write_str("]")?; } HirKind::Class(hir::Class::Bytes(ref cls)) => { if cls.ranges().is_empty() { return self.wtr.write_str("[a&&b]"); } self.wtr.write_str("(?-u:[")?; for range in cls.iter() { if range.start() == range.end() { self.write_literal_class_byte(range.start())?; } else if range.start() + 1 == range.end() { self.write_literal_class_byte(range.start())?; self.write_literal_class_byte(range.end())?; } else { self.write_literal_class_byte(range.start())?; self.wtr.write_str("-")?; self.write_literal_class_byte(range.end())?; } } self.wtr.write_str("])")?; } HirKind::Look(ref look) => match *look { hir::Look::Start => { self.wtr.write_str(r"\A")?; } hir::Look::End => { self.wtr.write_str(r"\z")?; } hir::Look::StartLF => { self.wtr.write_str("(?m:^)")?; } hir::Look::EndLF => { self.wtr.write_str("(?m:$)")?; } hir::Look::StartCRLF => { self.wtr.write_str("(?mR:^)")?; } hir::Look::EndCRLF => { self.wtr.write_str("(?mR:$)")?; } hir::Look::WordAscii => { self.wtr.write_str(r"(?-u:\b)")?; } hir::Look::WordAsciiNegate => { self.wtr.write_str(r"(?-u:\B)")?; } hir::Look::WordUnicode => { self.wtr.write_str(r"\b")?; } hir::Look::WordUnicodeNegate => { self.wtr.write_str(r"\B")?; } }, HirKind::Capture(hir::Capture { ref name, .. }) => { self.wtr.write_str("(")?; if let Some(ref name) = *name { write!(self.wtr, "?P<{}>", name)?; } } // Why do this? Wrapping concats and alts in non-capturing groups // is not *always* necessary, but is sometimes necessary. For // example, 'concat(a, alt(b, c))' should be written as 'a(?:b|c)' // and not 'ab|c'. The former is clearly the intended meaning, but // the latter is actually 'alt(concat(a, b), c)'. // // It would be possible to only group these things in cases where // it's strictly necessary, but it requires knowing the parent // expression. And since this technique is simpler and always // correct, we take this route. More to the point, it is a non-goal // of an HIR printer to show a nice easy-to-read regex. Indeed, // its construction forbids it from doing so. Therefore, inserting // extra groups where they aren't necessary is perfectly okay. HirKind::Concat(_) | HirKind::Alternation(_) => { self.wtr.write_str(r"(?:")?; } } Ok(()) } fn visit_post(&mut self, hir: &Hir) -> fmt::Result { match *hir.kind() { // Handled during visit_pre HirKind::Empty | HirKind::Literal(_) | HirKind::Class(_) | HirKind::Look(_) => {} HirKind::Repetition(ref x) => { match (x.min, x.max) { (0, Some(1)) => { self.wtr.write_str("?")?; } (0, None) => { self.wtr.write_str("*")?; } (1, None) => { self.wtr.write_str("+")?; } (1, Some(1)) => { // 'a{1}' and 'a{1}?' are exactly equivalent to 'a'. 
return Ok(()); } (m, None) => { write!(self.wtr, "{{{},}}", m)?; } (m, Some(n)) if m == n => { write!(self.wtr, "{{{}}}", m)?; // a{m} and a{m}? are always exactly equivalent. return Ok(()); } (m, Some(n)) => { write!(self.wtr, "{{{},{}}}", m, n)?; } } if !x.greedy { self.wtr.write_str("?")?; } } HirKind::Capture(_) | HirKind::Concat(_) | HirKind::Alternation(_) => { self.wtr.write_str(r")")?; } } Ok(()) } fn visit_alternation_in(&mut self) -> fmt::Result { self.wtr.write_str("|") } } impl<W: fmt::Write> Writer<W> { fn write_literal_char(&mut self, c: char) -> fmt::Result { if is_meta_character(c) { self.wtr.write_str("\\")?; } self.wtr.write_char(c) } fn write_literal_byte(&mut self, b: u8) -> fmt::Result { if b <= 0x7F && !b.is_ascii_control() && !b.is_ascii_whitespace() { self.write_literal_char(char::try_from(b).unwrap()) } else { write!(self.wtr, "(?-u:\\x{:02X})", b) } } fn write_literal_class_byte(&mut self, b: u8) -> fmt::Result { if b <= 0x7F && !b.is_ascii_control() && !b.is_ascii_whitespace() { self.write_literal_char(char::try_from(b).unwrap()) } else { write!(self.wtr, "\\x{:02X}", b) } } } #[cfg(test)] mod tests { use alloc::{ boxed::Box, string::{String, ToString}, }; use crate::ParserBuilder; use super::*; fn roundtrip(given: &str, expected: &str) { roundtrip_with(|b| b, given, expected); } fn roundtrip_bytes(given: &str, expected: &str) { roundtrip_with(|b| b.utf8(false), given, expected); } fn roundtrip_with<F>(mut f: F, given: &str, expected: &str) where F: FnMut(&mut ParserBuilder) -> &mut ParserBuilder, { let mut builder = ParserBuilder::new(); f(&mut builder); let hir = builder.build().parse(given).unwrap(); let mut printer = Printer::new(); let mut dst = String::new(); printer.print(&hir, &mut dst).unwrap(); // Check that the result is actually valid. builder.build().parse(&dst).unwrap(); assert_eq!(expected, dst); } #[test] fn print_literal() { roundtrip("a", "a"); roundtrip(r"\xff", "\u{FF}"); roundtrip_bytes(r"\xff", "\u{FF}"); roundtrip_bytes(r"(?-u)\xff", r"(?-u:\xFF)"); roundtrip("☃", "☃"); } #[test] fn print_class() { roundtrip(r"[a]", r"a"); roundtrip(r"[ab]", r"[ab]"); roundtrip(r"[a-z]", r"[a-z]"); roundtrip(r"[a-z--b-c--x-y]", r"[ad-wz]"); roundtrip(r"[^\x01-\u{10FFFF}]", "\u{0}"); roundtrip(r"[-]", r"\-"); roundtrip(r"[☃-⛄]", r"[☃-⛄]"); roundtrip(r"(?-u)[a]", r"a"); roundtrip(r"(?-u)[ab]", r"(?-u:[ab])"); roundtrip(r"(?-u)[a-z]", r"(?-u:[a-z])"); roundtrip_bytes(r"(?-u)[a-\xFF]", r"(?-u:[a-\xFF])"); // The following test that the printer escapes meta characters // in character classes. roundtrip(r"[\[]", r"\["); roundtrip(r"[Z-_]", r"[Z-_]"); roundtrip(r"[Z-_--Z]", r"[\[-_]"); // The following test that the printer escapes meta characters // in byte oriented character classes. roundtrip_bytes(r"(?-u)[\[]", r"\["); roundtrip_bytes(r"(?-u)[Z-_]", r"(?-u:[Z-_])"); roundtrip_bytes(r"(?-u)[Z-_--Z]", r"(?-u:[\[-_])"); // This tests that an empty character class is correctly roundtripped. 
#[cfg(feature = "unicode-gencat")] roundtrip(r"\P{any}", r"[a&&b]"); roundtrip_bytes(r"(?-u)[^\x00-\xFF]", r"[a&&b]"); } #[test] fn print_anchor() { roundtrip(r"^", r"\A"); roundtrip(r"$", r"\z"); roundtrip(r"(?m)^", r"(?m:^)"); roundtrip(r"(?m)$", r"(?m:$)"); } #[test] fn print_word_boundary() { roundtrip(r"\b", r"\b"); roundtrip(r"\B", r"\B"); roundtrip(r"(?-u)\b", r"(?-u:\b)"); roundtrip_bytes(r"(?-u)\B", r"(?-u:\B)"); } #[test] fn print_repetition() { roundtrip("a?", "a?"); roundtrip("a??", "a??"); roundtrip("(?U)a?", "a??"); roundtrip("a*", "a*"); roundtrip("a*?", "a*?"); roundtrip("(?U)a*", "a*?"); roundtrip("a+", "a+"); roundtrip("a+?", "a+?"); roundtrip("(?U)a+", "a+?"); roundtrip("a{1}", "a"); roundtrip("a{2}", "a{2}"); roundtrip("a{1,}", "a+"); roundtrip("a{1,5}", "a{1,5}"); roundtrip("a{1}?", "a"); roundtrip("a{2}?", "a{2}"); roundtrip("a{1,}?", "a+?"); roundtrip("a{1,5}?", "a{1,5}?"); roundtrip("(?U)a{1}", "a"); roundtrip("(?U)a{2}", "a{2}"); roundtrip("(?U)a{1,}", "a+?"); roundtrip("(?U)a{1,5}", "a{1,5}?"); // Test that various zero-length repetitions always translate to an // empty regex. This is more a property of HIR's smart constructors // than the printer though. roundtrip("a{0}", "(?:)"); roundtrip("(?:ab){0}", "(?:)"); #[cfg(feature = "unicode-gencat")] { roundtrip(r"\p{any}{0}", "(?:)"); roundtrip(r"\P{any}{0}", "(?:)"); } } #[test] fn print_group() { roundtrip("()", "((?:))"); roundtrip("(?P<foo>)", "(?P<foo>(?:))"); roundtrip("(?:)", "(?:)"); roundtrip("(a)", "(a)"); roundtrip("(?P<foo>a)", "(?P<foo>a)"); roundtrip("(?:a)", "a"); roundtrip("((((a))))", "((((a))))"); } #[test] fn print_alternation() { roundtrip("|", "(?:(?:)|(?:))"); roundtrip("||", "(?:(?:)|(?:)|(?:))"); roundtrip("a|b", "[ab]"); roundtrip("ab|cd", "(?:(?:ab)|(?:cd))"); roundtrip("a|b|c", "[a-c]"); roundtrip("ab|cd|ef", "(?:(?:ab)|(?:cd)|(?:ef))"); roundtrip("foo|bar|quux", "(?:(?:foo)|(?:bar)|(?:quux))"); } // This is a regression test that stresses a peculiarity of how the HIR // is both constructed and printed. Namely, it is legal for a repetition // to directly contain a concatenation. This particular construct isn't // really possible to build from the concrete syntax directly, since you'd // be forced to put the concatenation into (at least) a non-capturing // group. Concurrently, the printer doesn't consider this case and just // kind of naively prints the child expression and tacks on the repetition // operator. // // As a result, if you attached '+' to a 'concat(a, b)', the printer gives // you 'ab+', but clearly it really should be '(?:ab)+'. // // This bug isn't easy to surface because most ways of building an HIR // come directly from the concrete syntax, and as mentioned above, it just // isn't possible to build this kind of HIR from the concrete syntax. // Nevertheless, this is definitely a bug. 
// // See: https://github.com/rust-lang/regex/issues/731 #[test] fn regression_repetition_concat() { let expr = Hir::concat(alloc::vec![ Hir::literal("x".as_bytes()), Hir::repetition(hir::Repetition { min: 1, max: None, greedy: true, sub: Box::new(Hir::literal("ab".as_bytes())), }), Hir::literal("y".as_bytes()), ]); assert_eq!(r"(?:x(?:ab)+y)", expr.to_string()); let expr = Hir::concat(alloc::vec![ Hir::look(hir::Look::Start), Hir::repetition(hir::Repetition { min: 1, max: None, greedy: true, sub: Box::new(Hir::concat(alloc::vec![ Hir::look(hir::Look::Start), Hir::look(hir::Look::End), ])), }), Hir::look(hir::Look::End), ]); assert_eq!(r"(?:\A\A\z\z)", expr.to_string()); } // Just like regression_repetition_concat, but with the repetition using // an alternation as a child expression instead. // // See: https://github.com/rust-lang/regex/issues/731 #[test] fn regression_repetition_alternation() { let expr = Hir::concat(alloc::vec![ Hir::literal("ab".as_bytes()), Hir::repetition(hir::Repetition { min: 1, max: None, greedy: true, sub: Box::new(Hir::alternation(alloc::vec![ Hir::literal("cd".as_bytes()), Hir::literal("ef".as_bytes()), ])), }), Hir::literal("gh".as_bytes()), ]); assert_eq!(r"(?:(?:ab)(?:(?:cd)|(?:ef))+(?:gh))", expr.to_string()); let expr = Hir::concat(alloc::vec![ Hir::look(hir::Look::Start), Hir::repetition(hir::Repetition { min: 1, max: None, greedy: true, sub: Box::new(Hir::alternation(alloc::vec![ Hir::look(hir::Look::Start), Hir::look(hir::Look::End), ])), }), Hir::look(hir::Look::End), ]); assert_eq!(r"(?:\A(?:\A|\z)\z)", expr.to_string()); } // This regression test is very similar in flavor to // regression_repetition_concat in that the root of the issue lies in a // peculiarity of how the HIR is represented and how the printer writes it // out. Like the other regression, this one is also rooted in the fact that // you can't produce the peculiar HIR from the concrete syntax. Namely, you // just can't have a 'concat(a, alt(b, c))' because the 'alt' will normally // be in (at least) a non-capturing group. Why? Because the '|' has very // low precedence (lower that concatenation), and so something like 'ab|c' // is actually 'alt(ab, c)'. // // See: https://github.com/rust-lang/regex/issues/516 #[test] fn regression_alternation_concat() { let expr = Hir::concat(alloc::vec![ Hir::literal("ab".as_bytes()), Hir::alternation(alloc::vec![ Hir::literal("mn".as_bytes()), Hir::literal("xy".as_bytes()), ]), ]); assert_eq!(r"(?:(?:ab)(?:(?:mn)|(?:xy)))", expr.to_string()); let expr = Hir::concat(alloc::vec![ Hir::look(hir::Look::Start), Hir::alternation(alloc::vec![ Hir::look(hir::Look::Start), Hir::look(hir::Look::End), ]), ]); assert_eq!(r"(?:\A(?:\A|\z))", expr.to_string()); } } <file_sep>/regex-lite/src/utf8.rs /// Returns true if and only if the given byte is considered a word character. /// This only applies to ASCII. pub(crate) fn is_word_byte(b: u8) -> bool { const fn mkwordset() -> [bool; 256] { // FIXME: Use as_usize() once const functions in traits are stable. let mut set = [false; 256]; set[b'_' as usize] = true; let mut byte = b'0'; while byte <= b'9' { set[byte as usize] = true; byte += 1; } byte = b'A'; while byte <= b'Z' { set[byte as usize] = true; byte += 1; } byte = b'a'; while byte <= b'z' { set[byte as usize] = true; byte += 1; } set } const WORD: [bool; 256] = mkwordset(); WORD[b as usize] } /// The accept state index. When we enter this state, we know we've found a /// valid Unicode scalar value. const ACCEPT: usize = 12; /// The reject state index. 
When we enter this state, we know that we've found /// invalid UTF-8. const REJECT: usize = 0; /// Like `decode`, but automatically converts the `None` case to the /// replacement codepoint. pub(crate) fn decode_lossy<B: AsRef<[u8]>>(slice: B) -> (char, usize) { match decode(slice) { (Some(ch), size) => (ch, size), (None, size) => ('\u{FFFD}', size), } } /// UTF-8 decode a single Unicode scalar value from the beginning of a slice. /// /// When successful, the corresponding Unicode scalar value is returned along /// with the number of bytes it was encoded with. The number of bytes consumed /// for a successful decode is always between 1 and 4, inclusive. /// /// When unsuccessful, `None` is returned along with the number of bytes that /// make up a maximal prefix of a valid UTF-8 code unit sequence. In this case, /// the number of bytes consumed is always between 0 and 3, inclusive, where /// 0 is only returned when `slice` is empty. pub(crate) fn decode<B: AsRef<[u8]>>(slice: B) -> (Option<char>, usize) { let slice = slice.as_ref(); match slice.get(0) { None => return (None, 0), Some(&b) if b <= 0x7F => return (Some(b as char), 1), _ => {} } let (mut state, mut cp, mut i) = (ACCEPT, 0, 0); while i < slice.len() { decode_step(&mut state, &mut cp, slice[i]); i += 1; if state == ACCEPT { // OK since `decode_step` guarantees that `cp` is a valid Unicode // scalar value in an ACCEPT state. // // We don't have to use safe code here, but do so because perf // isn't our primary objective in regex-lite. let ch = char::from_u32(cp).unwrap(); return (Some(ch), i); } else if state == REJECT { // At this point, we always want to advance at least one byte. return (None, core::cmp::max(1, i.saturating_sub(1))); } } (None, i) } /// Transitions to the next state and updates `cp` while it does. fn decode_step(state: &mut usize, cp: &mut u32, b: u8) { // Splits the space of all bytes into equivalence classes, such that // any byte in the same class can never discriminate between whether a // particular sequence is valid UTF-8 or not. 
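    // For example, all ASCII bytes (0x00-0x7F) share class 0, while the
    // continuation byte ranges 0x80-0x8F, 0x90-0x9F and 0xA0-0xBF get the
    // distinct classes 1, 9 and 7, since they behave differently after the
    // special leading bytes 0xE0, 0xED, 0xF0 and 0xF4.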
#[cfg_attr(rustfmt, rustfmt::skip)] const CLASSES: [u8; 256] = [ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 8,8,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 10,3,3,3,3,3,3,3,3,3,3,3,3,4,3,3, 11,6,6,6,5,8,8,8,8,8,8,8,8,8,8,8, ]; // A state machine taken from `bstr` which was in turn adapted from: // https://bjoern.hoehrmann.de/utf-8/decoder/dfa/ #[cfg_attr(rustfmt, rustfmt::skip)] const STATES_FORWARD: &'static [u8] = &[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 0, 24, 36, 60, 96, 84, 0, 0, 0, 48, 72, 0, 12, 0, 0, 0, 0, 0, 12, 0, 12, 0, 0, 0, 24, 0, 0, 0, 0, 0, 24, 0, 24, 0, 0, 0, 0, 0, 0, 0, 0, 0, 24, 0, 0, 0, 0, 0, 24, 0, 0, 0, 0, 0, 0, 0, 24, 0, 0, 0, 0, 0, 0, 0, 0, 0, 36, 0, 36, 0, 0, 0, 36, 0, 0, 0, 0, 0, 36, 0, 36, 0, 0, 0, 36, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ]; let class = CLASSES[usize::from(b)]; if *state == ACCEPT { *cp = (0xFF >> class) & (b as u32); } else { *cp = (b as u32 & 0b111111) | (*cp << 6); } *state = usize::from(STATES_FORWARD[*state + usize::from(class)]); } #[cfg(test)] mod tests { use alloc::{vec, vec::Vec}; use super::*; #[test] fn decode_valid() { fn d(mut s: &str) -> Vec<char> { let mut chars = vec![]; while !s.is_empty() { let (ch, size) = decode(s.as_bytes()); s = &s[size..]; chars.push(ch.unwrap()); } chars } assert_eq!(vec!['☃'], d("☃")); assert_eq!(vec!['☃', '☃'], d("☃☃")); assert_eq!(vec!['α', 'β', 'γ', 'δ', 'ε'], d("αβγδε")); assert_eq!(vec!['☃', '⛄', '⛇'], d("☃⛄⛇")); assert_eq!(vec!['𝗮', '𝗯', '𝗰', '𝗱', '𝗲'], d("𝗮𝗯𝗰𝗱𝗲")); } #[test] fn decode_invalid() { let (ch, size) = decode(b""); assert_eq!(None, ch); assert_eq!(0, size); let (ch, size) = decode(b"\xFF"); assert_eq!(None, ch); assert_eq!(1, size); let (ch, size) = decode(b"\xCE\xF0"); assert_eq!(None, ch); assert_eq!(1, size); let (ch, size) = decode(b"\xE2\x98\xF0"); assert_eq!(None, ch); assert_eq!(2, size); let (ch, size) = decode(b"\xF0\x9D\x9D"); assert_eq!(None, ch); assert_eq!(3, size); let (ch, size) = decode(b"\xF0\x9D\x9D\xF0"); assert_eq!(None, ch); assert_eq!(3, size); let (ch, size) = decode(b"\xF0\x82\x82\xAC"); assert_eq!(None, ch); assert_eq!(1, size); let (ch, size) = decode(b"\xED\xA0\x80"); assert_eq!(None, ch); assert_eq!(1, size); let (ch, size) = decode(b"\xCEa"); assert_eq!(None, ch); assert_eq!(1, size); let (ch, size) = decode(b"\xE2\x98a"); assert_eq!(None, ch); assert_eq!(2, size); let (ch, size) = decode(b"\xF0\x9D\x9Ca"); assert_eq!(None, ch); assert_eq!(3, size); } #[test] fn decode_lossily() { let (ch, size) = decode_lossy(b""); assert_eq!('\u{FFFD}', ch); assert_eq!(0, size); let (ch, size) = decode_lossy(b"\xFF"); assert_eq!('\u{FFFD}', ch); assert_eq!(1, size); let (ch, size) = decode_lossy(b"\xCE\xF0"); assert_eq!('\u{FFFD}', ch); assert_eq!(1, size); let (ch, size) = decode_lossy(b"\xE2\x98\xF0"); assert_eq!('\u{FFFD}', ch); assert_eq!(2, size); let (ch, size) = decode_lossy(b"\xF0\x9D\x9D\xF0"); assert_eq!('\u{FFFD}', ch); assert_eq!(3, size); let (ch, size) = decode_lossy(b"\xF0\x82\x82\xAC"); assert_eq!('\u{FFFD}', ch); assert_eq!(1, size); let (ch, size) = decode_lossy(b"\xED\xA0\x80"); assert_eq!('\u{FFFD}', ch); assert_eq!(1, size); let (ch, size) = decode_lossy(b"\xCEa"); assert_eq!('\u{FFFD}', ch); 
assert_eq!(1, size); let (ch, size) = decode_lossy(b"\xE2\x98a"); assert_eq!('\u{FFFD}', ch); assert_eq!(2, size); let (ch, size) = decode_lossy(b"\xF0\x9D\x9Ca"); assert_eq!('\u{FFFD}', ch); assert_eq!(3, size); } } <file_sep>/regex-syntax/src/lib.rs /*! This crate provides a robust regular expression parser. This crate defines two primary types: * [`Ast`](ast::Ast) is the abstract syntax of a regular expression. An abstract syntax corresponds to a *structured representation* of the concrete syntax of a regular expression, where the concrete syntax is the pattern string itself (e.g., `foo(bar)+`). Given some abstract syntax, it can be converted back to the original concrete syntax (modulo some details, like whitespace). To a first approximation, the abstract syntax is complex and difficult to analyze. * [`Hir`](hir::Hir) is the high-level intermediate representation ("HIR" or "high-level IR" for short) of regular expression. It corresponds to an intermediate state of a regular expression that sits between the abstract syntax and the low level compiled opcodes that are eventually responsible for executing a regular expression search. Given some high-level IR, it is not possible to produce the original concrete syntax (although it is possible to produce an equivalent concrete syntax, but it will likely scarcely resemble the original pattern). To a first approximation, the high-level IR is simple and easy to analyze. These two types come with conversion routines: * An [`ast::parse::Parser`] converts concrete syntax (a `&str`) to an [`Ast`](ast::Ast). * A [`hir::translate::Translator`] converts an [`Ast`](ast::Ast) to a [`Hir`](hir::Hir). As a convenience, the above two conversion routines are combined into one via the top-level [`Parser`] type. This `Parser` will first convert your pattern to an `Ast` and then convert the `Ast` to an `Hir`. It's also exposed as top-level [`parse`] free function. # Example This example shows how to parse a pattern string into its HIR: ``` use regex_syntax::{hir::Hir, parse}; let hir = parse("a|b")?; assert_eq!(hir, Hir::alternation(vec![ Hir::literal("a".as_bytes()), Hir::literal("b".as_bytes()), ])); # Ok::<(), Box<dyn std::error::Error>>(()) ``` # Concrete syntax supported The concrete syntax is documented as part of the public API of the [`regex` crate](https://docs.rs/regex/%2A/regex/#syntax). # Input safety A key feature of this library is that it is safe to use with end user facing input. This plays a significant role in the internal implementation. In particular: 1. Parsers provide a `nest_limit` option that permits callers to control how deeply nested a regular expression is allowed to be. This makes it possible to do case analysis over an `Ast` or an `Hir` using recursion without worrying about stack overflow. 2. Since relying on a particular stack size is brittle, this crate goes to great lengths to ensure that all interactions with both the `Ast` and the `Hir` do not use recursion. Namely, they use constant stack space and heap space proportional to the size of the original pattern string (in bytes). This includes the type's corresponding destructors. (One exception to this is literal extraction, but this will eventually get fixed.) # Error reporting The `Display` implementations on all `Error` types exposed in this library provide nice human readable errors that are suitable for showing to end users in a monospace font. # Literal extraction This crate provides limited support for [literal extraction from `Hir` values](hir::literal). 
Be warned that literal extraction uses recursion, and therefore, stack size proportional to the size of the `Hir`. The purpose of literal extraction is to speed up searches. That is, if you know a regular expression must match a prefix or suffix literal, then it is often quicker to search for instances of that literal, and then confirm or deny the match using the full regular expression engine. These optimizations are done automatically in the `regex` crate. # Crate features An important feature provided by this crate is its Unicode support. This includes things like case folding, boolean properties, general categories, scripts and Unicode-aware support for the Perl classes `\w`, `\s` and `\d`. However, a downside of this support is that it requires bundling several Unicode data tables that are substantial in size. A fair number of use cases do not require full Unicode support. For this reason, this crate exposes a number of features to control which Unicode data is available. If a regular expression attempts to use a Unicode feature that is not available because the corresponding crate feature was disabled, then translating that regular expression to an `Hir` will return an error. (It is still possible to construct an `Ast` for such a regular expression, since Unicode data is not used until translation to an `Hir`.) Stated differently, enabling or disabling any of the features below can only add or subtract from the total set of valid regular expressions. Enabling or disabling a feature will never modify the match semantics of a regular expression. The following features are available: * **std** - Enables support for the standard library. This feature is enabled by default. When disabled, only `core` and `alloc` are used. Otherwise, enabling `std` generally just enables `std::error::Error` trait impls for the various error types. * **unicode** - Enables all Unicode features. This feature is enabled by default, and will always cover all Unicode features, even if more are added in the future. * **unicode-age** - Provide the data for the [Unicode `Age` property](https://www.unicode.org/reports/tr44/tr44-24.html#Character_Age). This makes it possible to use classes like `\p{Age:6.0}` to refer to all codepoints first introduced in Unicode 6.0. * **unicode-bool** - Provide the data for numerous Unicode boolean properties. The full list is not included here, but contains properties like `Alphabetic`, `Emoji`, `Lowercase`, `Math`, `Uppercase` and `White_Space`. * **unicode-case** - Provide the data for case insensitive matching using [Unicode's "simple loose matches" specification](https://www.unicode.org/reports/tr18/#Simple_Loose_Matches). * **unicode-gencat** - Provide the data for [Unicode general categories](https://www.unicode.org/reports/tr44/tr44-24.html#General_Category_Values). This includes, but is not limited to, `Decimal_Number`, `Letter`, `Math_Symbol`, `Number` and `Punctuation`. * **unicode-perl** - Provide the data for supporting the Unicode-aware Perl character classes, corresponding to `\w`, `\s` and `\d`. This is also necessary for using Unicode-aware word boundary assertions. Note that if this feature is disabled, the `\s` and `\d` character classes are still available if the `unicode-bool` and `unicode-gencat` features are enabled, respectively. * **unicode-script** - Provide the data for [Unicode scripts and script extensions](https://www.unicode.org/reports/tr24/). This includes, but is not limited to, `Arabic`, `Cyrillic`, `Hebrew`, `Latin` and `Thai`. 
* **unicode-segment** - Provide the data necessary to provide the properties used to implement the [Unicode text segmentation algorithms](https://www.unicode.org/reports/tr29/). This enables using classes like `\p{gcb=Extend}`, `\p{wb=Katakana}` and `\p{sb=ATerm}`. * **arbitrary** - Enabling this feature introduces a public dependency on the [`arbitrary`](https://crates.io/crates/arbitrary) crate. Namely, it implements the `Arbitrary` trait from that crate for the [`Ast`](crate::ast::Ast) type. This feature is disabled by default. */ #![no_std] #![forbid(unsafe_code)] #![deny(missing_docs, rustdoc::broken_intra_doc_links)] #![warn(missing_debug_implementations)] // MSRV(1.62): Allow unused warnings. Needed for the 'allow' below, // since the warning is no longer triggered in newer Rust releases. // Once the 'allow(mutable_borrow_reservation_conflict)' can be // removed, we can remove the 'allow(renamed_and_removed_lints)' too. #![allow(renamed_and_removed_lints)] // MSRV(1.62): This gets triggered on Rust <1.62, and since our MSRV // is Rust 1.60 at the time of writing, a warning is displayed. But // the lang team decided the code pattern flagged by this warning is // OK, so the warning is innocuous. We can remove this explicit allow // once we get to a Rust release where the warning is no longer // triggered. I believe that's Rust 1.62. #![allow(mutable_borrow_reservation_conflict)] #![cfg_attr(docsrs, feature(doc_auto_cfg))] #[cfg(any(test, feature = "std"))] extern crate std; extern crate alloc; pub use crate::{ error::Error, parser::{parse, Parser, ParserBuilder}, unicode::UnicodeWordError, }; use alloc::string::String; pub mod ast; mod debug; mod either; mod error; pub mod hir; mod parser; mod rank; mod unicode; mod unicode_tables; pub mod utf8; /// Escapes all regular expression meta characters in `text`. /// /// The string returned may be safely used as a literal in a regular /// expression. pub fn escape(text: &str) -> String { let mut quoted = String::new(); escape_into(text, &mut quoted); quoted } /// Escapes all meta characters in `text` and writes the result into `buf`. /// /// This will append escape characters into the given buffer. The characters /// that are appended are safe to use as a literal in a regular expression. pub fn escape_into(text: &str, buf: &mut String) { buf.reserve(text.len()); for c in text.chars() { if is_meta_character(c) { buf.push('\\'); } buf.push(c); } } /// Returns true if the given character has significance in a regex. /// /// Generally speaking, these are the only characters which _must_ be escaped /// in order to match their literal meaning. For example, to match a literal /// `|`, one could write `\|`. Sometimes escaping isn't always necessary. For /// example, `-` is treated as a meta character because of its significance /// for writing ranges inside of character classes, but the regex `-` will /// match a literal `-` because `-` has no special meaning outside of character /// classes. /// /// In order to determine whether a character may be escaped at all, the /// [`is_escapeable_character`] routine should be used. The difference between /// `is_meta_character` and `is_escapeable_character` is that the latter will /// return true for some characters that are _not_ meta characters. For /// example, `%` and `\%` both match a literal `%` in all contexts. In other /// words, `is_escapeable_character` includes "superfluous" escapes. 
/// /// Note that the set of characters for which this function returns `true` or /// `false` is fixed and won't change in a semver compatible release. (In this /// case, "semver compatible release" actually refers to the `regex` crate /// itself, since reducing or expanding the set of meta characters would be a /// breaking change for not just `regex-syntax` but also `regex` itself.) /// /// # Example /// /// ``` /// use regex_syntax::is_meta_character; /// /// assert!(is_meta_character('?')); /// assert!(is_meta_character('-')); /// assert!(is_meta_character('&')); /// assert!(is_meta_character('#')); /// /// assert!(!is_meta_character('%')); /// assert!(!is_meta_character('/')); /// assert!(!is_meta_character('!')); /// assert!(!is_meta_character('"')); /// assert!(!is_meta_character('e')); /// ``` pub fn is_meta_character(c: char) -> bool { match c { '\\' | '.' | '+' | '*' | '?' | '(' | ')' | '|' | '[' | ']' | '{' | '}' | '^' | '$' | '#' | '&' | '-' | '~' => true, _ => false, } } /// Returns true if the given character can be escaped in a regex. /// /// This returns true in all cases that `is_meta_character` returns true, but /// also returns true in some cases where `is_meta_character` returns false. /// For example, `%` is not a meta character, but it is escapeable. That is, /// `%` and `\%` both match a literal `%` in all contexts. /// /// The purpose of this routine is to provide knowledge about what characters /// may be escaped. Namely, most regex engines permit "superfluous" escapes /// where characters without any special significance may be escaped even /// though there is no actual _need_ to do so. /// /// This will return false for some characters. For example, `e` is not /// escapeable. Therefore, `\e` will either result in a parse error (which is /// true today), or it could backwards compatibly evolve into a new construct /// with its own meaning. Indeed, that is the purpose of banning _some_ /// superfluous escapes: it provides a way to evolve the syntax in a compatible /// manner. /// /// # Example /// /// ``` /// use regex_syntax::is_escapeable_character; /// /// assert!(is_escapeable_character('?')); /// assert!(is_escapeable_character('-')); /// assert!(is_escapeable_character('&')); /// assert!(is_escapeable_character('#')); /// assert!(is_escapeable_character('%')); /// assert!(is_escapeable_character('/')); /// assert!(is_escapeable_character('!')); /// assert!(is_escapeable_character('"')); /// /// assert!(!is_escapeable_character('e')); /// ``` pub fn is_escapeable_character(c: char) -> bool { // Certainly escapeable if it's a meta character. if is_meta_character(c) { return true; } // Any character that isn't ASCII is definitely not escapeable. There's // no real need to allow things like \☃ right? if !c.is_ascii() { return false; } // Otherwise, we basically say that everything is escapeable unless it's a // letter or digit. Things like \3 are either octal (when enabled) or an // error, and we should keep it that way. Otherwise, letters are reserved // for adding new syntax in a backwards compatible way. match c { '0'..='9' | 'A'..='Z' | 'a'..='z' => false, // While not currently supported, we keep these as not escapeable to // give us some flexibility with respect to supporting the \< and // \> word boundary assertions in the future. By rejecting them as // escapeable, \< and \> will result in a parse error. Thus, we can // turn them into something else in the future without it being a // backwards incompatible change. 
'<' | '>' => false, _ => true, } } /// Returns true if and only if the given character is a Unicode word /// character. /// /// A Unicode word character is defined by /// [UTS#18 Annex C](https://unicode.org/reports/tr18/#Compatibility_Properties). /// In particular, a character /// is considered a word character if it is in either of the `Alphabetic` or /// `Join_Control` properties, or is in one of the `Decimal_Number`, `Mark` /// or `Connector_Punctuation` general categories. /// /// # Panics /// /// If the `unicode-perl` feature is not enabled, then this function /// panics. For this reason, it is recommended that callers use /// [`try_is_word_character`] instead. pub fn is_word_character(c: char) -> bool { try_is_word_character(c).expect("unicode-perl feature must be enabled") } /// Returns true if and only if the given character is a Unicode word /// character. /// /// A Unicode word character is defined by /// [UTS#18 Annex C](https://unicode.org/reports/tr18/#Compatibility_Properties). /// In particular, a character /// is considered a word character if it is in either of the `Alphabetic` or /// `Join_Control` properties, or is in one of the `Decimal_Number`, `Mark` /// or `Connector_Punctuation` general categories. /// /// # Errors /// /// If the `unicode-perl` feature is not enabled, then this function always /// returns an error. pub fn try_is_word_character( c: char, ) -> core::result::Result<bool, UnicodeWordError> { unicode::is_word_character(c) } /// Returns true if and only if the given character is an ASCII word character. /// /// An ASCII word character is defined by the following character class: /// `[_0-9a-zA-Z]'. pub fn is_word_byte(c: u8) -> bool { match c { b'_' | b'0'..=b'9' | b'a'..=b'z' | b'A'..=b'Z' => true, _ => false, } } #[cfg(test)] mod tests { use alloc::string::ToString; use super::*; #[test] fn escape_meta() { assert_eq!( escape(r"\.+*?()|[]{}^$#&-~"), r"\\\.\+\*\?\(\)\|\[\]\{\}\^\$\#\&\-\~".to_string() ); } #[test] fn word_byte() { assert!(is_word_byte(b'a')); assert!(!is_word_byte(b'-')); } #[test] #[cfg(feature = "unicode-perl")] fn word_char() { assert!(is_word_character('a'), "ASCII"); assert!(is_word_character('à'), "Latin-1"); assert!(is_word_character('β'), "Greek"); assert!(is_word_character('\u{11011}'), "Brahmi (Unicode 6.0)"); assert!(is_word_character('\u{11611}'), "Modi (Unicode 7.0)"); assert!(is_word_character('\u{11711}'), "Ahom (Unicode 8.0)"); assert!(is_word_character('\u{17828}'), "Tangut (Unicode 9.0)"); assert!(is_word_character('\u{1B1B1}'), "Nushu (Unicode 10.0)"); assert!(is_word_character('\u{16E40}'), "Medefaidrin (Unicode 11.0)"); assert!(!is_word_character('-')); assert!(!is_word_character('☃')); } #[test] #[should_panic] #[cfg(not(feature = "unicode-perl"))] fn word_char_disabled_panic() { assert!(is_word_character('a')); } #[test] #[cfg(not(feature = "unicode-perl"))] fn word_char_disabled_error() { assert!(try_is_word_character('a').is_err()); } } <file_sep>/testdata/word-boundary.toml # Some of these are cribbed from RE2's test suite. # These test \b. Below are tests for \B. 
[[test]] name = "wb1" regex = '\b' haystack = "" matches = [] unicode = false [[test]] name = "wb2" regex = '\b' haystack = "a" matches = [[0, 0], [1, 1]] unicode = false [[test]] name = "wb3" regex = '\b' haystack = "ab" matches = [[0, 0], [2, 2]] unicode = false [[test]] name = "wb4" regex = '^\b' haystack = "ab" matches = [[0, 0]] unicode = false [[test]] name = "wb5" regex = '\b$' haystack = "ab" matches = [[2, 2]] unicode = false [[test]] name = "wb6" regex = '^\b$' haystack = "ab" matches = [] unicode = false [[test]] name = "wb7" regex = '\bbar\b' haystack = "nobar bar foo bar" matches = [[6, 9], [14, 17]] unicode = false [[test]] name = "wb8" regex = 'a\b' haystack = "faoa x" matches = [[3, 4]] unicode = false [[test]] name = "wb9" regex = '\bbar' haystack = "bar x" matches = [[0, 3]] unicode = false [[test]] name = "wb10" regex = '\bbar' haystack = "foo\nbar x" matches = [[4, 7]] unicode = false [[test]] name = "wb11" regex = 'bar\b' haystack = "foobar" matches = [[3, 6]] unicode = false [[test]] name = "wb12" regex = 'bar\b' haystack = "foobar\nxxx" matches = [[3, 6]] unicode = false [[test]] name = "wb13" regex = '(?:foo|bar|[A-Z])\b' haystack = "foo" matches = [[0, 3]] unicode = false [[test]] name = "wb14" regex = '(?:foo|bar|[A-Z])\b' haystack = "foo\n" matches = [[0, 3]] unicode = false [[test]] name = "wb15" regex = '\b(?:foo|bar|[A-Z])' haystack = "foo" matches = [[0, 3]] unicode = false [[test]] name = "wb16" regex = '\b(?:foo|bar|[A-Z])\b' haystack = "X" matches = [[0, 1]] unicode = false [[test]] name = "wb17" regex = '\b(?:foo|bar|[A-Z])\b' haystack = "XY" matches = [] unicode = false [[test]] name = "wb18" regex = '\b(?:foo|bar|[A-Z])\b' haystack = "bar" matches = [[0, 3]] unicode = false [[test]] name = "wb19" regex = '\b(?:foo|bar|[A-Z])\b' haystack = "foo" matches = [[0, 3]] unicode = false [[test]] name = "wb20" regex = '\b(?:foo|bar|[A-Z])\b' haystack = "foo\n" matches = [[0, 3]] unicode = false [[test]] name = "wb21" regex = '\b(?:foo|bar|[A-Z])\b' haystack = "ffoo bbar N x" matches = [[10, 11]] unicode = false [[test]] name = "wb22" regex = '\b(?:fo|foo)\b' haystack = "fo" matches = [[0, 2]] unicode = false [[test]] name = "wb23" regex = '\b(?:fo|foo)\b' haystack = "foo" matches = [[0, 3]] unicode = false [[test]] name = "wb24" regex = '\b\b' haystack = "" matches = [] unicode = false [[test]] name = "wb25" regex = '\b\b' haystack = "a" matches = [[0, 0], [1, 1]] unicode = false [[test]] name = "wb26" regex = '\b$' haystack = "" matches = [] unicode = false [[test]] name = "wb27" regex = '\b$' haystack = "x" matches = [[1, 1]] unicode = false [[test]] name = "wb28" regex = '\b$' haystack = "y x" matches = [[3, 3]] unicode = false [[test]] name = "wb29" regex = '(?-u:\b).$' haystack = "x" matches = [[0, 1]] [[test]] name = "wb30" regex = '^\b(?:fo|foo)\b' haystack = "fo" matches = [[0, 2]] unicode = false [[test]] name = "wb31" regex = '^\b(?:fo|foo)\b' haystack = "foo" matches = [[0, 3]] unicode = false [[test]] name = "wb32" regex = '^\b$' haystack = "" matches = [] unicode = false [[test]] name = "wb33" regex = '^\b$' haystack = "x" matches = [] unicode = false [[test]] name = "wb34" regex = '^(?-u:\b).$' haystack = "x" matches = [[0, 1]] [[test]] name = "wb35" regex = '^(?-u:\b).(?-u:\b)$' haystack = "x" matches = [[0, 1]] [[test]] name = "wb36" regex = '^^^^^\b$$$$$' haystack = "" matches = [] unicode = false [[test]] name = "wb37" regex = '^^^^^(?-u:\b).$$$$$' haystack = "x" matches = [[0, 1]] [[test]] name = "wb38" regex = '^^^^^\b$$$$$' haystack = "x" 
matches = [] unicode = false [[test]] name = "wb39" regex = '^^^^^(?-u:\b\b\b).(?-u:\b\b\b)$$$$$' haystack = "x" matches = [[0, 1]] [[test]] name = "wb40" regex = '(?-u:\b).+(?-u:\b)' haystack = "$$abc$$" matches = [[2, 5]] [[test]] name = "wb41" regex = '\b' haystack = "a b c" matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]] unicode = false [[test]] name = "wb42" regex = '\bfoo\b' haystack = "zzz foo zzz" matches = [[4, 7]] unicode = false [[test]] name = "wb43" regex = '\b^' haystack = "ab" matches = [[0, 0]] unicode = false [[test]] name = "wb44" regex = '$\b' haystack = "ab" matches = [[2, 2]] unicode = false # Tests for \B. Note that \B is not allowed if UTF-8 mode is enabled, so we # have to disable it for most of these tests. This is because \B can match at # non-UTF-8 boundaries. [[test]] name = "nb1" regex = '\Bfoo\B' haystack = "n foo xfoox that" matches = [[7, 10]] unicode = false utf8 = false [[test]] name = "nb2" regex = 'a\B' haystack = "faoa x" matches = [[1, 2]] unicode = false utf8 = false [[test]] name = "nb3" regex = '\Bbar' haystack = "bar x" matches = [] unicode = false utf8 = false [[test]] name = "nb4" regex = '\Bbar' haystack = "foo\nbar x" matches = [] unicode = false utf8 = false [[test]] name = "nb5" regex = 'bar\B' haystack = "foobar" matches = [] unicode = false utf8 = false [[test]] name = "nb6" regex = 'bar\B' haystack = "foobar\nxxx" matches = [] unicode = false utf8 = false [[test]] name = "nb7" regex = '(?:foo|bar|[A-Z])\B' haystack = "foox" matches = [[0, 3]] unicode = false utf8 = false [[test]] name = "nb8" regex = '(?:foo|bar|[A-Z])\B' haystack = "foo\n" matches = [] unicode = false utf8 = false [[test]] name = "nb9" regex = '\B' haystack = "" matches = [[0, 0]] unicode = false utf8 = false [[test]] name = "nb10" regex = '\B' haystack = "x" matches = [] unicode = false utf8 = false [[test]] name = "nb11" regex = '\B(?:foo|bar|[A-Z])' haystack = "foo" matches = [] unicode = false utf8 = false [[test]] name = "nb12" regex = '\B(?:foo|bar|[A-Z])\B' haystack = "xXy" matches = [[1, 2]] unicode = false utf8 = false [[test]] name = "nb13" regex = '\B(?:foo|bar|[A-Z])\B' haystack = "XY" matches = [] unicode = false utf8 = false [[test]] name = "nb14" regex = '\B(?:foo|bar|[A-Z])\B' haystack = "XYZ" matches = [[1, 2]] unicode = false utf8 = false [[test]] name = "nb15" regex = '\B(?:foo|bar|[A-Z])\B' haystack = "abara" matches = [[1, 4]] unicode = false utf8 = false [[test]] name = "nb16" regex = '\B(?:foo|bar|[A-Z])\B' haystack = "xfoo_" matches = [[1, 4]] unicode = false utf8 = false [[test]] name = "nb17" regex = '\B(?:foo|bar|[A-Z])\B' haystack = "xfoo\n" matches = [] unicode = false utf8 = false [[test]] name = "nb18" regex = '\B(?:foo|bar|[A-Z])\B' haystack = "foo bar vNX" matches = [[9, 10]] unicode = false utf8 = false [[test]] name = "nb19" regex = '\B(?:fo|foo)\B' haystack = "xfoo" matches = [[1, 3]] unicode = false utf8 = false [[test]] name = "nb20" regex = '\B(?:foo|fo)\B' haystack = "xfooo" matches = [[1, 4]] unicode = false utf8 = false [[test]] name = "nb21" regex = '\B\B' haystack = "" matches = [[0, 0]] unicode = false utf8 = false [[test]] name = "nb22" regex = '\B\B' haystack = "x" matches = [] unicode = false utf8 = false [[test]] name = "nb23" regex = '\B$' haystack = "" matches = [[0, 0]] unicode = false utf8 = false [[test]] name = "nb24" regex = '\B$' haystack = "x" matches = [] unicode = false utf8 = false [[test]] name = "nb25" regex = '\B$' haystack = "y x" matches = [] unicode = false utf8 = false [[test]] name = "nb26" 
regex = '\B.$' haystack = "x" matches = [] unicode = false utf8 = false [[test]] name = "nb27" regex = '^\B(?:fo|foo)\B' haystack = "fo" matches = [] unicode = false utf8 = false [[test]] name = "nb28" regex = '^\B(?:fo|foo)\B' haystack = "fo" matches = [] unicode = false utf8 = false [[test]] name = "nb29" regex = '^\B' haystack = "" matches = [[0, 0]] unicode = false utf8 = false [[test]] name = "nb30" regex = '^\B' haystack = "x" matches = [] unicode = false utf8 = false [[test]] name = "nb31" regex = '^\B\B' haystack = "" matches = [[0, 0]] unicode = false utf8 = false [[test]] name = "nb32" regex = '^\B\B' haystack = "x" matches = [] unicode = false utf8 = false [[test]] name = "nb33" regex = '^\B$' haystack = "" matches = [[0, 0]] unicode = false utf8 = false [[test]] name = "nb34" regex = '^\B$' haystack = "x" matches = [] unicode = false utf8 = false [[test]] name = "nb35" regex = '^\B.$' haystack = "x" matches = [] unicode = false utf8 = false [[test]] name = "nb36" regex = '^\B.\B$' haystack = "x" matches = [] unicode = false utf8 = false [[test]] name = "nb37" regex = '^^^^^\B$$$$$' haystack = "" matches = [[0, 0]] unicode = false utf8 = false [[test]] name = "nb38" regex = '^^^^^\B.$$$$$' haystack = "x" matches = [] unicode = false utf8 = false [[test]] name = "nb39" regex = '^^^^^\B$$$$$' haystack = "x" matches = [] unicode = false utf8 = false # unicode1* and unicode2* work for both Unicode and ASCII because all matches # are reported as byte offsets, and « and » do not correspond to word # boundaries at either the character or byte level. [[test]] name = "unicode1" regex = '\bx\b' haystack = "«x" matches = [[2, 3]] [[test]] name = "unicode1-only-ascii" regex = '\bx\b' haystack = "«x" matches = [[2, 3]] unicode = false [[test]] name = "unicode2" regex = '\bx\b' haystack = "x»" matches = [[0, 1]] [[test]] name = "unicode2-only-ascii" regex = '\bx\b' haystack = "x»" matches = [[0, 1]] unicode = false # ASCII word boundaries are completely oblivious to Unicode characters, so # even though β is a character, an ASCII \b treats it as a word boundary # when it is adjacent to another ASCII character. (The ASCII \b only looks # at the leading byte of β.) For Unicode \b, the tests are precisely inverted. [[test]] name = "unicode3" regex = '\bx\b' haystack = 'áxβ' matches = [] [[test]] name = "unicode3-only-ascii" regex = '\bx\b' haystack = 'áxβ' matches = [[2, 3]] unicode = false [[test]] name = "unicode4" regex = '\Bx\B' haystack = 'áxβ' matches = [[2, 3]] [[test]] name = "unicode4-only-ascii" regex = '\Bx\B' haystack = 'áxβ' matches = [] unicode = false utf8 = false # The same as above, but with \b instead of \B as a sanity check. [[test]] name = "unicode5" regex = '\b' haystack = "0\U0007EF5E" matches = [[0, 0], [1, 1]] [[test]] name = "unicode5-only-ascii" regex = '\b' haystack = "0\U0007EF5E" matches = [[0, 0], [1, 1]] unicode = false utf8 = false [[test]] name = "unicode5-noutf8" regex = '\b' haystack = '0\xFF\xFF\xFF\xFF' matches = [[0, 0], [1, 1]] unescape = true utf8 = false [[test]] name = "unicode5-noutf8-only-ascii" regex = '\b' haystack = '0\xFF\xFF\xFF\xFF' matches = [[0, 0], [1, 1]] unescape = true unicode = false utf8 = false # Weird special case to ensure that ASCII \B treats each individual code unit # as a non-word byte. (The specific codepoint is irrelevant. It's an arbitrary # codepoint that uses 4 bytes in its UTF-8 encoding and is not a member of the # \w character class.) 
[[test]] name = "unicode5-not" regex = '\B' haystack = "0\U0007EF5E" matches = [[5, 5]] [[test]] name = "unicode5-not-only-ascii" regex = '\B' haystack = "0\U0007EF5E" matches = [[2, 2], [3, 3], [4, 4], [5, 5]] unicode = false utf8 = false # This gets no matches since \B only matches in the presence of valid UTF-8 # when Unicode is enabled, even when UTF-8 mode is disabled. [[test]] name = "unicode5-not-noutf8" regex = '\B' haystack = '0\xFF\xFF\xFF\xFF' matches = [] unescape = true utf8 = false # But this DOES get matches since \B in ASCII mode only looks at individual # bytes. [[test]] name = "unicode5-not-noutf8-only-ascii" regex = '\B' haystack = '0\xFF\xFF\xFF\xFF' matches = [[2, 2], [3, 3], [4, 4], [5, 5]] unescape = true unicode = false utf8 = false # Some tests of no particular significance. [[test]] name = "unicode6" regex = '\b[0-9]+\b' haystack = "foo 123 bar 456 quux 789" matches = [[4, 7], [12, 15], [21, 24]] [[test]] name = "unicode7" regex = '\b[0-9]+\b' haystack = "foo 123 bar a456 quux 789" matches = [[4, 7], [22, 25]] [[test]] name = "unicode8" regex = '\b[0-9]+\b' haystack = "foo 123 bar 456a quux 789" matches = [[4, 7], [22, 25]] # A variant of the problem described here: # https://github.com/google/re2/blob/89567f5de5b23bb5ad0c26cbafc10bdc7389d1fa/re2/dfa.cc#L658-L667 [[test]] name = "alt-with-assertion-repetition" regex = '(?:\b|%)+' haystack = "z%" bounds = [1, 2] anchored = true matches = [[1, 1]] <file_sep>/regex-lite/Cargo.toml [package] name = "regex-lite" version = "0.1.0" #:version authors = ["The Rust Project Developers", "<NAME> <<EMAIL>>"] license = "MIT OR Apache-2.0" repository = "https://github.com/rust-lang/regex/tree/master/regex-lite" documentation = "https://docs.rs/regex-lite" description = """ A lightweight regex engine that optimizes for binary size and compilation time. """ workspace = ".." edition = "2021" rust-version = "1.60.0" autotests = false # Features are documented in the "Crate features" section of the crate docs: # https://docs.rs/regex-syntax/*/#crate-features # # (Currently there are no supported features. 'std' is technically one, but it # is currently required.) [features] default = ["std", "string"] std = [] string = [] [dev-dependencies] anyhow = "1.0.69" regex-test = { path = "../regex-test", version = "0.1.0" } [[test]] path = "tests/lib.rs" name = "integration" [package.metadata.docs.rs] # We want to document all features. all-features = true <file_sep>/regex-automata/Cargo.toml [package] name = "regex-automata" version = "0.3.8" #:version authors = ["The Rust Project Developers", "<NAME> <<EMAIL>>"] description = "Automata construction and matching using regular expressions." documentation = "https://docs.rs/regex-automata" repository = "https://github.com/rust-lang/regex/tree/master/regex-automata" readme = "README.md" keywords = ["regex", "dfa", "automata", "automaton", "nfa"] license = "MIT OR Apache-2.0" categories = ["text-processing"] edition = "2021" autoexamples = false [lib] bench = false # This crate has many many many features. See the crate docs for a description # of each and when you might want to use them. 
[features] default = ["std", "syntax", "perf", "unicode", "meta", "nfa", "dfa", "hybrid"] std = ["regex-syntax?/std", "memchr?/std", "aho-corasick?/std", "alloc"] alloc = [] logging = ["dep:log", "aho-corasick?/logging", "memchr?/logging"] syntax = ["dep:regex-syntax", "alloc"] meta = ["syntax", "nfa-pikevm"] nfa = ["nfa-thompson", "nfa-pikevm", "nfa-backtrack"] nfa-thompson = ["alloc"] nfa-pikevm = ["nfa-thompson"] nfa-backtrack = ["nfa-thompson"] dfa = ["dfa-build", "dfa-search", "dfa-onepass"] dfa-build = ["nfa-thompson", "dfa-search"] dfa-search = [] dfa-onepass = ["nfa-thompson"] hybrid = ["alloc", "nfa-thompson"] perf = ["perf-inline", "perf-literal"] perf-inline = [] perf-literal = ["perf-literal-substring", "perf-literal-multisubstring"] perf-literal-substring = ["aho-corasick?/perf-literal", "dep:memchr"] perf-literal-multisubstring = ["std", "dep:aho-corasick"] # Enables all Unicode features. This expands if new Unicode features are added. unicode = [ "unicode-age", "unicode-bool", "unicode-case", "unicode-gencat", "unicode-perl", "unicode-script", "unicode-segment", "unicode-word-boundary", "regex-syntax?/unicode", ] # Enables use of the `Age` property, e.g., `\p{Age:3.0}`. unicode-age = ["regex-syntax?/unicode-age"] # Enables use of a smattering of boolean properties, e.g., `\p{Emoji}`. unicode-bool = ["regex-syntax?/unicode-bool"] # Enables Unicode-aware case insensitive matching, e.g., `(?i)β`. unicode-case = ["regex-syntax?/unicode-case"] # Enables Unicode general categories, e.g., `\p{Letter}` or `\pL`. unicode-gencat = ["regex-syntax?/unicode-gencat"] # Enables Unicode-aware Perl classes corresponding to `\w`, `\s` and `\d`. unicode-perl = ["regex-syntax?/unicode-perl"] # Enables Unicode scripts and script extensions, e.g., `\p{Greek}`. unicode-script = ["regex-syntax?/unicode-script"] # Enables Unicode segmentation properties, e.g., `\p{gcb=Extend}`. unicode-segment = ["regex-syntax?/unicode-segment"] # Enables Unicode word boundary support. If this is enabled with unicode-perl, # then data tables from regex-syntax are used. Otherwise, a new data table # inside regex-automata will be included. unicode-word-boundary = [] # These are strictly internal features that may be removed or changed in # non-compatible ways. internal-instrument = ["internal-instrument-pikevm"] internal-instrument-pikevm = ["logging", "std"] [dependencies] aho-corasick = { version = "1.0.0", optional = true, default-features = false } log = { version = "0.4.14", optional = true } memchr = { version = "2.6.0", optional = true, default-features = false } regex-syntax = { path = "../regex-syntax", version = "0.7.4", optional = true, default-features = false } [dev-dependencies] anyhow = "1.0.69" bstr = { version = "1.3.0", default-features = false, features = ["std"] } doc-comment = "0.3.3" quickcheck = { version = "1.0.3", default-features = false } regex-test = { path = "../regex-test", version = "0.1.0" } [dev-dependencies.env_logger] version = "0.9.3" default-features = false features = ["atty", "humantime", "termcolor"] # We put these tests here because they are written primarily against the # regex-automata API, and in particular use regex-automata features for # conditional compilation. If we moved these up as tests on 'regex' proper, # then we'd need to duplicate regex-automata's complex features on 'regex' too, # which I really do not want to do. 
[[test]] path = "tests/lib.rs" name = "integration" <file_sep>/regex-automata/tests/dfa/api.rs use std::error::Error; use regex_automata::{ dfa::{dense, Automaton, OverlappingState}, nfa::thompson, HalfMatch, Input, MatchError, }; // Tests that quit bytes in the forward direction work correctly. #[test] fn quit_fwd() -> Result<(), Box<dyn Error>> { let dfa = dense::Builder::new() .configure(dense::Config::new().quit(b'x', true)) .build("[[:word:]]+$")?; assert_eq!( Err(MatchError::quit(b'x', 3)), dfa.try_search_fwd(&Input::new(b"abcxyz")) ); assert_eq!( dfa.try_search_overlapping_fwd( &Input::new(b"abcxyz"), &mut OverlappingState::start() ), Err(MatchError::quit(b'x', 3)), ); Ok(()) } // Tests that quit bytes in the reverse direction work correctly. #[test] fn quit_rev() -> Result<(), Box<dyn Error>> { let dfa = dense::Builder::new() .configure(dense::Config::new().quit(b'x', true)) .thompson(thompson::Config::new().reverse(true)) .build("^[[:word:]]+")?; assert_eq!( Err(MatchError::quit(b'x', 3)), dfa.try_search_rev(&Input::new(b"abcxyz")) ); Ok(()) } // Tests that if we heuristically enable Unicode word boundaries but then // instruct that a non-ASCII byte should NOT be a quit byte, then the builder // will panic. #[test] #[should_panic] fn quit_panics() { dense::Config::new().unicode_word_boundary(true).quit(b'\xFF', false); } // This tests an interesting case where even if the Unicode word boundary option // is disabled, setting all non-ASCII bytes to be quit bytes will cause Unicode // word boundaries to be enabled. #[test] fn unicode_word_implicitly_works() -> Result<(), Box<dyn Error>> { let mut config = dense::Config::new(); for b in 0x80..=0xFF { config = config.quit(b, true); } let dfa = dense::Builder::new().configure(config).build(r"\b")?; let expected = HalfMatch::must(0, 1); assert_eq!(Ok(Some(expected)), dfa.try_search_fwd(&Input::new(b" a"))); Ok(()) } <file_sep>/regex-automata/src/dfa/special.rs use crate::{ dfa::DEAD, util::{ primitives::StateID, wire::{self, DeserializeError, Endian, SerializeError}, }, }; macro_rules! err { ($msg:expr) => { return Err(DeserializeError::generic($msg)); }; } // Special represents the identifiers in a DFA that correspond to "special" // states. If a state is one or more of the following, then it is considered // special: // // * dead - A non-matching state where all outgoing transitions lead back to // itself. There is only one of these, regardless of whether minimization // has run. The dead state always has an ID of 0. i.e., It is always the // first state in a DFA. // * quit - A state that is entered whenever a byte is seen that should cause // a DFA to give up and stop searching. This results in a MatchError::quit // error being returned at search time. The default configuration for a DFA // has no quit bytes, which means this state is unreachable by default, // although it is always present for reasons of implementation simplicity. // This state is only reachable when the caller configures the DFA to quit // on certain bytes. There is always exactly one of these states and it // is always the second state. (Its actual ID depends on the size of the // alphabet in dense DFAs, since state IDs are premultiplied in order to // allow them to be used directly as indices into the transition table.) // * match - An accepting state, i.e., indicative of a match. There may be // zero or more of these states. // * accelerated - A state where all of its outgoing transitions, except a // few, loop back to itself. 
These states are candidates for acceleration // via memchr during search. There may be zero or more of these states. // * start - A non-matching state that indicates where the automaton should // start during a search. There is always at least one starting state and // all are guaranteed to be non-match states. (A start state cannot be a // match state because the DFAs in this crate delay all matches by one byte. // So every search that finds a match must move through one transition to // some other match state, even when searching an empty string.) // // These are not mutually exclusive categories. Namely, the following // overlappings can occur: // // * {dead, start} - If a DFA can never lead to a match and it is minimized, // then it will typically compile to something where all starting IDs point // to the DFA's dead state. // * {match, accelerated} - It is possible for a match state to have the // majority of its transitions loop back to itself, which means it's // possible for a match state to be accelerated. // * {start, accelerated} - Similarly, it is possible for a start state to be // accelerated. Note that it is possible for an accelerated state to be // neither a match or a start state. Also note that just because both match // and start states overlap with accelerated states does not mean that // match and start states overlap with each other. In fact, they are // guaranteed not to overlap. // // As a special mention, every DFA always has a dead and a quit state, even // though from the perspective of the DFA, they are equivalent. (Indeed, // minimization special cases them to ensure they don't get merged.) The // purpose of keeping them distinct is to use the quit state as a sentinel to // distinguish between whether a search finished successfully without finding // anything or whether it gave up before finishing. // // So the main problem we want to solve here is the *fast* detection of whether // a state is special or not. And we also want to do this while storing as // little extra data as possible. AND we want to be able to quickly determine // which categories a state falls into above if it is special. // // We achieve this by essentially shuffling all special states to the beginning // of a DFA. That is, all special states appear before every other non-special // state. By representing special states this way, we can determine whether a // state is special or not by a single comparison, where special.max is the // identifier of the last special state in the DFA: // // if current_state <= special.max: // ... do something with special state // // The only thing left to do is to determine what kind of special state // it is. Because what we do next depends on that. Since special states // are typically rare, we can afford to do a bit more extra work, but we'd // still like this to be as fast as possible. The trick we employ here is to // continue shuffling states even within the special state range. Such that // one contiguous region corresponds to match states, another for start states // and then an overlapping range for accelerated states. At a high level, our // special state detection might look like this (for leftmost searching, where // we continue searching even after seeing a match): // // byte = input[offset] // current_state = next_state(current_state, byte) // offset += 1 // if current_state <= special.max: // if current_state == 0: // # We can never leave a dead state, so this always marks the // # end of our search. 
// return last_match // if current_state == special.quit_id: // # A quit state means we give up. If the DFA has no quit state, // # then special.quit_id == 0 == dead, which is handled by the // # conditional above. // return Err(MatchError::quit { byte, offset: offset - 1 }) // if special.min_match <= current_state <= special.max_match: // last_match = Some(offset) // if special.min_accel <= current_state <= special.max_accel: // offset = accelerate(input, offset) // last_match = Some(offset) // elif special.min_start <= current_state <= special.max_start: // offset = prefilter.find(input, offset) // if special.min_accel <= current_state <= special.max_accel: // offset = accelerate(input, offset) // elif special.min_accel <= current_state <= special.max_accel: // offset = accelerate(input, offset) // // There are some small details left out of the logic above. For example, // in order to accelerate a state, we need to know which bytes to search for. // This in turn implies some extra data we need to store in the DFA. To keep // things compact, we would ideally only store // // N = special.max_accel - special.min_accel + 1 // // items. But state IDs are premultiplied, which means they are not contiguous. // So in order to take a state ID and index an array of accelerated structures, // we need to do: // // i = (state_id - special.min_accel) / stride // // (N.B. 'stride' is always a power of 2, so the above can be implemented via // '(state_id - special.min_accel) >> stride2', where 'stride2' is x in // 2^x=stride.) // // Moreover, some of these specialty categories may be empty. For example, // DFAs are not required to have any match states or any accelerated states. // In that case, the lower and upper bounds are both set to 0 (the dead state // ID) and the first `current_state == 0` check subsumes cases where the // ranges are empty. // // Loop unrolling, if applicable, has also been left out of the logic above. // // Graphically, the ranges look like this, where asterisks indicate ranges // that can be empty. Each 'x' is a state. // // quit // dead| // || // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx // | | | | start | | // | |-------------| |-------| | // | match* | | | | // | | | | | // | |----------| | | // | accel* | | // | | | // | | | // |----------------------------|------------------------ // special non-special* #[derive(Clone, Copy, Debug)] pub(crate) struct Special { /// The identifier of the last special state in a DFA. A state is special /// if and only if its identifier is less than or equal to `max`. pub(crate) max: StateID, /// The identifier of the quit state in a DFA. (There is no analogous field /// for the dead state since the dead state's ID is always zero, regardless /// of state ID size.) pub(crate) quit_id: StateID, /// The identifier of the first match state. pub(crate) min_match: StateID, /// The identifier of the last match state. pub(crate) max_match: StateID, /// The identifier of the first accelerated state. pub(crate) min_accel: StateID, /// The identifier of the last accelerated state. pub(crate) max_accel: StateID, /// The identifier of the first start state. pub(crate) min_start: StateID, /// The identifier of the last start state. pub(crate) max_start: StateID, } impl Special { /// Creates a new set of special ranges for a DFA. All ranges are initially /// set to only contain the dead state. This is interpreted as an empty /// range. 
#[cfg(feature = "dfa-build")] pub(crate) fn new() -> Special { Special { max: DEAD, quit_id: DEAD, min_match: DEAD, max_match: DEAD, min_accel: DEAD, max_accel: DEAD, min_start: DEAD, max_start: DEAD, } } /// Remaps all of the special state identifiers using the function given. #[cfg(feature = "dfa-build")] pub(crate) fn remap(&self, map: impl Fn(StateID) -> StateID) -> Special { Special { max: map(self.max), quit_id: map(self.quit_id), min_match: map(self.min_match), max_match: map(self.max_match), min_accel: map(self.min_accel), max_accel: map(self.max_accel), min_start: map(self.min_start), max_start: map(self.max_start), } } /// Deserialize the given bytes into special state ranges. If the slice /// given is not big enough, then this returns an error. Similarly, if /// any of the expected invariants around special state ranges aren't /// upheld, an error is returned. Note that this does not guarantee that /// the information returned is correct. /// /// Upon success, this returns the number of bytes read in addition to the /// special state IDs themselves. pub(crate) fn from_bytes( mut slice: &[u8], ) -> Result<(Special, usize), DeserializeError> { wire::check_slice_len(slice, 8 * StateID::SIZE, "special states")?; let mut nread = 0; let mut read_id = |what| -> Result<StateID, DeserializeError> { let (id, nr) = wire::try_read_state_id(slice, what)?; nread += nr; slice = &slice[StateID::SIZE..]; Ok(id) }; let max = read_id("special max id")?; let quit_id = read_id("special quit id")?; let min_match = read_id("special min match id")?; let max_match = read_id("special max match id")?; let min_accel = read_id("special min accel id")?; let max_accel = read_id("special max accel id")?; let min_start = read_id("special min start id")?; let max_start = read_id("special max start id")?; let special = Special { max, quit_id, min_match, max_match, min_accel, max_accel, min_start, max_start, }; special.validate()?; assert_eq!(nread, special.write_to_len()); Ok((special, nread)) } /// Validate that the information describing special states satisfies /// all known invariants. pub(crate) fn validate(&self) -> Result<(), DeserializeError> { // Check that both ends of the range are DEAD or neither are. if self.min_match == DEAD && self.max_match != DEAD { err!("min_match is DEAD, but max_match is not"); } if self.min_match != DEAD && self.max_match == DEAD { err!("max_match is DEAD, but min_match is not"); } if self.min_accel == DEAD && self.max_accel != DEAD { err!("min_accel is DEAD, but max_accel is not"); } if self.min_accel != DEAD && self.max_accel == DEAD { err!("max_accel is DEAD, but min_accel is not"); } if self.min_start == DEAD && self.max_start != DEAD { err!("min_start is DEAD, but max_start is not"); } if self.min_start != DEAD && self.max_start == DEAD { err!("max_start is DEAD, but min_start is not"); } // Check that ranges are well formed. if self.min_match > self.max_match { err!("min_match should not be greater than max_match"); } if self.min_accel > self.max_accel { err!("min_accel should not be greater than max_accel"); } if self.min_start > self.max_start { err!("min_start should not be greater than max_start"); } // Check that ranges are ordered with respect to one another. 
if self.matches() && self.quit_id >= self.min_match { err!("quit_id should not be greater than min_match"); } if self.accels() && self.quit_id >= self.min_accel { err!("quit_id should not be greater than min_accel"); } if self.starts() && self.quit_id >= self.min_start { err!("quit_id should not be greater than min_start"); } if self.matches() && self.accels() && self.min_accel < self.min_match { err!("min_match should not be greater than min_accel"); } if self.matches() && self.starts() && self.min_start < self.min_match { err!("min_match should not be greater than min_start"); } if self.accels() && self.starts() && self.min_start < self.min_accel { err!("min_accel should not be greater than min_start"); } // Check that max is at least as big as everything else. if self.max < self.quit_id { err!("quit_id should not be greater than max"); } if self.max < self.max_match { err!("max_match should not be greater than max"); } if self.max < self.max_accel { err!("max_accel should not be greater than max"); } if self.max < self.max_start { err!("max_start should not be greater than max"); } Ok(()) } /// Validate that the special state information is compatible with the /// given state len. pub(crate) fn validate_state_len( &self, len: usize, stride2: usize, ) -> Result<(), DeserializeError> { // We assume that 'validate' has already passed, so we know that 'max' // is truly the max. So all we need to check is that the max state ID // is less than the state ID len. The max legal value here is len-1, // which occurs when there are no non-special states. if (self.max.as_usize() >> stride2) >= len { err!("max should not be greater than or equal to state length"); } Ok(()) } /// Write the IDs and ranges for special states to the given byte buffer. /// The buffer given must have enough room to store all data, otherwise /// this will return an error. The number of bytes written is returned /// on success. The number of bytes written is guaranteed to be a multiple /// of 8. pub(crate) fn write_to<E: Endian>( &self, dst: &mut [u8], ) -> Result<usize, SerializeError> { use crate::util::wire::write_state_id as write; if dst.len() < self.write_to_len() { return Err(SerializeError::buffer_too_small("special state ids")); } let mut nwrite = 0; nwrite += write::<E>(self.max, &mut dst[nwrite..]); nwrite += write::<E>(self.quit_id, &mut dst[nwrite..]); nwrite += write::<E>(self.min_match, &mut dst[nwrite..]); nwrite += write::<E>(self.max_match, &mut dst[nwrite..]); nwrite += write::<E>(self.min_accel, &mut dst[nwrite..]); nwrite += write::<E>(self.max_accel, &mut dst[nwrite..]); nwrite += write::<E>(self.min_start, &mut dst[nwrite..]); nwrite += write::<E>(self.max_start, &mut dst[nwrite..]); assert_eq!( self.write_to_len(), nwrite, "expected to write certain number of bytes", ); assert_eq!( nwrite % 8, 0, "expected to write multiple of 8 bytes for special states", ); Ok(nwrite) } /// Returns the total number of bytes written by `write_to`. pub(crate) fn write_to_len(&self) -> usize { 8 * StateID::SIZE } /// Sets the maximum special state ID based on the current values. This /// should be used once all possible state IDs are set. #[cfg(feature = "dfa-build")] pub(crate) fn set_max(&mut self) { use core::cmp::max; self.max = max( self.quit_id, max(self.max_match, max(self.max_accel, self.max_start)), ); } /// Sets the maximum special state ID such that starting states are not /// considered "special." 
This also marks the min/max starting states as /// DEAD such that 'is_start_state' always returns false, even if the state /// is actually a starting state. /// /// This is useful when there is no prefilter set. It will avoid /// ping-ponging between the hot path in the DFA search code and the start /// state handling code, which is typically only useful for executing a /// prefilter. #[cfg(feature = "dfa-build")] pub(crate) fn set_no_special_start_states(&mut self) { use core::cmp::max; self.max = max(self.quit_id, max(self.max_match, self.max_accel)); self.min_start = DEAD; self.max_start = DEAD; } /// Returns true if and only if the given state ID is a special state. #[inline] pub(crate) fn is_special_state(&self, id: StateID) -> bool { id <= self.max } /// Returns true if and only if the given state ID is a dead state. #[inline] pub(crate) fn is_dead_state(&self, id: StateID) -> bool { id == DEAD } /// Returns true if and only if the given state ID is a quit state. #[inline] pub(crate) fn is_quit_state(&self, id: StateID) -> bool { !self.is_dead_state(id) && self.quit_id == id } /// Returns true if and only if the given state ID is a match state. #[inline] pub(crate) fn is_match_state(&self, id: StateID) -> bool { !self.is_dead_state(id) && self.min_match <= id && id <= self.max_match } /// Returns true if and only if the given state ID is an accel state. #[inline] pub(crate) fn is_accel_state(&self, id: StateID) -> bool { !self.is_dead_state(id) && self.min_accel <= id && id <= self.max_accel } /// Returns true if and only if the given state ID is a start state. #[inline] pub(crate) fn is_start_state(&self, id: StateID) -> bool { !self.is_dead_state(id) && self.min_start <= id && id <= self.max_start } /// Returns the total number of match states for a dense table based DFA. #[inline] pub(crate) fn match_len(&self, stride: usize) -> usize { if self.matches() { (self.max_match.as_usize() - self.min_match.as_usize() + stride) / stride } else { 0 } } /// Returns true if and only if there is at least one match state. #[inline] pub(crate) fn matches(&self) -> bool { self.min_match != DEAD } /// Returns the total number of accel states. #[cfg(feature = "dfa-build")] pub(crate) fn accel_len(&self, stride: usize) -> usize { if self.accels() { (self.max_accel.as_usize() - self.min_accel.as_usize() + stride) / stride } else { 0 } } /// Returns true if and only if there is at least one accel state. #[inline] pub(crate) fn accels(&self) -> bool { self.min_accel != DEAD } /// Returns true if and only if there is at least one start state. #[inline] pub(crate) fn starts(&self) -> bool { self.min_start != DEAD } } <file_sep>/regex-automata/tests/fuzz/dense.rs // This test was found by a fuzzer input that crafted a way to provide // an invalid serialization of ByteClasses that passed our verification. // Specifically, the verification step in the deserialization of ByteClasses // used an iterator that depends on part of the serialized bytes being correct. // (Specifically, the encoding of the number of classes.) #[test] fn invalid_byte_classes() { let data = include_bytes!( "testdata/deserialize_dense_crash-9486fb7c8a93b12c12a62166b43d31640c0208a9", ); let _ = fuzz_run(data); } #[test] fn invalid_byte_classes_min() { let data = include_bytes!( "testdata/deserialize_dense_minimized-from-9486fb7c8a93b12c12a62166b43d31640c0208a9", ); let _ = fuzz_run(data); } // This is the code from the fuzz target. Kind of sucks to duplicate it here, // but this is fundamentally how we interpret the data. 
fn fuzz_run(given_data: &[u8]) -> Option<()> { use regex_automata::dfa::Automaton; if given_data.len() < 2 { return None; } let haystack_len = usize::from(given_data[0]); let haystack = given_data.get(1..1 + haystack_len)?; let given_dfa_bytes = given_data.get(1 + haystack_len..)?; // We help the fuzzer along by adding a preamble to the bytes that should // at least make these first parts valid. The preamble expects a very // specific sequence of bytes, so it makes sense to just force this. let label = "rust-regex-automata-dfa-dense\x00\x00\x00"; assert_eq!(0, label.len() % 4); let endianness_check = 0xFEFFu32.to_ne_bytes().to_vec(); let version_check = 2u32.to_ne_bytes().to_vec(); let mut dfa_bytes: Vec<u8> = vec![]; dfa_bytes.extend(label.as_bytes()); dfa_bytes.extend(&endianness_check); dfa_bytes.extend(&version_check); dfa_bytes.extend(given_dfa_bytes); // This is the real test: checking that any input we give to // DFA::from_bytes will never result in a panic. let (dfa, _) = regex_automata::dfa::dense::DFA::from_bytes(&dfa_bytes).ok()?; let _ = dfa.try_search_fwd(&regex_automata::Input::new(haystack)); Some(()) } <file_sep>/regex-automata/src/util/determinize/mod.rs /*! This module contains types and routines for implementing determinization. In this crate, there are at least two places where we implement determinization: fully ahead-of-time compiled DFAs in the `dfa` module and lazily compiled DFAs in the `hybrid` module. The stuff in this module corresponds to the things that are in common between these implementations. There are three broad things that our implementations of determinization have in common, as defined by this module: * The classification of start states. That is, whether we're dealing with word boundaries, line boundaries, etc., is all the same. This also includes the look-behind assertions that are satisfied by each starting state classification. * The representation of DFA states as sets of NFA states, including convenience types for building these DFA states that are amenable to reusing allocations. * Routines for the "classical" parts of determinization: computing the epsilon closure, tracking match states (with corresponding pattern IDs, since we support multi-pattern finite automata) and, of course, computing the transition function between states for units of input. I did consider a couple of alternatives to this particular form of code reuse: 1. Don't do any code reuse. The problem here is that we *really* want both forms of determinization to do exactly identical things when it comes to their handling of NFA states. While our tests generally ensure this, the code is tricky and large enough where not reusing code is a pretty big bummer. 2. Implement all of determinization once and make it generic over fully compiled DFAs and lazily compiled DFAs. While I didn't actually try this approach, my instinct is that it would be more complex than is needed here. And the interface required would be pretty hairy. Instead, I think splitting it into logical sub-components works better. */ use alloc::vec::Vec; pub(crate) use self::state::{ State, StateBuilderEmpty, StateBuilderMatches, StateBuilderNFA, }; use crate::{ nfa::thompson, util::{ alphabet, look::{Look, LookSet}, primitives::StateID, search::MatchKind, sparse_set::{SparseSet, SparseSets}, start::Start, utf8, }, }; mod state; /// Compute the set of all reachable NFA states, including the full epsilon /// closure, from a DFA state for a single unit of input. 
The set of reachable /// states is returned as a `StateBuilderNFA`. The `StateBuilderNFA` returned /// also includes any look-behind assertions satisfied by `unit`, in addition /// to whether it is a match state. For multi-pattern DFAs, the builder will /// also include the pattern IDs that match (in the order seen). /// /// `nfa` must be able to resolve any NFA state in `state` and any NFA state /// reachable via the epsilon closure of any NFA state in `state`. `sparses` /// must have capacity equivalent to `nfa.len()`. /// /// `match_kind` should correspond to the match semantics implemented by the /// DFA being built. Generally speaking, for leftmost-first match semantics, /// states that appear after the first NFA match state will not be included in /// the `StateBuilderNFA` returned since they are impossible to visit. /// /// `sparses` is used as scratch space for NFA traversal. Other than their /// capacity requirements (detailed above), there are no requirements on what's /// contained within them (if anything). Similarly, what's inside of them once /// this routine returns is unspecified. /// /// `stack` must have length 0. It is used as scratch space for depth first /// traversal. After returning, it is guaranteed that `stack` will have length /// 0. /// /// `state` corresponds to the current DFA state on which one wants to compute /// the transition for the input `unit`. /// /// `empty_builder` corresponds to the builder allocation to use to produce a /// complete `StateBuilderNFA` state. If the state is not needed (or is already /// cached), then it can be cleared and reused without needing to create a new /// `State`. The `StateBuilderNFA` state returned is final and ready to be /// turned into a `State` if necessary. pub(crate) fn next( nfa: &thompson::NFA, match_kind: MatchKind, sparses: &mut SparseSets, stack: &mut Vec<StateID>, state: &State, unit: alphabet::Unit, empty_builder: StateBuilderEmpty, ) -> StateBuilderNFA { sparses.clear(); // Whether the NFA is matched in reverse or not. We use this in some // conditional logic for dealing with the exceptionally annoying CRLF-aware // line anchors. let rev = nfa.is_reverse(); // The look-around matcher that our NFA is configured with. We don't // actually use it to match look-around assertions, but we do need its // configuration for constructing states consistent with how it matches. let lookm = nfa.look_matcher(); // Put the NFA state IDs into a sparse set in case we need to // re-compute their epsilon closure. // // Doing this state shuffling is technically not necessary unless some // kind of look-around is used in the DFA. Some ad hoc experiments // suggested that avoiding this didn't lead to much of an improvement, // but perhaps more rigorous experimentation should be done. And in // particular, avoiding this check requires some light refactoring of // the code below. state.iter_nfa_state_ids(|nfa_id| { sparses.set1.insert(nfa_id); }); // Compute look-ahead assertions originating from the current state. Based // on the input unit we're transitioning over, some additional set of // assertions may be true. Thus, we re-compute this state's epsilon closure // (but only if necessary). Notably, when we build a DFA state initially, // we don't enable any look-ahead assertions because we don't know whether // they're true or not at that point. if !state.look_need().is_empty() { // Add look-ahead assertions that are now true based on the current // input unit. 
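        // In rough, non-exhaustive terms: the EOI sentinel satisfies End,
        // EndLF and EndCRLF; the configured line terminator satisfies EndLF;
        // '\r' and '\n' satisfy EndCRLF depending on the search direction and
        // whether the previous byte was the other half of a CRLF; and the
        // word assertions are picked by comparing the "word-ness" of the
        // previous byte with that of the current unit.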
let mut look_have = state.look_have().clone(); match unit.as_u8() { Some(b'\r') => { if !rev || !state.is_half_crlf() { look_have = look_have.insert(Look::EndCRLF); } } Some(b'\n') => { if rev || !state.is_half_crlf() { look_have = look_have.insert(Look::EndCRLF); } } Some(_) => {} None => { look_have = look_have.insert(Look::End); look_have = look_have.insert(Look::EndLF); look_have = look_have.insert(Look::EndCRLF); } } if unit.is_byte(lookm.get_line_terminator()) { look_have = look_have.insert(Look::EndLF); } if state.is_half_crlf() && ((rev && !unit.is_byte(b'\r')) || (!rev && !unit.is_byte(b'\n'))) { look_have = look_have.insert(Look::StartCRLF); } if state.is_from_word() == unit.is_word_byte() { look_have = look_have.insert(Look::WordUnicodeNegate); look_have = look_have.insert(Look::WordAsciiNegate); } else { look_have = look_have.insert(Look::WordUnicode); look_have = look_have.insert(Look::WordAscii); } // If we have new assertions satisfied that are among the set of // assertions that exist in this state (that is, just because we added // an EndLF assertion above doesn't mean there is an EndLF conditional // epsilon transition in this state), then we re-compute this state's // epsilon closure using the updated set of assertions. // // Note that since our DFA states omit unconditional epsilon // transitions, this check is necessary for correctness. If we re-did // the epsilon closure below needlessly, it could change based on the // fact that we omitted epsilon states originally. if !look_have .subtract(state.look_have()) .intersect(state.look_need()) .is_empty() { for nfa_id in sparses.set1.iter() { epsilon_closure( nfa, nfa_id, look_have, stack, &mut sparses.set2, ); } sparses.swap(); sparses.set2.clear(); } } // Convert our empty builder into one that can record assertions and match // pattern IDs. let mut builder = empty_builder.into_matches(); // Set whether the StartLF look-behind assertion is true for this // transition or not. The look-behind assertion for ASCII word boundaries // is handled below. if nfa.look_set_any().contains_anchor_line() && unit.is_byte(lookm.get_line_terminator()) { // Why only handle StartLF here and not Start? That's because Start // can only impact the starting state, which is special cased in // start state handling. builder.set_look_have(|have| have.insert(Look::StartLF)); } // We also need to add StartCRLF to our assertions too, if we can. This // is unfortunately a bit more complicated, because it depends on the // direction of the search. In the forward direction, ^ matches after a // \n, but in the reverse direction, ^ only matches after a \r. (This is // further complicated by the fact that reverse a regex means changing a ^ // to a $ and vice versa.) if nfa.look_set_any().contains_anchor_crlf() && ((rev && unit.is_byte(b'\r')) || (!rev && unit.is_byte(b'\n'))) { builder.set_look_have(|have| have.insert(Look::StartCRLF)); } for nfa_id in sparses.set1.iter() { match *nfa.state(nfa_id) { thompson::State::Union { .. } | thompson::State::BinaryUnion { .. } | thompson::State::Fail | thompson::State::Look { .. } | thompson::State::Capture { .. } => {} thompson::State::Match { pattern_id } => { // Notice here that we are calling the NEW state a match // state if the OLD state we are transitioning from // contains an NFA match state. This is precisely how we // delay all matches by one byte and also what therefore // guarantees that starting states cannot be match states. 
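                // (Concretely, the state being built here is the target of a
                // transition out of the old state, so a match that completed
                // in the old state is only observed one byte later, when this
                // new state is entered.)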
// // If we didn't delay matches by one byte, then whether // a DFA is a matching state or not would be determined // by whether one of its own constituent NFA states // was a match state. (And that would be done in // 'add_nfa_states'.) // // Also, 'add_match_pattern_id' requires that callers never // pass duplicative pattern IDs. We do in fact uphold that // guarantee here, but it's subtle. In particular, a Thompson // NFA guarantees that each pattern has exactly one match // state. Moreover, since we're iterating over the NFA state // IDs in a set, we are guarateed not to have any duplicative // match states. Thus, it is impossible to add the same pattern // ID more than once. // // N.B. We delay matches by 1 byte as a way to hack 1-byte // look-around into DFA searches. This lets us support ^, $ // and ASCII-only \b. The delay is also why we need a special // "end-of-input" (EOI) sentinel and why we need to follow the // EOI sentinel at the end of every search. This final EOI // transition is necessary to report matches found at the end // of a haystack. builder.add_match_pattern_id(pattern_id); if !match_kind.continue_past_first_match() { break; } } thompson::State::ByteRange { ref trans } => { if trans.matches_unit(unit) { epsilon_closure( nfa, trans.next, builder.look_have(), stack, &mut sparses.set2, ); } } thompson::State::Sparse(ref sparse) => { if let Some(next) = sparse.matches_unit(unit) { epsilon_closure( nfa, next, builder.look_have(), stack, &mut sparses.set2, ); } } thompson::State::Dense(ref dense) => { if let Some(next) = dense.matches_unit(unit) { epsilon_closure( nfa, next, builder.look_have(), stack, &mut sparses.set2, ); } } } } // We only set the word byte if there's a word boundary look-around // anywhere in this regex. Otherwise, there's no point in bloating the // number of states if we don't have one. // // We also only set it when the state has a non-zero number of NFA states. // Otherwise, we could wind up with states that *should* be DEAD states // but are otherwise distinct from DEAD states because of this look-behind // assertion being set. While this can't technically impact correctness *in // theory*, it can create pathological DFAs that consume input until EOI or // a quit byte is seen. Consuming until EOI isn't a correctness problem, // but a (serious) perf problem. Hitting a quit byte, however, could be a // correctness problem since it could cause search routines to report an // error instead of a detected match once the quit state is entered. (The // search routine could be made to be a bit smarter by reporting a match // if one was detected once it enters a quit state (and indeed, the search // routines in this crate do just that), but it seems better to prevent // these things by construction if possible.) if !sparses.set2.is_empty() { if nfa.look_set_any().contains_word() && unit.is_word_byte() { builder.set_is_from_word(); } if nfa.look_set_any().contains_anchor_crlf() && ((rev && unit.is_byte(b'\n')) || (!rev && unit.is_byte(b'\r'))) { builder.set_is_half_crlf(); } } let mut builder_nfa = builder.into_nfa(); add_nfa_states(nfa, &sparses.set2, &mut builder_nfa); builder_nfa } /// Compute the epsilon closure for the given NFA state. The epsilon closure /// consists of all NFA state IDs, including `start_nfa_id`, that can be /// reached from `start_nfa_id` without consuming any input. These state IDs /// are written to `set` in the order they are visited, but only if they are /// not already in `set`. 
`start_nfa_id` must be a valid state ID for the NFA /// given. /// /// `look_have` consists of the satisfied assertions at the current /// position. For conditional look-around epsilon transitions, these are /// only followed if they are satisfied by `look_have`. /// /// `stack` must have length 0. It is used as scratch space for depth first /// traversal. After returning, it is guaranteed that `stack` will have length /// 0. pub(crate) fn epsilon_closure( nfa: &thompson::NFA, start_nfa_id: StateID, look_have: LookSet, stack: &mut Vec<StateID>, set: &mut SparseSet, ) { assert!(stack.is_empty()); // If this isn't an epsilon state, then the epsilon closure is always just // itself, so there's no need to spin up the machinery below to handle it. if !nfa.state(start_nfa_id).is_epsilon() { set.insert(start_nfa_id); return; } stack.push(start_nfa_id); while let Some(mut id) = stack.pop() { // In many cases, we can avoid stack operations when an NFA state only // adds one new state to visit. In that case, we just set our ID to // that state and mush on. We only use the stack when an NFA state // introduces multiple new states to visit. loop { // Insert this NFA state, and if it's already in the set and thus // already visited, then we can move on to the next one. if !set.insert(id) { break; } match *nfa.state(id) { thompson::State::ByteRange { .. } | thompson::State::Sparse { .. } | thompson::State::Dense { .. } | thompson::State::Fail | thompson::State::Match { .. } => break, thompson::State::Look { look, next } => { if !look_have.contains(look) { break; } id = next; } thompson::State::Union { ref alternates } => { id = match alternates.get(0) { None => break, Some(&id) => id, }; // We need to process our alternates in order to preserve // match preferences, so put the earliest alternates closer // to the top of the stack. stack.extend(alternates[1..].iter().rev()); } thompson::State::BinaryUnion { alt1, alt2 } => { id = alt1; stack.push(alt2); } thompson::State::Capture { next, .. } => { id = next; } } } } } /// Add the NFA state IDs in the given `set` to the given DFA builder state. /// The order in which states are added corresponds to the order in which they /// were added to `set`. /// /// The DFA builder state given should already have its complete set of match /// pattern IDs added (if any) and any look-behind assertions (StartLF, Start /// and whether this state is being generated for a transition over a word byte /// when applicable) that are true immediately prior to transitioning into this /// state (via `builder.look_have()`). The match pattern IDs should correspond /// to matches that occurred on the previous transition, since all matches are /// delayed by one byte. The things that should _not_ be set are look-ahead /// assertions (EndLF, End and whether the next byte is a word byte or not). /// The builder state should also not have anything in `look_need` set, as this /// routine will compute that for you. /// /// The given NFA should be able to resolve all identifiers in `set` to a /// particular NFA state. Additionally, `set` must have capacity equivalent /// to `nfa.len()`. pub(crate) fn add_nfa_states( nfa: &thompson::NFA, set: &SparseSet, builder: &mut StateBuilderNFA, ) { for nfa_id in set.iter() { match *nfa.state(nfa_id) { thompson::State::ByteRange { .. } => { builder.add_nfa_state_id(nfa_id); } thompson::State::Sparse { .. } => { builder.add_nfa_state_id(nfa_id); } thompson::State::Dense { .. } => { builder.add_nfa_state_id(nfa_id); } thompson::State::Look { look, .. 
} => { builder.add_nfa_state_id(nfa_id); builder.set_look_need(|need| need.insert(look)); } thompson::State::Union { .. } | thompson::State::BinaryUnion { .. } => { // Pure epsilon transitions don't need to be tracked as part // of the DFA state. Tracking them is actually superfluous; // they won't cause any harm other than making determinization // slower. // // Why aren't these needed? Well, in an NFA, epsilon // transitions are really just jumping points to other states. // So once you hit an epsilon transition, the same set of // resulting states always appears. Therefore, putting them in // a DFA's set of ordered NFA states is strictly redundant. // // Look-around states are also epsilon transitions, but // they are *conditional*. So their presence could be // discriminatory, and thus, they are tracked above. // // But wait... why are epsilon states in our `set` in the first // place? Why not just leave them out? They're in our `set` // because it was generated by computing an epsilon closure, // and we want to keep track of all states we visited to avoid // re-visiting them. In exchange, we have to do this second // iteration over our collected states to finalize our DFA // state. In theory, we could avoid this second iteration if // we maintained two sets during epsilon closure: the set of // visited states (to avoid cycles) and the set of states that // will actually be used to construct the next DFA state. // // Note that this optimization requires that we re-compute the // epsilon closure to account for look-ahead in 'next' *only // when necessary*. Namely, only when the set of look-around // assertions changes and only when those changes are within // the set of assertions that are needed in order to step // through the closure correctly. Otherwise, if we re-do the // epsilon closure needlessly, it could change based on the // fact that we are omitting epsilon states here. // // ----- // // Welp, scratch the above. It turns out that recording these // is in fact necessary to seemingly handle one particularly // annoying case: when a conditional epsilon transition is // put inside of a repetition operator. One specific case I // ran into was the regex `(?:\b|%)+` on the haystack `z%`. // The correct leftmost first matches are: [0, 0] and [1, 1]. // But the DFA was reporting [0, 0] and [1, 2]. To understand // why this happens, consider the NFA for the aforementioned // regex: // // >000000: binary-union(4, 1) // 000001: \x00-\xFF => 0 // 000002: WordAscii => 5 // 000003: % => 5 // ^000004: binary-union(2, 3) // 000005: binary-union(4, 6) // 000006: MATCH(0) // // The problem here is that one of the DFA start states is // going to consist of the NFA states [2, 3] by computing the // epsilon closure of state 4. State 4 isn't included because // we previously were not keeping track of union states. But // only a subset of transitions out of this state will be able // to follow WordAscii, and in those cases, the epsilon closure // is redone. The only problem is that computing the epsilon // closure from [2, 3] is different than computing the epsilon // closure from [4]. In the former case, assuming the WordAscii // assertion is satisfied, you get: [2, 3, 6]. In the latter // case, you get: [2, 6, 3]. Notice that '6' is the match state // and appears AFTER '3' in the former case. This leads to a // preferential but incorrect match of '%' before returning // a match. In the latter case, the match is preferred over // continuing to accept the '%'. 
// // It almost feels like we might be able to fix the NFA states // to avoid this, or to at least only keep track of union // states where this actually matters, since in the vast // majority of cases, this doesn't matter. // // Another alternative would be to define a new HIR property // called "assertion is repeated anywhere" and compute it // inductively over the entire pattern. If it happens anywhere, // which is probably pretty rare, then we record union states. // Otherwise we don't. builder.add_nfa_state_id(nfa_id); } // Capture states we definitely do not need to record, since they // are unconditional epsilon transitions with no branching. thompson::State::Capture { .. } => {} // It's not totally clear whether we need to record fail states or // not, but we do so out of an abundance of caution. Since they are // quite rare in practice, there isn't much cost to recording them. thompson::State::Fail => { builder.add_nfa_state_id(nfa_id); } thompson::State::Match { .. } => { // Normally, the NFA match state doesn't actually need to // be inside the DFA state. But since we delay matches by // one byte, the matching DFA state corresponds to states // that transition from the one we're building here. And // the way we detect those cases is by looking for an NFA // match state. See 'next' for how this is handled. builder.add_nfa_state_id(nfa_id); } } } // If we know this state contains no look-around assertions, then // there's no reason to track which look-around assertions were // satisfied when this state was created. if builder.look_need().is_empty() { builder.set_look_have(|_| LookSet::empty()); } } /// Sets the appropriate look-behind assertions on the given state based on /// this starting configuration. pub(crate) fn set_lookbehind_from_start( nfa: &thompson::NFA, start: &Start, builder: &mut StateBuilderMatches, ) { let rev = nfa.is_reverse(); let lineterm = nfa.look_matcher().get_line_terminator(); match *start { Start::NonWordByte => {} Start::WordByte => { builder.set_is_from_word(); } Start::Text => { builder.set_look_have(|have| { have.insert(Look::Start) .insert(Look::StartLF) .insert(Look::StartCRLF) }); } Start::LineLF => { if rev { builder.set_is_half_crlf(); builder.set_look_have(|have| have.insert(Look::StartLF)); } else { builder.set_look_have(|have| have.insert(Look::StartCRLF)); } if lineterm == b'\n' { builder.set_look_have(|have| have.insert(Look::StartLF)); } } Start::LineCR => { if rev { builder.set_look_have(|have| have.insert(Look::StartCRLF)); } else { builder.set_is_half_crlf(); } if lineterm == b'\r' { builder.set_look_have(|have| have.insert(Look::StartLF)); } } Start::CustomLineTerminator => { builder.set_look_have(|have| have.insert(Look::StartLF)); // This is a bit of a tricky case, but if the line terminator was // set to a word byte, then we also need to behave as if the start // configuration is Start::WordByte. That is, we need to mark our // state as having come from a word byte. if utf8::is_word_byte(lineterm) { builder.set_is_from_word(); } } } } <file_sep>/regex-syntax/src/hir/literal.rs /*! Provides literal extraction from `Hir` expressions. An [`Extractor`] pulls literals out of [`Hir`] expressions and returns a [`Seq`] of [`Literal`]s. The purpose of literal extraction is generally to provide avenues for optimizing regex searches. The main idea is that substring searches can be an order of magnitude faster than a regex search. 
Therefore, if one can execute a substring search to find candidate match locations and only run the regex search at those locations, then it is possible for huge improvements in performance to be realized. With that said, literal optimizations are generally a black art because even though substring search is generally faster, if the number of candidates produced is high, then it can create a lot of overhead by ping-ponging between the substring search and the regex search. Here are some heuristics that might be used to help increase the chances of effective literal optimizations: * Stick to small [`Seq`]s. If you search for too many literals, it's likely to lead to substring search that is only a little faster than a regex search, and thus the overhead of using literal optimizations in the first place might make things slower overall. * The literals in your [`Seq`] shouldn't be too short. In general, longer is better. A sequence corresponding to single bytes that occur frequently in the haystack, for example, is probably a bad literal optimization because it's likely to produce many false positive candidates. Longer literals are less likely to match, and thus probably produce fewer false positives. * If it's possible to estimate the approximate frequency of each byte according to some pre-computed background distribution, it is possible to compute a score of how "good" a `Seq` is. If a `Seq` isn't good enough, you might consider skipping the literal optimization and just use the regex engine. (It should be noted that there are always pathological cases that can make any kind of literal optimization be a net slower result. This is why it might be a good idea to be conservative, or to even provide a means for literal optimizations to be dynamically disabled if they are determined to be ineffective according to some measure.) You're encouraged to explore the methods on [`Seq`], which permit shrinking the size of sequences in a preference-order preserving fashion. Finally, note that it isn't strictly necessary to use an [`Extractor`]. Namely, an `Extractor` only uses public APIs of the [`Seq`] and [`Literal`] types, so it is possible to implement your own extractor. For example, for n-grams or "inner" literals (i.e., not prefix or suffix literals). The `Extractor` is mostly responsible for the case analysis over `Hir` expressions. Much of the "trickier" parts are how to combine literal sequences, and that is all implemented on [`Seq`]. */ use core::{cmp, mem, num::NonZeroUsize}; use alloc::{vec, vec::Vec}; use crate::hir::{self, Hir}; /// Extracts prefix or suffix literal sequences from [`Hir`] expressions. /// /// Literal extraction is based on the following observations: /// /// * Many regexes start with one or a small number of literals. /// * Substring search for literals is often much faster (sometimes by an order /// of magnitude) than a regex search. /// /// Thus, in many cases, one can search for literals to find candidate starting /// locations of a match, and then only run the full regex engine at each such /// location instead of over the full haystack. /// /// The main downside of literal extraction is that it can wind up causing a /// search to be slower overall. For example, if there are many matches or if /// there are many candidates that don't ultimately lead to a match, then a /// lot of overhead will be spent in shuffing back-and-forth between substring /// search and the regex engine. 
This is the fundamental reason why literal /// optimizations for regex patterns is sometimes considered a "black art." /// /// # Look-around assertions /// /// Literal extraction treats all look-around assertions as-if they match every /// empty string. So for example, the regex `\bquux\b` will yield a sequence /// containing a single exact literal `quux`. However, not all occurrences /// of `quux` correspond to a match a of the regex. For example, `\bquux\b` /// does not match `ZquuxZ` anywhere because `quux` does not fall on a word /// boundary. /// /// In effect, if your regex contains look-around assertions, then a match of /// an exact literal does not necessarily mean the regex overall matches. So /// you may still need to run the regex engine in such cases to confirm the /// match. /// /// The precise guarantee you get from a literal sequence is: if every literal /// in the sequence is exact and the original regex contains zero look-around /// assertions, then a preference-order multi-substring search of those /// literals will precisely match a preference-order search of the original /// regex. /// /// # Example /// /// This shows how to extract prefixes: /// /// ``` /// use regex_syntax::{hir::literal::{Extractor, Literal, Seq}, parse}; /// /// let hir = parse(r"(a|b|c)(x|y|z)[A-Z]+foo")?; /// let got = Extractor::new().extract(&hir); /// // All literals returned are "inexact" because none of them reach the /// // match state. /// let expected = Seq::from_iter([ /// Literal::inexact("ax"), /// Literal::inexact("ay"), /// Literal::inexact("az"), /// Literal::inexact("bx"), /// Literal::inexact("by"), /// Literal::inexact("bz"), /// Literal::inexact("cx"), /// Literal::inexact("cy"), /// Literal::inexact("cz"), /// ]); /// assert_eq!(expected, got); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` /// /// This shows how to extract suffixes: /// /// ``` /// use regex_syntax::{ /// hir::literal::{Extractor, ExtractKind, Literal, Seq}, /// parse, /// }; /// /// let hir = parse(r"foo|[A-Z]+bar")?; /// let got = Extractor::new().kind(ExtractKind::Suffix).extract(&hir); /// // Since 'foo' gets to a match state, it is considered exact. But 'bar' /// // does not because of the '[A-Z]+', and thus is marked inexact. /// let expected = Seq::from_iter([ /// Literal::exact("foo"), /// Literal::inexact("bar"), /// ]); /// assert_eq!(expected, got); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[derive(Clone, Debug)] pub struct Extractor { kind: ExtractKind, limit_class: usize, limit_repeat: usize, limit_literal_len: usize, limit_total: usize, } impl Extractor { /// Create a new extractor with a default configuration. /// /// The extractor can be optionally configured before calling /// [`Extractor::extract`] to get a literal sequence. pub fn new() -> Extractor { Extractor { kind: ExtractKind::Prefix, limit_class: 10, limit_repeat: 10, limit_literal_len: 100, limit_total: 250, } } /// Execute the extractor and return a sequence of literals. pub fn extract(&self, hir: &Hir) -> Seq { use crate::hir::HirKind::*; match *hir.kind() { Empty | Look(_) => Seq::singleton(self::Literal::exact(vec![])), Literal(hir::Literal(ref bytes)) => { let mut seq = Seq::singleton(self::Literal::exact(bytes.to_vec())); self.enforce_literal_len(&mut seq); seq } Class(hir::Class::Unicode(ref cls)) => { self.extract_class_unicode(cls) } Class(hir::Class::Bytes(ref cls)) => self.extract_class_bytes(cls), Repetition(ref rep) => self.extract_repetition(rep), Capture(hir::Capture { ref sub, .. 
}) => self.extract(sub), Concat(ref hirs) => match self.kind { ExtractKind::Prefix => self.extract_concat(hirs.iter()), ExtractKind::Suffix => self.extract_concat(hirs.iter().rev()), }, Alternation(ref hirs) => { // Unlike concat, we always union starting from the beginning, // since the beginning corresponds to the highest preference, // which doesn't change based on forwards vs reverse. self.extract_alternation(hirs.iter()) } } } /// Set the kind of literal sequence to extract from an [`Hir`] expression. /// /// The default is to extract prefixes, but suffixes can be selected /// instead. The contract for prefixes is that every match of the /// corresponding `Hir` must start with one of the literals in the sequence /// returned. Moreover, the _order_ of the sequence returned corresponds to /// the preference order. /// /// Suffixes satisfy a similar contract in that every match of the /// corresponding `Hir` must end with one of the literals in the sequence /// returned. However, there is no guarantee that the literals are in /// preference order. /// /// Remember that a sequence can be infinite. For example, unless the /// limits are configured to be impractically large, attempting to extract /// prefixes (or suffixes) for the pattern `[A-Z]` will return an infinite /// sequence. Generally speaking, if the sequence returned is infinite, /// then it is presumed to be unwise to do prefix (or suffix) optimizations /// for the pattern. pub fn kind(&mut self, kind: ExtractKind) -> &mut Extractor { self.kind = kind; self } /// Configure a limit on the length of the sequence that is permitted for /// a character class. If a character class exceeds this limit, then the /// sequence returned for it is infinite. /// /// This prevents classes like `[A-Z]` or `\pL` from getting turned into /// huge and likely unproductive sequences of literals. /// /// # Example /// /// This example shows how this limit can be lowered to decrease the tolerance /// for character classes being turned into literal sequences. /// /// ``` /// use regex_syntax::{hir::literal::{Extractor, Seq}, parse}; /// /// let hir = parse(r"[0-9]")?; /// /// let got = Extractor::new().extract(&hir); /// let expected = Seq::new([ /// "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", /// ]); /// assert_eq!(expected, got); /// /// // Now let's shrink the limit and see how that changes things. /// let got = Extractor::new().limit_class(4).extract(&hir); /// let expected = Seq::infinite(); /// assert_eq!(expected, got); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn limit_class(&mut self, limit: usize) -> &mut Extractor { self.limit_class = limit; self } /// Configure a limit on the total number of repetitions that is permitted /// before literal extraction is stopped. /// /// This is useful for limiting things like `(abcde){50}`, or more /// insidiously, `(?:){1000000000}`. This limit prevents any one single /// repetition from adding too much to a literal sequence. /// /// With this limit set, repetitions that exceed it will be stopped and any /// literals extracted up to that point will be made inexact. /// /// # Example /// /// This shows how to decrease the limit and compares it with the default. 
/// /// ``` /// use regex_syntax::{hir::literal::{Extractor, Literal, Seq}, parse}; /// /// let hir = parse(r"(abc){8}")?; /// /// let got = Extractor::new().extract(&hir); /// let expected = Seq::new(["abcabcabcabcabcabcabcabc"]); /// assert_eq!(expected, got); /// /// // Now let's shrink the limit and see how that changes things. /// let got = Extractor::new().limit_repeat(4).extract(&hir); /// let expected = Seq::from_iter([ /// Literal::inexact("abcabcabcabc"), /// ]); /// assert_eq!(expected, got); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn limit_repeat(&mut self, limit: usize) -> &mut Extractor { self.limit_repeat = limit; self } /// Configure a limit on the maximum length of any literal in a sequence. /// /// This is useful for limiting things like `(abcde){5}{5}{5}{5}`. While /// each repetition or literal in that regex is small, when all the /// repetitions are applied, one ends up with a literal of length `5^4 = /// 625`. /// /// With this limit set, literals that exceed it will be made inexact and /// thus prevented from growing. /// /// # Example /// /// This shows how to decrease the limit and compares it with the default. /// /// ``` /// use regex_syntax::{hir::literal::{Extractor, Literal, Seq}, parse}; /// /// let hir = parse(r"(abc){2}{2}{2}")?; /// /// let got = Extractor::new().extract(&hir); /// let expected = Seq::new(["abcabcabcabcabcabcabcabc"]); /// assert_eq!(expected, got); /// /// // Now let's shrink the limit and see how that changes things. /// let got = Extractor::new().limit_literal_len(14).extract(&hir); /// let expected = Seq::from_iter([ /// Literal::inexact("abcabcabcabcab"), /// ]); /// assert_eq!(expected, got); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn limit_literal_len(&mut self, limit: usize) -> &mut Extractor { self.limit_literal_len = limit; self } /// Configure a limit on the total number of literals that will be /// returned. /// /// This is useful as a practical measure for avoiding the creation of /// large sequences of literals. While the extractor will automatically /// handle local creations of large sequences (for example, `[A-Z]` yields /// an infinite sequence by default), large sequences can be created /// through non-local means as well. /// /// For example, `[ab]{3}{3}` would yield a sequence of length `512 = 2^9` /// despite each of the repetitions being small on their own. This limit /// thus represents a "catch all" for avoiding locally small sequences from /// combining into large sequences. /// /// # Example /// /// This example shows how reducing the limit will change the literal /// sequence returned. /// /// ``` /// use regex_syntax::{hir::literal::{Extractor, Literal, Seq}, parse}; /// /// let hir = parse(r"[ab]{2}{2}")?; /// /// let got = Extractor::new().extract(&hir); /// let expected = Seq::new([ /// "aaaa", "aaab", "aaba", "aabb", /// "abaa", "abab", "abba", "abbb", /// "baaa", "baab", "baba", "babb", /// "bbaa", "bbab", "bbba", "bbbb", /// ]); /// assert_eq!(expected, got); /// /// // The default limit is not too big, but big enough to extract all /// // literals from '[ab]{2}{2}'. If we shrink the limit to less than 16, /// // then we'll get a truncated set. Notice that it returns a sequence of /// // length 4 even though our limit was 10. This is because the sequence /// // is difficult to increase without blowing the limit. Notice also /// // that every literal in the sequence is now inexact because they were /// // stripped of some suffix. 
/// let got = Extractor::new().limit_total(10).extract(&hir); /// let expected = Seq::from_iter([ /// Literal::inexact("aa"), /// Literal::inexact("ab"), /// Literal::inexact("ba"), /// Literal::inexact("bb"), /// ]); /// assert_eq!(expected, got); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn limit_total(&mut self, limit: usize) -> &mut Extractor { self.limit_total = limit; self } /// Extract a sequence from the given concatenation. Sequences from each of /// the child HIR expressions are combined via cross product. /// /// This short circuits once the cross product turns into a sequence /// containing only inexact literals. fn extract_concat<'a, I: Iterator<Item = &'a Hir>>(&self, it: I) -> Seq { let mut seq = Seq::singleton(self::Literal::exact(vec![])); for hir in it { // If every element in the sequence is inexact, then a cross // product will always be a no-op. Thus, there is nothing else we // can add to it and can quit early. Note that this also includes // infinite sequences. if seq.is_inexact() { break; } // Note that 'cross' also dispatches based on whether we're // extracting prefixes or suffixes. seq = self.cross(seq, &mut self.extract(hir)); } seq } /// Extract a sequence from the given alternation. /// /// This short circuits once the union turns into an infinite sequence. fn extract_alternation<'a, I: Iterator<Item = &'a Hir>>( &self, it: I, ) -> Seq { let mut seq = Seq::empty(); for hir in it { // Once our 'seq' is infinite, every subsequent union // operation on it will itself always result in an // infinite sequence. Thus, it can never change and we can // short-circuit. if !seq.is_finite() { break; } seq = self.union(seq, &mut self.extract(hir)); } seq } /// Extract a sequence of literals from the given repetition. We do our /// best, Some examples: /// /// 'a*' => [inexact(a), exact("")] /// 'a*?' => [exact(""), inexact(a)] /// 'a+' => [inexact(a)] /// 'a{3}' => [exact(aaa)] /// 'a{3,5} => [inexact(aaa)] /// /// The key here really is making sure we get the 'inexact' vs 'exact' /// attributes correct on each of the literals we add. For example, the /// fact that 'a*' gives us an inexact 'a' and an exact empty string means /// that a regex like 'ab*c' will result in [inexact(ab), exact(ac)] /// literals being extracted, which might actually be a better prefilter /// than just 'a'. fn extract_repetition(&self, rep: &hir::Repetition) -> Seq { let mut subseq = self.extract(&rep.sub); match *rep { hir::Repetition { min: 0, max, greedy, .. } => { // When 'max=1', we can retain exactness, since 'a?' is // equivalent to 'a|'. Similarly below, 'a??' is equivalent to // '|a'. if max != Some(1) { subseq.make_inexact(); } let mut empty = Seq::singleton(Literal::exact(vec![])); if !greedy { mem::swap(&mut subseq, &mut empty); } self.union(subseq, &mut empty) } hir::Repetition { min, max: Some(max), .. } if min == max => { assert!(min > 0); // handled above let limit = u32::try_from(self.limit_repeat).unwrap_or(u32::MAX); let mut seq = Seq::singleton(Literal::exact(vec![])); for _ in 0..cmp::min(min, limit) { if seq.is_inexact() { break; } seq = self.cross(seq, &mut subseq.clone()); } if usize::try_from(min).is_err() || min > limit { seq.make_inexact(); } seq } hir::Repetition { min, .. 
} => { assert!(min > 0); // handled above let limit = u32::try_from(self.limit_repeat).unwrap_or(u32::MAX); let mut seq = Seq::singleton(Literal::exact(vec![])); for _ in 0..cmp::min(min, limit) { if seq.is_inexact() { break; } seq = self.cross(seq, &mut subseq.clone()); } seq.make_inexact(); seq } } } /// Convert the given Unicode class into a sequence of literals if the /// class is small enough. If the class is too big, return an infinite /// sequence. fn extract_class_unicode(&self, cls: &hir::ClassUnicode) -> Seq { if self.class_over_limit_unicode(cls) { return Seq::infinite(); } let mut seq = Seq::empty(); for r in cls.iter() { for ch in r.start()..=r.end() { seq.push(Literal::from(ch)); } } self.enforce_literal_len(&mut seq); seq } /// Convert the given byte class into a sequence of literals if the class /// is small enough. If the class is too big, return an infinite sequence. fn extract_class_bytes(&self, cls: &hir::ClassBytes) -> Seq { if self.class_over_limit_bytes(cls) { return Seq::infinite(); } let mut seq = Seq::empty(); for r in cls.iter() { for b in r.start()..=r.end() { seq.push(Literal::from(b)); } } self.enforce_literal_len(&mut seq); seq } /// Returns true if the given Unicode class exceeds the configured limits /// on this extractor. fn class_over_limit_unicode(&self, cls: &hir::ClassUnicode) -> bool { let mut count = 0; for r in cls.iter() { if count > self.limit_class { return true; } count += r.len(); } count > self.limit_class } /// Returns true if the given byte class exceeds the configured limits on /// this extractor. fn class_over_limit_bytes(&self, cls: &hir::ClassBytes) -> bool { let mut count = 0; for r in cls.iter() { if count > self.limit_class { return true; } count += r.len(); } count > self.limit_class } /// Compute the cross product of the two sequences if the result would be /// within configured limits. Otherwise, make `seq2` infinite and cross the /// infinite sequence with `seq1`. fn cross(&self, mut seq1: Seq, seq2: &mut Seq) -> Seq { if seq1.max_cross_len(seq2).map_or(false, |len| len > self.limit_total) { seq2.make_infinite(); } if let ExtractKind::Suffix = self.kind { seq1.cross_reverse(seq2); } else { seq1.cross_forward(seq2); } assert!(seq1.len().map_or(true, |x| x <= self.limit_total)); self.enforce_literal_len(&mut seq1); seq1 } /// Union the two sequences if the result would be within configured /// limits. Otherwise, make `seq2` infinite and union the infinite sequence /// with `seq1`. fn union(&self, mut seq1: Seq, seq2: &mut Seq) -> Seq { if seq1.max_union_len(seq2).map_or(false, |len| len > self.limit_total) { // We try to trim our literal sequences to see if we can make // room for more literals. The idea is that we'd rather trim down // literals already in our sequence if it means we can add a few // more and retain a finite sequence. Otherwise, we'll union with // an infinite sequence and that infects everything and effectively // stops literal extraction in its tracks. // // We do we keep 4 bytes here? Well, it's a bit of an abstraction // leakage. Downstream, the literals may wind up getting fed to // the Teddy algorithm, which supports searching literals up to // length 4. So that's why we pick that number here. Arguably this // should be a tuneable parameter, but it seems a little tricky to // describe. And I'm still unsure if this is the right way to go // about culling literal sequences. 
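            // For example, under prefix extraction a sequence like
            // [foobar, foobaz] is trimmed to the single inexact literal
            // [foob] once adjacent duplicates are deduped, which frees up
            // room in the total-size budget for the union below.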
match self.kind { ExtractKind::Prefix => { seq1.keep_first_bytes(4); seq2.keep_first_bytes(4); } ExtractKind::Suffix => { seq1.keep_last_bytes(4); seq2.keep_last_bytes(4); } } seq1.dedup(); seq2.dedup(); if seq1 .max_union_len(seq2) .map_or(false, |len| len > self.limit_total) { seq2.make_infinite(); } } seq1.union(seq2); assert!(seq1.len().map_or(true, |x| x <= self.limit_total)); seq1 } /// Applies the literal length limit to the given sequence. If none of the /// literals in the sequence exceed the limit, then this is a no-op. fn enforce_literal_len(&self, seq: &mut Seq) { let len = self.limit_literal_len; match self.kind { ExtractKind::Prefix => seq.keep_first_bytes(len), ExtractKind::Suffix => seq.keep_last_bytes(len), } } } impl Default for Extractor { fn default() -> Extractor { Extractor::new() } } /// The kind of literals to extract from an [`Hir`] expression. /// /// The default extraction kind is `Prefix`. #[non_exhaustive] #[derive(Clone, Debug)] pub enum ExtractKind { /// Extracts only prefix literals from a regex. Prefix, /// Extracts only suffix literals from a regex. /// /// Note that the sequence returned by suffix literals currently may /// not correctly represent leftmost-first or "preference" order match /// semantics. Suffix, } impl ExtractKind { /// Returns true if this kind is the `Prefix` variant. pub fn is_prefix(&self) -> bool { matches!(*self, ExtractKind::Prefix) } /// Returns true if this kind is the `Suffix` variant. pub fn is_suffix(&self) -> bool { matches!(*self, ExtractKind::Suffix) } } impl Default for ExtractKind { fn default() -> ExtractKind { ExtractKind::Prefix } } /// A sequence of literals. /// /// A `Seq` is very much like a set in that it represents a union of its /// members. That is, it corresponds to a set of literals where at least one /// must match in order for a particular [`Hir`] expression to match. (Whether /// this corresponds to the entire `Hir` expression, a prefix of it or a suffix /// of it depends on how the `Seq` was extracted from the `Hir`.) /// /// It is also unlike a set in that multiple identical literals may appear, /// and that the order of the literals in the `Seq` matters. For example, if /// the sequence is `[sam, samwise]` and leftmost-first matching is used, then /// `samwise` can never match and the sequence is equivalent to `[sam]`. /// /// # States of a sequence /// /// A `Seq` has a few different logical states to consider: /// /// * The sequence can represent "any" literal. When this happens, the set does /// not have a finite size. The purpose of this state is to inhibit callers /// from making assumptions about what literals are required in order to match /// a particular [`Hir`] expression. Generally speaking, when a set is in this /// state, literal optimizations are inhibited. A good example of a regex that /// will cause this sort of set to appear is `[A-Za-z]`. The character class /// is just too big (and also too narrow) to be usefully expanded into 52 /// different literals. (Note that the decision for when a seq should become /// infinite is determined by the caller. A seq itself has no hard-coded /// limits.) /// * The sequence can be empty, in which case, it is an affirmative statement /// that there are no literals that can match the corresponding `Hir`. /// Consequently, the `Hir` never matches any input. For example, `[a&&b]`. /// * The sequence can be non-empty, in which case, at least one of the /// literals must match in order for the corresponding `Hir` to match. 
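///
/// For instance, a minimal sketch of the three states using the constructors
/// on `Seq` (names as they appear elsewhere in this module):
///
/// ```
/// use regex_syntax::hir::literal::{Literal, Seq};
///
/// // Infinite: nothing may be assumed about matching literals.
/// assert!(!Seq::infinite().is_finite());
/// // Empty: affirmatively matches nothing at all.
/// assert_eq!(Some(0), Seq::empty().len());
/// // Non-empty: at least one of the literals must match.
/// assert_eq!(Some(1), Seq::singleton(Literal::exact("a")).len());
/// ```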
/// /// # Example /// /// This example shows how literal sequences can be simplified by stripping /// suffixes and minimizing while maintaining preference order. /// /// ``` /// use regex_syntax::hir::literal::{Literal, Seq}; /// /// let mut seq = Seq::new(&[ /// "farm", /// "appliance", /// "faraway", /// "apple", /// "fare", /// "gap", /// "applicant", /// "applaud", /// ]); /// seq.keep_first_bytes(3); /// seq.minimize_by_preference(); /// // Notice that 'far' comes before 'app', which matches the order in the /// // original sequence. This guarantees that leftmost-first semantics are /// // not altered by simplifying the set. /// let expected = Seq::from_iter([ /// Literal::inexact("far"), /// Literal::inexact("app"), /// Literal::exact("gap"), /// ]); /// assert_eq!(expected, seq); /// ``` #[derive(Clone, Eq, PartialEq)] pub struct Seq { /// The members of this seq. /// /// When `None`, the seq represents all possible literals. That is, it /// prevents one from making assumptions about specific literals in the /// seq, and forces one to treat it as if any literal might be in the seq. /// /// Note that `Some(vec![])` is valid and corresponds to the empty seq of /// literals, i.e., a regex that can never match. For example, `[a&&b]`. /// It is distinct from `Some(vec![""])`, which corresponds to the seq /// containing an empty string, which matches at every position. literals: Option<Vec<Literal>>, } impl Seq { /// Returns an empty sequence. /// /// An empty sequence matches zero literals, and thus corresponds to a /// regex that itself can never match. #[inline] pub fn empty() -> Seq { Seq { literals: Some(vec![]) } } /// Returns a sequence of literals without a finite size and may contain /// any literal. /// /// A sequence without finite size does not reveal anything about the /// characteristics of the literals in its set. There are no fixed prefixes /// or suffixes, nor are lower or upper bounds on the length of the literals /// in the set known. /// /// This is useful to represent constructs in a regex that are "too big" /// to useful represent as a sequence of literals. For example, `[A-Za-z]`. /// When sequences get too big, they lose their discriminating nature and /// are more likely to produce false positives, which in turn makes them /// less likely to speed up searches. /// /// More pragmatically, for many regexes, enumerating all possible literals /// is itself not possible or might otherwise use too many resources. So /// constraining the size of sets during extraction is a practical trade /// off to make. #[inline] pub fn infinite() -> Seq { Seq { literals: None } } /// Returns a sequence containing a single literal. #[inline] pub fn singleton(lit: Literal) -> Seq { Seq { literals: Some(vec![lit]) } } /// Returns a sequence of exact literals from the given byte strings. #[inline] pub fn new<I, B>(it: I) -> Seq where I: IntoIterator<Item = B>, B: AsRef<[u8]>, { it.into_iter().map(|b| Literal::exact(b.as_ref())).collect() } /// If this is a finite sequence, return its members as a slice of /// literals. /// /// The slice returned may be empty, in which case, there are no literals /// that can match this sequence. #[inline] pub fn literals(&self) -> Option<&[Literal]> { self.literals.as_deref() } /// Push a literal to the end of this sequence. /// /// If this sequence is not finite, then this is a no-op. /// /// Similarly, if the most recently added item of this sequence is /// equivalent to the literal given, then it is not added. 
This reflects /// a `Seq`'s "set like" behavior, and represents a practical trade off. /// Namely, there is never any need to have two adjacent and equivalent /// literals in the same sequence, _and_ it is easy to detect in some /// cases. #[inline] pub fn push(&mut self, lit: Literal) { let lits = match self.literals { None => return, Some(ref mut lits) => lits, }; if lits.last().map_or(false, |m| m == &lit) { return; } lits.push(lit); } /// Make all of the literals in this sequence inexact. /// /// This is a no-op if this sequence is not finite. #[inline] pub fn make_inexact(&mut self) { let lits = match self.literals { None => return, Some(ref mut lits) => lits, }; for lit in lits.iter_mut() { lit.make_inexact(); } } /// Converts this sequence to an infinite sequence. /// /// This is a no-op if the sequence is already infinite. #[inline] pub fn make_infinite(&mut self) { self.literals = None; } /// Modify this sequence to contain the cross product between it and the /// sequence given. /// /// The cross product only considers literals in this sequence that are /// exact. That is, inexact literals are not extended. /// /// The literals are always drained from `other`, even if none are used. /// This permits callers to reuse the sequence allocation elsewhere. /// /// If this sequence is infinite, then this is a no-op, regardless of what /// `other` contains (and in this case, the literals are still drained from /// `other`). If `other` is infinite and this sequence is finite, then this /// is a no-op, unless this sequence contains a zero-length literal. In /// which case, the infiniteness of `other` infects this sequence, and this /// sequence is itself made infinite. /// /// Like [`Seq::union`], this may attempt to deduplicate literals. See /// [`Seq::dedup`] for how deduplication deals with exact and inexact /// literals. /// /// # Example /// /// This example shows basic usage and how exact and inexact literals /// interact. /// /// ``` /// use regex_syntax::hir::literal::{Literal, Seq}; /// /// let mut seq1 = Seq::from_iter([ /// Literal::exact("foo"), /// Literal::inexact("bar"), /// ]); /// let mut seq2 = Seq::from_iter([ /// Literal::inexact("quux"), /// Literal::exact("baz"), /// ]); /// seq1.cross_forward(&mut seq2); /// /// // The literals are pulled out of seq2. /// assert_eq!(Some(0), seq2.len()); /// /// let expected = Seq::from_iter([ /// Literal::inexact("fooquux"), /// Literal::exact("foobaz"), /// Literal::inexact("bar"), /// ]); /// assert_eq!(expected, seq1); /// ``` /// /// This example shows the behavior of when `other` is an infinite /// sequence. /// /// ``` /// use regex_syntax::hir::literal::{Literal, Seq}; /// /// let mut seq1 = Seq::from_iter([ /// Literal::exact("foo"), /// Literal::inexact("bar"), /// ]); /// let mut seq2 = Seq::infinite(); /// seq1.cross_forward(&mut seq2); /// /// // When seq2 is infinite, cross product doesn't add anything, but /// // ensures all members of seq1 are inexact. /// let expected = Seq::from_iter([ /// Literal::inexact("foo"), /// Literal::inexact("bar"), /// ]); /// assert_eq!(expected, seq1); /// ``` /// /// This example is like the one above, but shows what happens when this /// sequence contains an empty string. 
In this case, an infinite `other` /// sequence infects this sequence (because the empty string means that /// there are no finite prefixes): /// /// ``` /// use regex_syntax::hir::literal::{Literal, Seq}; /// /// let mut seq1 = Seq::from_iter([ /// Literal::exact("foo"), /// Literal::exact(""), // inexact provokes same behavior /// Literal::inexact("bar"), /// ]); /// let mut seq2 = Seq::infinite(); /// seq1.cross_forward(&mut seq2); /// /// // seq1 is now infinite! /// assert!(!seq1.is_finite()); /// ``` /// /// This example shows the behavior of this sequence is infinite. /// /// ``` /// use regex_syntax::hir::literal::{Literal, Seq}; /// /// let mut seq1 = Seq::infinite(); /// let mut seq2 = Seq::from_iter([ /// Literal::exact("foo"), /// Literal::inexact("bar"), /// ]); /// seq1.cross_forward(&mut seq2); /// /// // seq1 remains unchanged. /// assert!(!seq1.is_finite()); /// // Even though the literals in seq2 weren't used, it was still drained. /// assert_eq!(Some(0), seq2.len()); /// ``` #[inline] pub fn cross_forward(&mut self, other: &mut Seq) { let (lits1, lits2) = match self.cross_preamble(other) { None => return, Some((lits1, lits2)) => (lits1, lits2), }; let newcap = lits1.len().saturating_mul(lits2.len()); for selflit in mem::replace(lits1, Vec::with_capacity(newcap)) { if !selflit.is_exact() { lits1.push(selflit); continue; } for otherlit in lits2.iter() { let mut newlit = Literal::exact(Vec::with_capacity( selflit.len() + otherlit.len(), )); newlit.extend(&selflit); newlit.extend(&otherlit); if !otherlit.is_exact() { newlit.make_inexact(); } lits1.push(newlit); } } lits2.drain(..); self.dedup(); } /// Modify this sequence to contain the cross product between it and /// the sequence given, where the sequences are treated as suffixes /// instead of prefixes. Namely, the sequence `other` is *prepended* /// to `self` (as opposed to `other` being *appended* to `self` in /// [`Seq::cross_forward`]). /// /// The cross product only considers literals in this sequence that are /// exact. That is, inexact literals are not extended. /// /// The literals are always drained from `other`, even if none are used. /// This permits callers to reuse the sequence allocation elsewhere. /// /// If this sequence is infinite, then this is a no-op, regardless of what /// `other` contains (and in this case, the literals are still drained from /// `other`). If `other` is infinite and this sequence is finite, then this /// is a no-op, unless this sequence contains a zero-length literal. In /// which case, the infiniteness of `other` infects this sequence, and this /// sequence is itself made infinite. /// /// Like [`Seq::union`], this may attempt to deduplicate literals. See /// [`Seq::dedup`] for how deduplication deals with exact and inexact /// literals. /// /// # Example /// /// This example shows basic usage and how exact and inexact literals /// interact. /// /// ``` /// use regex_syntax::hir::literal::{Literal, Seq}; /// /// let mut seq1 = Seq::from_iter([ /// Literal::exact("foo"), /// Literal::inexact("bar"), /// ]); /// let mut seq2 = Seq::from_iter([ /// Literal::inexact("quux"), /// Literal::exact("baz"), /// ]); /// seq1.cross_reverse(&mut seq2); /// /// // The literals are pulled out of seq2. /// assert_eq!(Some(0), seq2.len()); /// /// let expected = Seq::from_iter([ /// Literal::inexact("quuxfoo"), /// Literal::inexact("bar"), /// Literal::exact("bazfoo"), /// ]); /// assert_eq!(expected, seq1); /// ``` /// /// This example shows the behavior of when `other` is an infinite /// sequence. 
/// /// ``` /// use regex_syntax::hir::literal::{Literal, Seq}; /// /// let mut seq1 = Seq::from_iter([ /// Literal::exact("foo"), /// Literal::inexact("bar"), /// ]); /// let mut seq2 = Seq::infinite(); /// seq1.cross_reverse(&mut seq2); /// /// // When seq2 is infinite, cross product doesn't add anything, but /// // ensures all members of seq1 are inexact. /// let expected = Seq::from_iter([ /// Literal::inexact("foo"), /// Literal::inexact("bar"), /// ]); /// assert_eq!(expected, seq1); /// ``` /// /// This example is like the one above, but shows what happens when this /// sequence contains an empty string. In this case, an infinite `other` /// sequence infects this sequence (because the empty string means that /// there are no finite suffixes): /// /// ``` /// use regex_syntax::hir::literal::{Literal, Seq}; /// /// let mut seq1 = Seq::from_iter([ /// Literal::exact("foo"), /// Literal::exact(""), // inexact provokes same behavior /// Literal::inexact("bar"), /// ]); /// let mut seq2 = Seq::infinite(); /// seq1.cross_reverse(&mut seq2); /// /// // seq1 is now infinite! /// assert!(!seq1.is_finite()); /// ``` /// /// This example shows the behavior when this sequence is infinite. /// /// ``` /// use regex_syntax::hir::literal::{Literal, Seq}; /// /// let mut seq1 = Seq::infinite(); /// let mut seq2 = Seq::from_iter([ /// Literal::exact("foo"), /// Literal::inexact("bar"), /// ]); /// seq1.cross_reverse(&mut seq2); /// /// // seq1 remains unchanged. /// assert!(!seq1.is_finite()); /// // Even though the literals in seq2 weren't used, it was still drained. /// assert_eq!(Some(0), seq2.len()); /// ``` #[inline] pub fn cross_reverse(&mut self, other: &mut Seq) { let (lits1, lits2) = match self.cross_preamble(other) { None => return, Some((lits1, lits2)) => (lits1, lits2), }; // We basically proceed as we do in 'cross_forward' at this point, // except that the outer loop is now 'other' and the inner loop is now // 'self'. That's because 'self' corresponds to suffixes and 'other' // corresponds to the sequence we want to *prepend* to the suffixes. let newcap = lits1.len().saturating_mul(lits2.len()); let selflits = mem::replace(lits1, Vec::with_capacity(newcap)); for (i, otherlit) in lits2.drain(..).enumerate() { for selflit in selflits.iter() { if !selflit.is_exact() { // If the suffix isn't exact, then we can't prepend // anything to it. However, we still want to keep it. But // we only want to keep one of them, to avoid duplication. // (The duplication is okay from a correctness perspective, // but wasteful.) if i == 0 { lits1.push(selflit.clone()); } continue; } let mut newlit = Literal::exact(Vec::with_capacity( otherlit.len() + selflit.len(), )); newlit.extend(&otherlit); newlit.extend(&selflit); if !otherlit.is_exact() { newlit.make_inexact(); } lits1.push(newlit); } } self.dedup(); } /// A helper function the corresponds to the subtle preamble for both /// `cross_forward` and `cross_reverse`. In effect, it handles the cases /// of infinite sequences for both `self` and `other`, as well as ensuring /// that literals from `other` are drained even if they aren't used. fn cross_preamble<'a>( &'a mut self, other: &'a mut Seq, ) -> Option<(&'a mut Vec<Literal>, &'a mut Vec<Literal>)> { let lits2 = match other.literals { None => { // If our current seq contains the empty string and the seq // we're adding matches any literal, then it follows that the // current seq must now also match any literal. // // Otherwise, we just have to make sure everything in this // sequence is inexact. 
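                // For example, if this sequence is [E(""), E("foo")] and
                // 'other' is infinite, then the combined expression could
                // start (or, in the reverse case, end) with anything at all
                // via the empty string, so this sequence must become
                // infinite as well. If this sequence is just [E("foo")],
                // it is enough to downgrade it to [I("foo")]: "foo" remains
                // a valid literal, it just can no longer be extended or
                // treated as exact.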
if self.min_literal_len() == Some(0) { *self = Seq::infinite(); } else { self.make_inexact(); } return None; } Some(ref mut lits) => lits, }; let lits1 = match self.literals { None => { // If we aren't going to make it to the end of this routine // where lits2 is drained, then we need to do it now. lits2.drain(..); return None; } Some(ref mut lits) => lits, }; Some((lits1, lits2)) } /// Unions the `other` sequence into this one. /// /// The literals are always drained out of the given `other` sequence, /// even if they are being unioned into an infinite sequence. This permits /// the caller to reuse the `other` sequence in another context. /// /// Some literal deduping may be performed. If any deduping happens, /// any leftmost-first or "preference" order match semantics will be /// preserved. /// /// # Example /// /// This example shows basic usage. /// /// ``` /// use regex_syntax::hir::literal::Seq; /// /// let mut seq1 = Seq::new(&["foo", "bar"]); /// let mut seq2 = Seq::new(&["bar", "quux", "foo"]); /// seq1.union(&mut seq2); /// /// // The literals are pulled out of seq2. /// assert_eq!(Some(0), seq2.len()); /// /// // Adjacent literals are deduped, but non-adjacent literals may not be. /// assert_eq!(Seq::new(&["foo", "bar", "quux", "foo"]), seq1); /// ``` /// /// This example shows that literals are drained from `other` even when /// they aren't necessarily used. /// /// ``` /// use regex_syntax::hir::literal::Seq; /// /// let mut seq1 = Seq::infinite(); /// // Infinite sequences have no finite length. /// assert_eq!(None, seq1.len()); /// /// let mut seq2 = Seq::new(&["bar", "quux", "foo"]); /// seq1.union(&mut seq2); /// /// // seq1 is still infinite and seq2 has been drained. /// assert_eq!(None, seq1.len()); /// assert_eq!(Some(0), seq2.len()); /// ``` #[inline] pub fn union(&mut self, other: &mut Seq) { let lits2 = match other.literals { None => { // Unioning with an infinite sequence always results in an // infinite sequence. self.make_infinite(); return; } Some(ref mut lits) => lits.drain(..), }; let lits1 = match self.literals { None => return, Some(ref mut lits) => lits, }; lits1.extend(lits2); self.dedup(); } /// Unions the `other` sequence into this one by splice the `other` /// sequence at the position of the first zero-length literal. /// /// This is useful for preserving preference order semantics when combining /// two literal sequences. For example, in the regex `(a||f)+foo`, the /// correct preference order prefix sequence is `[a, foo, f]`. /// /// The literals are always drained out of the given `other` sequence, /// even if they are being unioned into an infinite sequence. This permits /// the caller to reuse the `other` sequence in another context. Note that /// the literals are drained even if no union is performed as well, i.e., /// when this sequence does not contain a zero-length literal. /// /// Some literal deduping may be performed. If any deduping happens, /// any leftmost-first or "preference" order match semantics will be /// preserved. /// /// # Example /// /// This example shows basic usage. /// /// ``` /// use regex_syntax::hir::literal::Seq; /// /// let mut seq1 = Seq::new(&["a", "", "f", ""]); /// let mut seq2 = Seq::new(&["foo"]); /// seq1.union_into_empty(&mut seq2); /// /// // The literals are pulled out of seq2. /// assert_eq!(Some(0), seq2.len()); /// // 'foo' gets spliced into seq1 where the first empty string occurs. 
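    /// // Note that the trailing empty string is removed as well: the splice
    /// // drops every zero-length literal from seq1, not just the first one.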
/// assert_eq!(Seq::new(&["a", "foo", "f"]), seq1); /// ``` /// /// This example shows that literals are drained from `other` even when /// they aren't necessarily used. /// /// ``` /// use regex_syntax::hir::literal::Seq; /// /// let mut seq1 = Seq::new(&["foo", "bar"]); /// let mut seq2 = Seq::new(&["bar", "quux", "foo"]); /// seq1.union_into_empty(&mut seq2); /// /// // seq1 has no zero length literals, so no splicing happens. /// assert_eq!(Seq::new(&["foo", "bar"]), seq1); /// // Even though no splicing happens, seq2 is still drained. /// assert_eq!(Some(0), seq2.len()); /// ``` #[inline] pub fn union_into_empty(&mut self, other: &mut Seq) { let lits2 = other.literals.as_mut().map(|lits| lits.drain(..)); let lits1 = match self.literals { None => return, Some(ref mut lits) => lits, }; let first_empty = match lits1.iter().position(|m| m.is_empty()) { None => return, Some(i) => i, }; let lits2 = match lits2 { None => { // Note that we are only here if we've found an empty literal, // which implies that an infinite sequence infects this seq and // also turns it into an infinite sequence. self.literals = None; return; } Some(lits) => lits, }; // Clearing out the empties needs to come before the splice because // the splice might add more empties that we don't want to get rid // of. Since we're splicing into the position of the first empty, the // 'first_empty' position computed above is still correct. lits1.retain(|m| !m.is_empty()); lits1.splice(first_empty..first_empty, lits2); self.dedup(); } /// Deduplicate adjacent equivalent literals in this sequence. /// /// If adjacent literals are equivalent strings but one is exact and the /// other inexact, the inexact literal is kept and the exact one is /// removed. /// /// Deduping an infinite sequence is a no-op. /// /// # Example /// /// This example shows how literals that are duplicate byte strings but /// are not equivalent with respect to exactness are resolved. /// /// ``` /// use regex_syntax::hir::literal::{Literal, Seq}; /// /// let mut seq = Seq::from_iter([ /// Literal::exact("foo"), /// Literal::inexact("foo"), /// ]); /// seq.dedup(); /// /// assert_eq!(Seq::from_iter([Literal::inexact("foo")]), seq); /// ``` #[inline] pub fn dedup(&mut self) { if let Some(ref mut lits) = self.literals { lits.dedup_by(|lit1, lit2| { if lit1.as_bytes() != lit2.as_bytes() { return false; } if lit1.is_exact() != lit2.is_exact() { lit1.make_inexact(); lit2.make_inexact(); } true }); } } /// Sorts this sequence of literals lexicographically. /// /// Note that if, before sorting, if a literal that is a prefix of another /// literal appears after it, then after sorting, the sequence will not /// represent the same preference order match semantics. For example, /// sorting the sequence `[samwise, sam]` yields the sequence `[sam, /// samwise]`. Under preference order semantics, the latter sequence will /// never match `samwise` where as the first sequence can. /// /// # Example /// /// This example shows basic usage. /// /// ``` /// use regex_syntax::hir::literal::Seq; /// /// let mut seq = Seq::new(&["foo", "quux", "bar"]); /// seq.sort(); /// /// assert_eq!(Seq::new(&["bar", "foo", "quux"]), seq); /// ``` #[inline] pub fn sort(&mut self) { if let Some(ref mut lits) = self.literals { lits.sort(); } } /// Reverses all of the literals in this sequence. /// /// The order of the sequence itself is preserved. /// /// # Example /// /// This example shows basic usage. 
/// /// ``` /// use regex_syntax::hir::literal::Seq; /// /// let mut seq = Seq::new(&["oof", "rab"]); /// seq.reverse_literals(); /// assert_eq!(Seq::new(&["foo", "bar"]), seq); /// ``` #[inline] pub fn reverse_literals(&mut self) { if let Some(ref mut lits) = self.literals { for lit in lits.iter_mut() { lit.reverse(); } } } /// Shrinks this seq to its minimal size while respecting the preference /// order of its literals. /// /// While this routine will remove duplicate literals from this seq, it /// will also remove literals that can never match in a leftmost-first or /// "preference order" search. Similar to [`Seq::dedup`], if a literal is /// deduped, then the one that remains is made inexact. /// /// This is a no-op on seqs that are empty or not finite. /// /// # Example /// /// This example shows the difference between `{sam, samwise}` and /// `{samwise, sam}`. /// /// ``` /// use regex_syntax::hir::literal::{Literal, Seq}; /// /// // If 'sam' comes before 'samwise' and a preference order search is /// // executed, then 'samwise' can never match. /// let mut seq = Seq::new(&["sam", "samwise"]); /// seq.minimize_by_preference(); /// assert_eq!(Seq::from_iter([Literal::inexact("sam")]), seq); /// /// // But if they are reversed, then it's possible for 'samwise' to match /// // since it is given higher preference. /// let mut seq = Seq::new(&["samwise", "sam"]); /// seq.minimize_by_preference(); /// assert_eq!(Seq::new(&["samwise", "sam"]), seq); /// ``` /// /// This example shows that if an empty string is in this seq, then /// anything that comes after it can never match. /// /// ``` /// use regex_syntax::hir::literal::{Literal, Seq}; /// /// // An empty string is a prefix of all strings, so it automatically /// // inhibits any subsequent strings from matching. /// let mut seq = Seq::new(&["foo", "bar", "", "quux", "fox"]); /// seq.minimize_by_preference(); /// let expected = Seq::from_iter([ /// Literal::exact("foo"), /// Literal::exact("bar"), /// Literal::inexact(""), /// ]); /// assert_eq!(expected, seq); /// /// // And of course, if it's at the beginning, then it makes it impossible /// // for anything else to match. /// let mut seq = Seq::new(&["", "foo", "quux", "fox"]); /// seq.minimize_by_preference(); /// assert_eq!(Seq::from_iter([Literal::inexact("")]), seq); /// ``` #[inline] pub fn minimize_by_preference(&mut self) { if let Some(ref mut lits) = self.literals { PreferenceTrie::minimize(lits, false); } } /// Trims all literals in this seq such that only the first `len` bytes /// remain. If a literal has less than or equal to `len` bytes, then it /// remains unchanged. Otherwise, it is trimmed and made inexact. /// /// # Example /// /// ``` /// use regex_syntax::hir::literal::{Literal, Seq}; /// /// let mut seq = Seq::new(&["a", "foo", "quux"]); /// seq.keep_first_bytes(2); /// /// let expected = Seq::from_iter([ /// Literal::exact("a"), /// Literal::inexact("fo"), /// Literal::inexact("qu"), /// ]); /// assert_eq!(expected, seq); /// ``` #[inline] pub fn keep_first_bytes(&mut self, len: usize) { if let Some(ref mut lits) = self.literals { for m in lits.iter_mut() { m.keep_first_bytes(len); } } } /// Trims all literals in this seq such that only the last `len` bytes /// remain. If a literal has less than or equal to `len` bytes, then it /// remains unchanged. Otherwise, it is trimmed and made inexact. 
/// /// # Example /// /// ``` /// use regex_syntax::hir::literal::{Literal, Seq}; /// /// let mut seq = Seq::new(&["a", "foo", "quux"]); /// seq.keep_last_bytes(2); /// /// let expected = Seq::from_iter([ /// Literal::exact("a"), /// Literal::inexact("oo"), /// Literal::inexact("ux"), /// ]); /// assert_eq!(expected, seq); /// ``` #[inline] pub fn keep_last_bytes(&mut self, len: usize) { if let Some(ref mut lits) = self.literals { for m in lits.iter_mut() { m.keep_last_bytes(len); } } } /// Returns true if this sequence is finite. /// /// When false, this sequence is infinite and must be treated as if it /// contains every possible literal. #[inline] pub fn is_finite(&self) -> bool { self.literals.is_some() } /// Returns true if and only if this sequence is finite and empty. /// /// An empty sequence never matches anything. It can only be produced by /// literal extraction when the corresponding regex itself cannot match. #[inline] pub fn is_empty(&self) -> bool { self.len() == Some(0) } /// Returns the number of literals in this sequence if the sequence is /// finite. If the sequence is infinite, then `None` is returned. #[inline] pub fn len(&self) -> Option<usize> { self.literals.as_ref().map(|lits| lits.len()) } /// Returns true if and only if all literals in this sequence are exact. /// /// This returns false if the sequence is infinite. #[inline] pub fn is_exact(&self) -> bool { self.literals().map_or(false, |lits| lits.iter().all(|x| x.is_exact())) } /// Returns true if and only if all literals in this sequence are inexact. /// /// This returns true if the sequence is infinite. #[inline] pub fn is_inexact(&self) -> bool { self.literals().map_or(true, |lits| lits.iter().all(|x| !x.is_exact())) } /// Return the maximum length of the sequence that would result from /// unioning `self` with `other`. If either set is infinite, then this /// returns `None`. #[inline] pub fn max_union_len(&self, other: &Seq) -> Option<usize> { let len1 = self.len()?; let len2 = other.len()?; Some(len1.saturating_add(len2)) } /// Return the maximum length of the sequence that would result from the /// cross product of `self` with `other`. If either set is infinite, then /// this returns `None`. #[inline] pub fn max_cross_len(&self, other: &Seq) -> Option<usize> { let len1 = self.len()?; let len2 = other.len()?; Some(len1.saturating_mul(len2)) } /// Returns the length of the shortest literal in this sequence. /// /// If the sequence is infinite or empty, then this returns `None`. #[inline] pub fn min_literal_len(&self) -> Option<usize> { self.literals.as_ref()?.iter().map(|x| x.len()).min() } /// Returns the length of the longest literal in this sequence. /// /// If the sequence is infinite or empty, then this returns `None`. #[inline] pub fn max_literal_len(&self) -> Option<usize> { self.literals.as_ref()?.iter().map(|x| x.len()).max() } /// Returns the longest common prefix from this seq. /// /// If the seq matches any literal or other contains no literals, then /// there is no meaningful prefix and this returns `None`. /// /// # Example /// /// This shows some example seqs and their longest common prefix. 
/// /// ``` /// use regex_syntax::hir::literal::Seq; /// /// let seq = Seq::new(&["foo", "foobar", "fo"]); /// assert_eq!(Some(&b"fo"[..]), seq.longest_common_prefix()); /// let seq = Seq::new(&["foo", "foo"]); /// assert_eq!(Some(&b"foo"[..]), seq.longest_common_prefix()); /// let seq = Seq::new(&["foo", "bar"]); /// assert_eq!(Some(&b""[..]), seq.longest_common_prefix()); /// let seq = Seq::new(&[""]); /// assert_eq!(Some(&b""[..]), seq.longest_common_prefix()); /// /// let seq = Seq::infinite(); /// assert_eq!(None, seq.longest_common_prefix()); /// let seq = Seq::empty(); /// assert_eq!(None, seq.longest_common_prefix()); /// ``` #[inline] pub fn longest_common_prefix(&self) -> Option<&[u8]> { // If we match everything or match nothing, then there's no meaningful // longest common prefix. let lits = match self.literals { None => return None, Some(ref lits) => lits, }; if lits.len() == 0 { return None; } let base = lits[0].as_bytes(); let mut len = base.len(); for m in lits.iter().skip(1) { len = m .as_bytes() .iter() .zip(base[..len].iter()) .take_while(|&(a, b)| a == b) .count(); if len == 0 { return Some(&[]); } } Some(&base[..len]) } /// Returns the longest common suffix from this seq. /// /// If the seq matches any literal or other contains no literals, then /// there is no meaningful suffix and this returns `None`. /// /// # Example /// /// This shows some example seqs and their longest common suffix. /// /// ``` /// use regex_syntax::hir::literal::Seq; /// /// let seq = Seq::new(&["oof", "raboof", "of"]); /// assert_eq!(Some(&b"of"[..]), seq.longest_common_suffix()); /// let seq = Seq::new(&["foo", "foo"]); /// assert_eq!(Some(&b"foo"[..]), seq.longest_common_suffix()); /// let seq = Seq::new(&["foo", "bar"]); /// assert_eq!(Some(&b""[..]), seq.longest_common_suffix()); /// let seq = Seq::new(&[""]); /// assert_eq!(Some(&b""[..]), seq.longest_common_suffix()); /// /// let seq = Seq::infinite(); /// assert_eq!(None, seq.longest_common_suffix()); /// let seq = Seq::empty(); /// assert_eq!(None, seq.longest_common_suffix()); /// ``` #[inline] pub fn longest_common_suffix(&self) -> Option<&[u8]> { // If we match everything or match nothing, then there's no meaningful // longest common suffix. let lits = match self.literals { None => return None, Some(ref lits) => lits, }; if lits.len() == 0 { return None; } let base = lits[0].as_bytes(); let mut len = base.len(); for m in lits.iter().skip(1) { len = m .as_bytes() .iter() .rev() .zip(base[base.len() - len..].iter().rev()) .take_while(|&(a, b)| a == b) .count(); if len == 0 { return Some(&[]); } } Some(&base[base.len() - len..]) } /// Optimizes this seq while treating its literals as prefixes and /// respecting the preference order of its literals. /// /// The specific way "optimization" works is meant to be an implementation /// detail, as it essentially represents a set of heuristics. The goal /// that optimization tries to accomplish is to make the literals in this /// set reflect inputs that will result in a more effective prefilter. /// Principally by reducing the false positive rate of candidates found by /// the literals in this sequence. That is, when a match of a literal is /// found, we would like it to be a strong predictor of the overall match /// of the regex. If it isn't, then much time will be spent starting and /// stopping the prefilter search and attempting to confirm the match only /// to have it fail. 
/// /// Some of those heuristics might be: /// /// * Identifying a common prefix from a larger sequence of literals, and /// shrinking the sequence down to that single common prefix. /// * Rejecting the sequence entirely if it is believed to result in very /// high false positive rate. When this happens, the sequence is made /// infinite. /// * Shrinking the sequence to a smaller number of literals representing /// prefixes, but not shrinking it so much as to make literals too short. /// (A sequence with very short literals, of 1 or 2 bytes, will typically /// result in a higher false positive rate.) /// /// Optimization should only be run once extraction is complete. Namely, /// optimization may make assumptions that do not compose with other /// operations in the middle of extraction. For example, optimization will /// reduce `[E(sam), E(samwise)]` to `[E(sam)]`, but such a transformation /// is only valid if no other extraction will occur. If other extraction /// may occur, then the correct transformation would be to `[I(sam)]`. /// /// The [`Seq::optimize_for_suffix_by_preference`] does the same thing, but /// for suffixes. /// /// # Example /// /// This shows how optimization might transform a sequence. Note that /// the specific behavior is not a documented guarantee. The heuristics /// used are an implementation detail and may change over time in semver /// compatible releases. /// /// ``` /// use regex_syntax::hir::literal::{Seq, Literal}; /// /// let mut seq = Seq::new(&[ /// "samantha", /// "sam", /// "samwise", /// "frodo", /// ]); /// seq.optimize_for_prefix_by_preference(); /// assert_eq!(Seq::from_iter([ /// Literal::exact("samantha"), /// // Kept exact even though 'samwise' got pruned /// // because optimization assumes literal extraction /// // has finished. /// Literal::exact("sam"), /// Literal::exact("frodo"), /// ]), seq); /// ``` /// /// # Example: optimization may make the sequence infinite /// /// If the heuristics deem that the sequence could cause a very high false /// positive rate, then it may make the sequence infinite, effectively /// disabling its use as a prefilter. /// /// ``` /// use regex_syntax::hir::literal::{Seq, Literal}; /// /// let mut seq = Seq::new(&[ /// "samantha", /// // An empty string matches at every position, /// // thus rendering the prefilter completely /// // ineffective. /// "", /// "sam", /// "samwise", /// "frodo", /// ]); /// seq.optimize_for_prefix_by_preference(); /// assert!(!seq.is_finite()); /// ``` /// /// Do note that just because there is a `" "` in the sequence, that /// doesn't mean the sequence will always be made infinite after it is /// optimized. Namely, if the sequence is considered exact (any match /// corresponds to an overall match of the original regex), then any match /// is an overall match, and so the false positive rate is always `0`. /// /// To demonstrate this, we remove `samwise` from our sequence. This /// results in no optimization happening and all literals remain exact. 
/// Thus the entire sequence is exact, and it is kept as-is, even though /// one is an ASCII space: /// /// ``` /// use regex_syntax::hir::literal::{Seq, Literal}; /// /// let mut seq = Seq::new(&[ /// "samantha", /// " ", /// "sam", /// "frodo", /// ]); /// seq.optimize_for_prefix_by_preference(); /// assert!(seq.is_finite()); /// ``` #[inline] pub fn optimize_for_prefix_by_preference(&mut self) { self.optimize_by_preference(true); } /// Optimizes this seq while treating its literals as suffixes and /// respecting the preference order of its literals. /// /// Optimization should only be run once extraction is complete. /// /// The [`Seq::optimize_for_prefix_by_preference`] does the same thing, but /// for prefixes. See its documentation for more explanation. #[inline] pub fn optimize_for_suffix_by_preference(&mut self) { self.optimize_by_preference(false); } fn optimize_by_preference(&mut self, prefix: bool) { let origlen = match self.len() { None => return, Some(len) => len, }; // Just give up now if our sequence contains an empty string. if self.min_literal_len().map_or(false, |len| len == 0) { // We squash the sequence so that nobody else gets any bright // ideas to try and use it. An empty string implies a match at // every position. A prefilter cannot help you here. self.make_infinite(); return; } // Make sure we start with the smallest sequence possible. We use a // special version of preference minimization that retains exactness. // This is legal because optimization is only expected to occur once // extraction is complete. if prefix { if let Some(ref mut lits) = self.literals { PreferenceTrie::minimize(lits, true); } } // Look for a common prefix (or suffix). If we found one of those and // it's long enough, then it's a good bet that it will be our fastest // possible prefilter since single-substring search is so fast. let fix = if prefix { self.longest_common_prefix() } else { self.longest_common_suffix() }; if let Some(fix) = fix { // As a special case, if we have a common prefix and the leading // byte of that prefix is one that we think probably occurs rarely, // then strip everything down to just that single byte. This should // promote the use of memchr. // // ... we only do this though if our sequence has more than one // literal. Otherwise, we'd rather just stick with a single literal // scan. That is, using memchr is probably better than looking // for 2 or more literals, but probably not as good as a straight // memmem search. // // ... and also only do this when the prefix is short and probably // not too discriminatory anyway. If it's longer, then it's // probably quite discriminatory and thus is likely to have a low // false positive rate. if prefix && origlen > 1 && fix.len() >= 1 && fix.len() <= 3 && rank(fix[0]) < 200 { self.keep_first_bytes(1); self.dedup(); return; } // We only strip down to the common prefix/suffix if we think // the existing set of literals isn't great, or if the common // prefix/suffix is expected to be particularly discriminatory. let isfast = self.is_exact() && self.len().map_or(false, |len| len <= 16); let usefix = fix.len() > 4 || (fix.len() > 1 && !isfast); if usefix { // If we keep exactly the number of bytes equal to the length // of the prefix (or suffix), then by the definition of a // prefix, every literal in the sequence will be equivalent. // Thus, 'dedup' will leave us with one literal. // // We do it this way to avoid an alloc, but also to make sure // the exactness of literals is kept (or not). 
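                // For example, a prefix sequence like
                // [E(foobarfoobar), E(foobar), E(foobarzfoobar)] has the
                // common prefix "foobar": keep_first_bytes(6) trims every
                // literal down to "foobar" (marking the trimmed ones
                // inexact), and dedup then collapses the whole sequence to
                // the single literal [I(foobar)].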
if prefix { self.keep_first_bytes(fix.len()); } else { self.keep_last_bytes(fix.len()); } self.dedup(); assert_eq!(Some(1), self.len()); // We still fall through here. In particular, we want our // longest common prefix to be subject to the poison check. } } // If we have an exact sequence, we *probably* just want to keep it // as-is. But there are some cases where we don't. So we save a copy of // the exact sequence now, and then try to do some more optimizations // below. If those don't work out, we go back to this exact sequence. // // The specific motivation for this is that we sometimes wind up with // an exact sequence with a hefty number of literals. Say, 100. If we // stuck with that, it would be too big for Teddy and would result in // using Aho-Corasick. Which is fine... but the lazy DFA is plenty // suitable in such cases. The real issue is that we will wind up not // using a fast prefilter at all. So in cases like this, even though // we have an exact sequence, it would be better to try and shrink the // sequence (which we do below) and use it as a prefilter that can // produce false positive matches. // // But if the shrinking below results in a sequence that "sucks," then // we don't want to use that because we already have an exact sequence // in hand. let exact: Option<Seq> = if self.is_exact() { Some(self.clone()) } else { None }; // Now we attempt to shorten the sequence. The idea here is that we // don't want to look for too many literals, but we want to shorten // our sequence enough to improve our odds of using better algorithms // downstream (such as Teddy). // // The pair of numbers in this list corresponds to the maximal prefix // (in bytes) to keep for all literals and the length of the sequence // at which to do it. // // So for example, the pair (3, 500) would mean, "if we have more than // 500 literals in our sequence, then truncate all of our literals // such that they are at most 3 bytes in length and the minimize the // sequence." const ATTEMPTS: [(usize, usize); 5] = [(5, 10), (4, 10), (3, 64), (2, 64), (1, 10)]; for (keep, limit) in ATTEMPTS { let len = match self.len() { None => break, Some(len) => len, }; if len <= limit { break; } if prefix { self.keep_first_bytes(keep); } else { self.keep_last_bytes(keep); } if prefix { if let Some(ref mut lits) = self.literals { PreferenceTrie::minimize(lits, true); } } } // Check for a poison literal. A poison literal is one that is short // and is believed to have a very high match count. These poisons // generally lead to a prefilter with a very high false positive rate, // and thus overall worse performance. // // We do this last because we could have gone from a non-poisonous // sequence to a poisonous one. Perhaps we should add some code to // prevent such transitions in the first place, but then again, we // likely only made the transition in the first place if the sequence // was itself huge. And huge sequences are themselves poisonous. So... if let Some(lits) = self.literals() { if lits.iter().any(|lit| lit.is_poisonous()) { self.make_infinite(); } } // OK, if we had an exact sequence before attempting more optimizations // above and our post-optimized sequence sucks for some reason or // another, then we go back to the exact sequence. if let Some(exact) = exact { // If optimizing resulted in dropping our literals, then certainly // backup and use the exact sequence that we had. 
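            // For example, an exact sequence containing a lone ASCII space
            // is flagged as poisonous above and made infinite, but because
            // every match of an exact sequence is a genuine match, it is
            // better to restore the saved copy than to give up on a
            // prefilter entirely.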
if !self.is_finite() { *self = exact; return; } // If our optimized sequence contains a short literal, then it's // *probably* not so great. So throw it away and revert to the // exact sequence. if self.min_literal_len().map_or(true, |len| len <= 2) { *self = exact; return; } // Finally, if our optimized sequence is "big" (i.e., can't use // Teddy), then also don't use it and rely on the exact sequence. if self.len().map_or(true, |len| len > 64) { *self = exact; return; } } } } impl core::fmt::Debug for Seq { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { write!(f, "Seq")?; if let Some(lits) = self.literals() { f.debug_list().entries(lits.iter()).finish() } else { write!(f, "[∞]") } } } impl FromIterator<Literal> for Seq { fn from_iter<T: IntoIterator<Item = Literal>>(it: T) -> Seq { let mut seq = Seq::empty(); for literal in it { seq.push(literal); } seq } } /// A single literal extracted from an [`Hir`] expression. /// /// A literal is composed of two things: /// /// * A sequence of bytes. No guarantees with respect to UTF-8 are provided. /// In particular, even if the regex a literal is extracted from is UTF-8, the /// literal extracted may not be valid UTF-8. (For example, if an [`Extractor`] /// limit resulted in trimming a literal in a way that splits a codepoint.) /// * Whether the literal is "exact" or not. An "exact" literal means that it /// has not been trimmed, and may continue to be extended. If a literal is /// "exact" after visiting the entire `Hir` expression, then this implies that /// the literal leads to a match state. (Although it doesn't necessarily imply /// all occurrences of the literal correspond to a match of the regex, since /// literal extraction ignores look-around assertions.) #[derive(Clone, Eq, PartialEq, PartialOrd, Ord)] pub struct Literal { bytes: Vec<u8>, exact: bool, } impl Literal { /// Returns a new exact literal containing the bytes given. #[inline] pub fn exact<B: Into<Vec<u8>>>(bytes: B) -> Literal { Literal { bytes: bytes.into(), exact: true } } /// Returns a new inexact literal containing the bytes given. #[inline] pub fn inexact<B: Into<Vec<u8>>>(bytes: B) -> Literal { Literal { bytes: bytes.into(), exact: false } } /// Returns the bytes in this literal. #[inline] pub fn as_bytes(&self) -> &[u8] { &self.bytes } /// Yields ownership of the bytes inside this literal. /// /// Note that this throws away whether the literal is "exact" or not. #[inline] pub fn into_bytes(self) -> Vec<u8> { self.bytes } /// Returns the length of this literal in bytes. #[inline] pub fn len(&self) -> usize { self.as_bytes().len() } /// Returns true if and only if this literal has zero bytes. #[inline] pub fn is_empty(&self) -> bool { self.len() == 0 } /// Returns true if and only if this literal is exact. #[inline] pub fn is_exact(&self) -> bool { self.exact } /// Marks this literal as inexact. /// /// Inexact literals can never be extended. For example, /// [`Seq::cross_forward`] will not extend inexact literals. #[inline] pub fn make_inexact(&mut self) { self.exact = false; } /// Reverse the bytes in this literal. #[inline] pub fn reverse(&mut self) { self.bytes.reverse(); } /// Extend this literal with the literal given. /// /// If this literal is inexact, then this is a no-op. #[inline] pub fn extend(&mut self, lit: &Literal) { if !self.is_exact() { return; } self.bytes.extend_from_slice(&lit.bytes); } /// Trims this literal such that only the first `len` bytes remain. If /// this literal has fewer than `len` bytes, then it remains unchanged. 
/// Otherwise, the literal is marked as inexact. #[inline] pub fn keep_first_bytes(&mut self, len: usize) { if len >= self.len() { return; } self.make_inexact(); self.bytes.truncate(len); } /// Trims this literal such that only the last `len` bytes remain. If this /// literal has fewer than `len` bytes, then it remains unchanged. /// Otherwise, the literal is marked as inexact. #[inline] pub fn keep_last_bytes(&mut self, len: usize) { if len >= self.len() { return; } self.make_inexact(); self.bytes.drain(..self.len() - len); } /// Returns true if it is believe that this literal is likely to match very /// frequently, and is thus not a good candidate for a prefilter. fn is_poisonous(&self) -> bool { self.is_empty() || (self.len() == 1 && rank(self.as_bytes()[0]) >= 250) } } impl From<u8> for Literal { fn from(byte: u8) -> Literal { Literal::exact(vec![byte]) } } impl From<char> for Literal { fn from(ch: char) -> Literal { use alloc::string::ToString; Literal::exact(ch.encode_utf8(&mut [0; 4]).to_string()) } } impl AsRef<[u8]> for Literal { fn as_ref(&self) -> &[u8] { self.as_bytes() } } impl core::fmt::Debug for Literal { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { let tag = if self.exact { "E" } else { "I" }; f.debug_tuple(tag) .field(&crate::debug::Bytes(self.as_bytes())) .finish() } } /// A "preference" trie that rejects literals that will never match when /// executing a leftmost first or "preference" search. /// /// For example, if 'sam' is inserted, then trying to insert 'samwise' will be /// rejected because 'samwise' can never match since 'sam' will always take /// priority. However, if 'samwise' is inserted first, then inserting 'sam' /// after it is accepted. In this case, either 'samwise' or 'sam' can match in /// a "preference" search. /// /// Note that we only use this trie as a "set." That is, given a sequence of /// literals, we insert each one in order. An `insert` will reject a literal /// if a prefix of that literal already exists in the trie. Thus, to rebuild /// the "minimal" sequence, we simply only keep literals that were successfully /// inserted. (Since we don't need traversal, one wonders whether we can make /// some simplifications here, but I haven't given it a ton of thought and I've /// never seen this show up on a profile. Because of the heuristic limits /// imposed on literal extractions, the size of the inputs here is usually /// very small.) #[derive(Debug)] struct PreferenceTrie { /// The states in this trie. The index of a state in this vector is its ID. states: Vec<State>, /// This vec indicates which states are match states. It always has /// the same length as `states` and is indexed by the same state ID. /// A state with identifier `sid` is a match state if and only if /// `matches[sid].is_some()`. The option contains the index of the literal /// corresponding to the match. The index is offset by 1 so that it fits in /// a NonZeroUsize. matches: Vec<Option<NonZeroUsize>>, /// The index to allocate to the next literal added to this trie. Starts at /// 1 and increments by 1 for every literal successfully added to the trie. next_literal_index: usize, } /// A single state in a trie. Uses a sparse representation for its transitions. #[derive(Debug, Default)] struct State { /// Sparse representation of the transitions out of this state. Transitions /// are sorted by byte. There is at most one such transition for any /// particular byte. 
trans: Vec<(u8, usize)>, } impl PreferenceTrie { /// Minimizes the given sequence of literals while preserving preference /// order semantics. /// /// When `keep_exact` is true, the exactness of every literal retained is /// kept. This is useful when dealing with a fully extracted `Seq` that /// only contains exact literals. In that case, we can keep all retained /// literals as exact because we know we'll never need to match anything /// after them and because any removed literals are guaranteed to never /// match. fn minimize(literals: &mut Vec<Literal>, keep_exact: bool) { use core::cell::RefCell; // MSRV(1.61): Use retain_mut here to avoid interior mutability. let trie = RefCell::new(PreferenceTrie { states: vec![], matches: vec![], next_literal_index: 1, }); let mut make_inexact = vec![]; literals.retain(|lit| { match trie.borrow_mut().insert(lit.as_bytes()) { Ok(_) => true, Err(i) => { if !keep_exact { make_inexact.push(i.checked_sub(1).unwrap()); } false } } }); for i in make_inexact { literals[i].make_inexact(); } } /// Returns `Ok` if the given byte string is accepted into this trie and /// `Err` otherwise. The index for the success case corresponds to the /// index of the literal added. The index for the error case corresponds to /// the index of the literal already in the trie that prevented the given /// byte string from being added. (Which implies it is a prefix of the one /// given.) /// /// In short, the byte string given is accepted into the trie if and only /// if it is possible for it to match when executing a preference order /// search. fn insert(&mut self, bytes: &[u8]) -> Result<usize, usize> { let mut prev = self.root(); if let Some(idx) = self.matches[prev] { return Err(idx.get()); } for &b in bytes.iter() { match self.states[prev].trans.binary_search_by_key(&b, |t| t.0) { Ok(i) => { prev = self.states[prev].trans[i].1; if let Some(idx) = self.matches[prev] { return Err(idx.get()); } } Err(i) => { let next = self.create_state(); self.states[prev].trans.insert(i, (b, next)); prev = next; } } } let idx = self.next_literal_index; self.next_literal_index += 1; self.matches[prev] = NonZeroUsize::new(idx); Ok(idx) } /// Returns the root state ID, and if it doesn't exist, creates it. fn root(&mut self) -> usize { if !self.states.is_empty() { 0 } else { self.create_state() } } /// Creates a new empty state and returns its ID. fn create_state(&mut self) -> usize { let id = self.states.len(); self.states.push(State::default()); self.matches.push(None); id } } /// Returns the "rank" of the given byte. /// /// The minimum rank value is `0` and the maximum rank value is `255`. /// /// The rank of a byte is derived from a heuristic background distribution of /// relative frequencies of bytes. The heuristic says that lower the rank of a /// byte, the less likely that byte is to appear in any arbitrary haystack. 
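/// For example, under the current heuristic table, bytes that are common in
/// typical haystacks (such as ASCII letters and the space character) tend to
/// rank near the top of the range, while most control bytes rank much lower.
/// The exact values are an implementation detail and may change over time.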
pub fn rank(byte: u8) -> u8 { crate::rank::BYTE_FREQUENCIES[usize::from(byte)] } #[cfg(test)] mod tests { use super::*; fn parse(pattern: &str) -> Hir { crate::ParserBuilder::new().utf8(false).build().parse(pattern).unwrap() } fn prefixes(pattern: &str) -> Seq { Extractor::new().kind(ExtractKind::Prefix).extract(&parse(pattern)) } fn suffixes(pattern: &str) -> Seq { Extractor::new().kind(ExtractKind::Suffix).extract(&parse(pattern)) } fn e(pattern: &str) -> (Seq, Seq) { (prefixes(pattern), suffixes(pattern)) } #[allow(non_snake_case)] fn E(x: &str) -> Literal { Literal::exact(x.as_bytes()) } #[allow(non_snake_case)] fn I(x: &str) -> Literal { Literal::inexact(x.as_bytes()) } fn seq<I: IntoIterator<Item = Literal>>(it: I) -> Seq { Seq::from_iter(it) } fn infinite() -> (Seq, Seq) { (Seq::infinite(), Seq::infinite()) } fn inexact<I1, I2>(it1: I1, it2: I2) -> (Seq, Seq) where I1: IntoIterator<Item = Literal>, I2: IntoIterator<Item = Literal>, { (Seq::from_iter(it1), Seq::from_iter(it2)) } fn exact<B: AsRef<[u8]>, I: IntoIterator<Item = B>>(it: I) -> (Seq, Seq) { let s1 = Seq::new(it); let s2 = s1.clone(); (s1, s2) } fn opt<B: AsRef<[u8]>, I: IntoIterator<Item = B>>(it: I) -> (Seq, Seq) { let (mut p, mut s) = exact(it); p.optimize_for_prefix_by_preference(); s.optimize_for_suffix_by_preference(); (p, s) } #[test] fn literal() { assert_eq!(exact(["a"]), e("a")); assert_eq!(exact(["aaaaa"]), e("aaaaa")); assert_eq!(exact(["A", "a"]), e("(?i-u)a")); assert_eq!(exact(["AB", "Ab", "aB", "ab"]), e("(?i-u)ab")); assert_eq!(exact(["abC", "abc"]), e("ab(?i-u)c")); assert_eq!(exact([b"\xFF"]), e(r"(?-u:\xFF)")); #[cfg(feature = "unicode-case")] { assert_eq!(exact(["☃"]), e("☃")); assert_eq!(exact(["☃"]), e("(?i)☃")); assert_eq!(exact(["☃☃☃☃☃"]), e("☃☃☃☃☃")); assert_eq!(exact(["Δ"]), e("Δ")); assert_eq!(exact(["δ"]), e("δ")); assert_eq!(exact(["Δ", "δ"]), e("(?i)Δ")); assert_eq!(exact(["Δ", "δ"]), e("(?i)δ")); assert_eq!(exact(["S", "s", "ſ"]), e("(?i)S")); assert_eq!(exact(["S", "s", "ſ"]), e("(?i)s")); assert_eq!(exact(["S", "s", "ſ"]), e("(?i)ſ")); } let letters = "ͱͳͷΐάέήίΰαβγδεζηθικλμνξοπρςστυφχψωϊϋ"; assert_eq!(exact([letters]), e(letters)); } #[test] fn class() { assert_eq!(exact(["a", "b", "c"]), e("[abc]")); assert_eq!(exact(["a1b", "a2b", "a3b"]), e("a[123]b")); assert_eq!(exact(["δ", "ε"]), e("[εδ]")); #[cfg(feature = "unicode-case")] { assert_eq!(exact(["Δ", "Ε", "δ", "ε", "ϵ"]), e(r"(?i)[εδ]")); } } #[test] fn look() { assert_eq!(exact(["ab"]), e(r"a\Ab")); assert_eq!(exact(["ab"]), e(r"a\zb")); assert_eq!(exact(["ab"]), e(r"a(?m:^)b")); assert_eq!(exact(["ab"]), e(r"a(?m:$)b")); assert_eq!(exact(["ab"]), e(r"a\bb")); assert_eq!(exact(["ab"]), e(r"a\Bb")); assert_eq!(exact(["ab"]), e(r"a(?-u:\b)b")); assert_eq!(exact(["ab"]), e(r"a(?-u:\B)b")); assert_eq!(exact(["ab"]), e(r"^ab")); assert_eq!(exact(["ab"]), e(r"$ab")); assert_eq!(exact(["ab"]), e(r"(?m:^)ab")); assert_eq!(exact(["ab"]), e(r"(?m:$)ab")); assert_eq!(exact(["ab"]), e(r"\bab")); assert_eq!(exact(["ab"]), e(r"\Bab")); assert_eq!(exact(["ab"]), e(r"(?-u:\b)ab")); assert_eq!(exact(["ab"]), e(r"(?-u:\B)ab")); assert_eq!(exact(["ab"]), e(r"ab^")); assert_eq!(exact(["ab"]), e(r"ab$")); assert_eq!(exact(["ab"]), e(r"ab(?m:^)")); assert_eq!(exact(["ab"]), e(r"ab(?m:$)")); assert_eq!(exact(["ab"]), e(r"ab\b")); assert_eq!(exact(["ab"]), e(r"ab\B")); assert_eq!(exact(["ab"]), e(r"ab(?-u:\b)")); assert_eq!(exact(["ab"]), e(r"ab(?-u:\B)")); let expected = (seq([I("aZ"), E("ab")]), seq([I("Zb"), E("ab")])); assert_eq!(expected, e(r"^aZ*b")); 
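        // In the case above, '^' contributes no bytes. The 'Z*' repetition
        // splits the literals: taking one or more 'Z's yields the inexact
        // prefix "aZ" (extraction can't see past an unbounded repetition),
        // while taking zero 'Z's yields the exact literal "ab". The suffixes
        // mirror this with "Zb" and "ab".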
} #[test] fn repetition() { assert_eq!(exact(["a", ""]), e(r"a?")); assert_eq!(exact(["", "a"]), e(r"a??")); assert_eq!(inexact([I("a"), E("")], [I("a"), E("")]), e(r"a*")); assert_eq!(inexact([E(""), I("a")], [E(""), I("a")]), e(r"a*?")); assert_eq!(inexact([I("a")], [I("a")]), e(r"a+")); assert_eq!(inexact([I("a")], [I("a")]), e(r"(a+)+")); assert_eq!(exact(["ab"]), e(r"aZ{0}b")); assert_eq!(exact(["aZb", "ab"]), e(r"aZ?b")); assert_eq!(exact(["ab", "aZb"]), e(r"aZ??b")); assert_eq!( inexact([I("aZ"), E("ab")], [I("Zb"), E("ab")]), e(r"aZ*b") ); assert_eq!( inexact([E("ab"), I("aZ")], [E("ab"), I("Zb")]), e(r"aZ*?b") ); assert_eq!(inexact([I("aZ")], [I("Zb")]), e(r"aZ+b")); assert_eq!(inexact([I("aZ")], [I("Zb")]), e(r"aZ+?b")); assert_eq!(exact(["aZZb"]), e(r"aZ{2}b")); assert_eq!(inexact([I("aZZ")], [I("ZZb")]), e(r"aZ{2,3}b")); assert_eq!(exact(["abc", ""]), e(r"(abc)?")); assert_eq!(exact(["", "abc"]), e(r"(abc)??")); assert_eq!(inexact([I("a"), E("b")], [I("ab"), E("b")]), e(r"a*b")); assert_eq!(inexact([E("b"), I("a")], [E("b"), I("ab")]), e(r"a*?b")); assert_eq!(inexact([I("ab")], [I("b")]), e(r"ab+")); assert_eq!(inexact([I("a"), I("b")], [I("b")]), e(r"a*b+")); // FIXME: The suffixes for this don't look quite right to me. I think // the right suffixes would be: [I(ac), I(bc), E(c)]. The main issue I // think is that suffixes are computed by iterating over concatenations // in reverse, and then [bc, ac, c] ordering is indeed correct from // that perspective. We also test a few more equivalent regexes, and // we get the same result, so it is consistent at least I suppose. // // The reason why this isn't an issue is that it only messes up // preference order, and currently, suffixes are never used in a // context where preference order matters. For prefixes it matters // because we sometimes want to use prefilters without confirmation // when all of the literals are exact (and there's no look-around). But // we never do that for suffixes. Any time we use suffixes, we always // include a confirmation step. If that ever changes, then it's likely // this bug will need to be fixed, but last time I looked, it appears // hard to do so. assert_eq!( inexact([I("a"), I("b"), E("c")], [I("bc"), I("ac"), E("c")]), e(r"a*b*c") ); assert_eq!( inexact([I("a"), I("b"), E("c")], [I("bc"), I("ac"), E("c")]), e(r"(a+)?(b+)?c") ); assert_eq!( inexact([I("a"), I("b"), E("c")], [I("bc"), I("ac"), E("c")]), e(r"(a+|)(b+|)c") ); // A few more similarish but not identical regexes. These may have a // similar problem as above. 
assert_eq!( inexact( [I("a"), I("b"), I("c"), E("")], [I("c"), I("b"), I("a"), E("")] ), e(r"a*b*c*") ); assert_eq!(inexact([I("a"), I("b"), I("c")], [I("c")]), e(r"a*b*c+")); assert_eq!(inexact([I("a"), I("b")], [I("bc")]), e(r"a*b+c")); assert_eq!(inexact([I("a"), I("b")], [I("c"), I("b")]), e(r"a*b+c*")); assert_eq!(inexact([I("ab"), E("a")], [I("b"), E("a")]), e(r"ab*")); assert_eq!( inexact([I("ab"), E("ac")], [I("bc"), E("ac")]), e(r"ab*c") ); assert_eq!(inexact([I("ab")], [I("b")]), e(r"ab+")); assert_eq!(inexact([I("ab")], [I("bc")]), e(r"ab+c")); assert_eq!( inexact([I("z"), E("azb")], [I("zazb"), E("azb")]), e(r"z*azb") ); let expected = exact(["aaa", "aab", "aba", "abb", "baa", "bab", "bba", "bbb"]); assert_eq!(expected, e(r"[ab]{3}")); let expected = inexact( [ I("aaa"), I("aab"), I("aba"), I("abb"), I("baa"), I("bab"), I("bba"), I("bbb"), ], [ I("aaa"), I("aab"), I("aba"), I("abb"), I("baa"), I("bab"), I("bba"), I("bbb"), ], ); assert_eq!(expected, e(r"[ab]{3,4}")); } #[test] fn concat() { let empty: [&str; 0] = []; assert_eq!(exact(["abcxyz"]), e(r"abc()xyz")); assert_eq!(exact(["abcxyz"]), e(r"(abc)(xyz)")); assert_eq!(exact(["abcmnoxyz"]), e(r"abc()mno()xyz")); assert_eq!(exact(empty), e(r"abc[a&&b]xyz")); assert_eq!(exact(["abcxyz"]), e(r"abc[a&&b]*xyz")); } #[test] fn alternation() { assert_eq!(exact(["abc", "mno", "xyz"]), e(r"abc|mno|xyz")); assert_eq!( inexact( [E("abc"), I("mZ"), E("mo"), E("xyz")], [E("abc"), I("Zo"), E("mo"), E("xyz")] ), e(r"abc|mZ*o|xyz") ); assert_eq!(exact(["abc", "xyz"]), e(r"abc|M[a&&b]N|xyz")); assert_eq!(exact(["abc", "MN", "xyz"]), e(r"abc|M[a&&b]*N|xyz")); assert_eq!(exact(["aaa", "aaaaa"]), e(r"(?:|aa)aaa")); assert_eq!( inexact( [I("aaa"), E(""), I("aaaaa"), E("aa")], [I("aaa"), E(""), E("aa")] ), e(r"(?:|aa)(?:aaa)*") ); assert_eq!( inexact( [E(""), I("aaa"), E("aa"), I("aaaaa")], [E(""), I("aaa"), E("aa")] ), e(r"(?:|aa)(?:aaa)*?") ); assert_eq!( inexact([E("a"), I("b"), E("")], [E("a"), I("b"), E("")]), e(r"a|b*") ); assert_eq!(inexact([E("a"), I("b")], [E("a"), I("b")]), e(r"a|b+")); assert_eq!( inexact([I("a"), E("b"), E("c")], [I("ab"), E("b"), E("c")]), e(r"a*b|c") ); assert_eq!( inexact( [E("a"), E("b"), I("c"), E("")], [E("a"), E("b"), I("c"), E("")] ), e(r"a|(?:b|c*)") ); assert_eq!( inexact( [I("a"), I("b"), E("c"), I("a"), I("ab"), E("c")], [I("ac"), I("bc"), E("c"), I("ac"), I("abc"), E("c")], ), e(r"(a|b)*c|(a|ab)*c") ); assert_eq!( exact(["abef", "abgh", "cdef", "cdgh"]), e(r"(ab|cd)(ef|gh)") ); assert_eq!( exact([ "abefij", "abefkl", "abghij", "abghkl", "cdefij", "cdefkl", "cdghij", "cdghkl", ]), e(r"(ab|cd)(ef|gh)(ij|kl)") ); assert_eq!(inexact([E("abab")], [E("abab")]), e(r"(ab){2}")); assert_eq!(inexact([I("abab")], [I("abab")]), e(r"(ab){2,3}")); assert_eq!(inexact([I("abab")], [I("abab")]), e(r"(ab){2,}")); } #[test] fn impossible() { let empty: [&str; 0] = []; assert_eq!(exact(empty), e(r"[a&&b]")); assert_eq!(exact(empty), e(r"a[a&&b]")); assert_eq!(exact(empty), e(r"[a&&b]b")); assert_eq!(exact(empty), e(r"a[a&&b]b")); assert_eq!(exact(["a", "b"]), e(r"a|[a&&b]|b")); assert_eq!(exact(["a", "b"]), e(r"a|c[a&&b]|b")); assert_eq!(exact(["a", "b"]), e(r"a|[a&&b]d|b")); assert_eq!(exact(["a", "b"]), e(r"a|c[a&&b]d|b")); assert_eq!(exact([""]), e(r"[a&&b]*")); assert_eq!(exact(["MN"]), e(r"M[a&&b]*N")); } // This tests patterns that contain something that defeats literal // detection, usually because it would blow some limit on the total number // of literals that can be returned. 
// // The main idea is that when literal extraction sees something that // it knows will blow a limit, it replaces it with a marker that says // "any literal will match here." While not necessarily true, the // over-estimation is just fine for the purposes of literal extraction, // because the imprecision doesn't matter: too big is too big. // // This is one of the trickier parts of literal extraction, since we need // to make sure all of our literal extraction operations correctly compose // with the markers. #[test] fn anything() { assert_eq!(infinite(), e(r".")); assert_eq!(infinite(), e(r"(?s).")); assert_eq!(infinite(), e(r"[A-Za-z]")); assert_eq!(infinite(), e(r"[A-Z]")); assert_eq!(exact([""]), e(r"[A-Z]{0}")); assert_eq!(infinite(), e(r"[A-Z]?")); assert_eq!(infinite(), e(r"[A-Z]*")); assert_eq!(infinite(), e(r"[A-Z]+")); assert_eq!((seq([I("1")]), Seq::infinite()), e(r"1[A-Z]")); assert_eq!((seq([I("1")]), seq([I("2")])), e(r"1[A-Z]2")); assert_eq!((Seq::infinite(), seq([I("123")])), e(r"[A-Z]+123")); assert_eq!(infinite(), e(r"[A-Z]+123[A-Z]+")); assert_eq!(infinite(), e(r"1|[A-Z]|3")); assert_eq!( (seq([E("1"), I("2"), E("3")]), Seq::infinite()), e(r"1|2[A-Z]|3"), ); assert_eq!( (Seq::infinite(), seq([E("1"), I("2"), E("3")])), e(r"1|[A-Z]2|3"), ); assert_eq!( (seq([E("1"), I("2"), E("4")]), seq([E("1"), I("3"), E("4")])), e(r"1|2[A-Z]3|4"), ); assert_eq!((Seq::infinite(), seq([I("2")])), e(r"(?:|1)[A-Z]2")); assert_eq!(inexact([I("a")], [I("z")]), e(r"a.z")); } // Like the 'anything' test, but it uses smaller limits in order to test // the logic for effectively aborting literal extraction when the seqs get // too big. #[test] fn anything_small_limits() { fn prefixes(pattern: &str) -> Seq { Extractor::new() .kind(ExtractKind::Prefix) .limit_total(10) .extract(&parse(pattern)) } fn suffixes(pattern: &str) -> Seq { Extractor::new() .kind(ExtractKind::Suffix) .limit_total(10) .extract(&parse(pattern)) } fn e(pattern: &str) -> (Seq, Seq) { (prefixes(pattern), suffixes(pattern)) } assert_eq!( ( seq([ I("aaa"), I("aab"), I("aba"), I("abb"), I("baa"), I("bab"), I("bba"), I("bbb") ]), seq([ I("aaa"), I("aab"), I("aba"), I("abb"), I("baa"), I("bab"), I("bba"), I("bbb") ]) ), e(r"[ab]{3}{3}") ); assert_eq!(infinite(), e(r"ab|cd|ef|gh|ij|kl|mn|op|qr|st|uv|wx|yz")); } #[test] fn empty() { assert_eq!(exact([""]), e(r"")); assert_eq!(exact([""]), e(r"^")); assert_eq!(exact([""]), e(r"$")); assert_eq!(exact([""]), e(r"(?m:^)")); assert_eq!(exact([""]), e(r"(?m:$)")); assert_eq!(exact([""]), e(r"\b")); assert_eq!(exact([""]), e(r"\B")); assert_eq!(exact([""]), e(r"(?-u:\b)")); assert_eq!(exact([""]), e(r"(?-u:\B)")); } #[test] fn odds_and_ends() { assert_eq!((Seq::infinite(), seq([I("a")])), e(r".a")); assert_eq!((seq([I("a")]), Seq::infinite()), e(r"a.")); assert_eq!(infinite(), e(r"a|.")); assert_eq!(infinite(), e(r".|a")); let pat = r"M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]"; let expected = inexact( ["Mo'am", "Moam", "Mu'am", "Muam"].map(I), [ "ddafi", "ddafy", "dhafi", "dhafy", "dzafi", "dzafy", "dafi", "dafy", "tdafi", "tdafy", "thafi", "thafy", "tzafi", "tzafy", "tafi", "tafy", "zdafi", "zdafy", "zhafi", "zhafy", "zzafi", "zzafy", "zafi", "zafy", ] .map(I), ); assert_eq!(expected, e(pat)); assert_eq!( (seq(["fn is_", "fn as_"].map(I)), Seq::infinite()), e(r"fn is_([A-Z]+)|fn as_([A-Z]+)"), ); assert_eq!( inexact([I("foo")], [I("quux")]), e(r"foo[A-Z]+bar[A-Z]+quux") ); assert_eq!(infinite(), e(r"[A-Z]+bar[A-Z]+")); assert_eq!( exact(["Sherlock Holmes"]), 
e(r"(?m)^Sherlock Holmes|Sherlock Holmes$") ); assert_eq!(exact(["sa", "sb"]), e(r"\bs(?:[ab])")); } // This tests a specific regex along with some heuristic steps to reduce // the sequences extracted. This is meant to roughly correspond to the // types of heuristics used to shrink literal sets in practice. (Shrinking // is done because you want to balance "spend too much work looking for // too many literals" and "spend too much work processing false positive // matches from short literals.") #[test] #[cfg(feature = "unicode-case")] fn holmes() { let expected = inexact( ["HOL", "HOl", "HoL", "Hol", "hOL", "hOl", "hoL", "hol"].map(I), [ "MES", "MEs", "Eſ", "MeS", "Mes", "eſ", "mES", "mEs", "meS", "mes", ] .map(I), ); let (mut prefixes, mut suffixes) = e(r"(?i)Holmes"); prefixes.keep_first_bytes(3); suffixes.keep_last_bytes(3); prefixes.minimize_by_preference(); suffixes.minimize_by_preference(); assert_eq!(expected, (prefixes, suffixes)); } // This tests that we get some kind of literals extracted for a beefier // alternation with case insensitive mode enabled. At one point during // development, this returned nothing, and motivated some special case // code in Extractor::union to try and trim down the literal sequences // if the union would blow the limits set. #[test] #[cfg(feature = "unicode-case")] fn holmes_alt() { let mut pre = prefixes(r"(?i)Sherlock|Holmes|Watson|Irene|Adler|John|Baker"); assert!(pre.len().unwrap() > 0); pre.optimize_for_prefix_by_preference(); assert!(pre.len().unwrap() > 0); } // See: https://github.com/rust-lang/regex/security/advisories/GHSA-m5pq-gvj9-9vr8 // See: CVE-2022-24713 // // We test this here to ensure literal extraction completes in reasonable // time and isn't materially impacted by these sorts of pathological // repeats. #[test] fn crazy_repeats() { assert_eq!(inexact([E("")], [E("")]), e(r"(?:){4294967295}")); assert_eq!( inexact([E("")], [E("")]), e(r"(?:){64}{64}{64}{64}{64}{64}") ); assert_eq!(inexact([E("")], [E("")]), e(r"x{0}{4294967295}")); assert_eq!(inexact([E("")], [E("")]), e(r"(?:|){4294967295}")); assert_eq!( inexact([E("")], [E("")]), e(r"(?:){8}{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}") ); let repa = "a".repeat(100); assert_eq!( inexact([I(&repa)], [I(&repa)]), e(r"a{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}") ); } #[test] fn huge() { let pat = r#"(?-u) 2(?: [45]\d{3}| 7(?: 1[0-267]| 2[0-289]| 3[0-29]| 4[01]| 5[1-3]| 6[013]| 7[0178]| 91 )| 8(?: 0[125]| [139][1-6]| 2[0157-9]| 41| 6[1-35]| 7[1-5]| 8[1-8]| 90 )| 9(?: 0[0-2]| 1[0-4]| 2[568]| 3[3-6]| 5[5-7]| 6[0167]| 7[15]| 8[0146-9] ) )\d{4}| 3(?: 12?[5-7]\d{2}| 0(?: 2(?: [025-79]\d| [348]\d{1,2} )| 3(?: [2-4]\d| [56]\d? ) )| 2(?: 1\d{2}| 2(?: [12]\d| [35]\d{1,2}| 4\d? 
) )| 3(?: 1\d{2}| 2(?: [2356]\d| 4\d{1,2} ) )| 4(?: 1\d{2}| 2(?: 2\d{1,2}| [47]| 5\d{2} ) )| 5(?: 1\d{2}| 29 )| [67]1\d{2}| 8(?: 1\d{2}| 2(?: 2\d{2}| 3| 4\d ) ) )\d{3}| 4(?: 0(?: 2(?: [09]\d| 7 )| 33\d{2} )| 1\d{3}| 2(?: 1\d{2}| 2(?: [25]\d?| [348]\d| [67]\d{1,2} ) )| 3(?: 1\d{2}(?: \d{2} )?| 2(?: [045]\d| [236-9]\d{1,2} )| 32\d{2} )| 4(?: [18]\d{2}| 2(?: [2-46]\d{2}| 3 )| 5[25]\d{2} )| 5(?: 1\d{2}| 2(?: 3\d| 5 ) )| 6(?: [18]\d{2}| 2(?: 3(?: \d{2} )?| [46]\d{1,2}| 5\d{2}| 7\d )| 5(?: 3\d?| 4\d| [57]\d{1,2}| 6\d{2}| 8 ) )| 71\d{2}| 8(?: [18]\d{2}| 23\d{2}| 54\d{2} )| 9(?: [18]\d{2}| 2[2-5]\d{2}| 53\d{1,2} ) )\d{3}| 5(?: 02[03489]\d{2}| 1\d{2}| 2(?: 1\d{2}| 2(?: 2(?: \d{2} )?| [457]\d{2} ) )| 3(?: 1\d{2}| 2(?: [37](?: \d{2} )?| [569]\d{2} ) )| 4(?: 1\d{2}| 2[46]\d{2} )| 5(?: 1\d{2}| 26\d{1,2} )| 6(?: [18]\d{2}| 2| 53\d{2} )| 7(?: 1| 24 )\d{2}| 8(?: 1| 26 )\d{2}| 91\d{2} )\d{3}| 6(?: 0(?: 1\d{2}| 2(?: 3\d{2}| 4\d{1,2} ) )| 2(?: 2[2-5]\d{2}| 5(?: [3-5]\d{2}| 7 )| 8\d{2} )| 3(?: 1| 2[3478] )\d{2}| 4(?: 1| 2[34] )\d{2}| 5(?: 1| 2[47] )\d{2}| 6(?: [18]\d{2}| 6(?: 2(?: 2\d| [34]\d{2} )| 5(?: [24]\d{2}| 3\d| 5\d{1,2} ) ) )| 72[2-5]\d{2}| 8(?: 1\d{2}| 2[2-5]\d{2} )| 9(?: 1\d{2}| 2[2-6]\d{2} ) )\d{3}| 7(?: (?: 02| [3-589]1| 6[12]| 72[24] )\d{2}| 21\d{3}| 32 )\d{3}| 8(?: (?: 4[12]| [5-7]2| 1\d? )| (?: 0| 3[12]| [5-7]1| 217 )\d )\d{4}| 9(?: [35]1| (?: [024]2| 81 )\d| (?: 1| [24]1 )\d{2} )\d{3} "#; // TODO: This is a good candidate of a seq of literals that could be // shrunk quite a bit and still be very productive with respect to // literal optimizations. let (prefixes, suffixes) = e(pat); assert!(!suffixes.is_finite()); assert_eq!(Some(243), prefixes.len()); } #[test] fn optimize() { // This gets a common prefix that isn't too short. let (p, s) = opt(["foobarfoobar", "foobar", "foobarzfoobar", "foobarfoobar"]); assert_eq!(seq([I("foobar")]), p); assert_eq!(seq([I("foobar")]), s); // This also finds a common prefix, but since it's only one byte, it // prefers the multiple literals. let (p, s) = opt(["abba", "akka", "abccba"]); assert_eq!(exact(["abba", "akka", "abccba"]), (p, s)); let (p, s) = opt(["sam", "samwise"]); assert_eq!((seq([E("sam")]), seq([E("sam"), E("samwise")])), (p, s)); // The empty string is poisonous, so our seq becomes infinite, even // though all literals are exact. let (p, s) = opt(["foobarfoo", "foo", "", "foozfoo", "foofoo"]); assert!(!p.is_finite()); assert!(!s.is_finite()); // A space is also poisonous, so our seq becomes infinite. But this // only gets triggered when we don't have a completely exact sequence. // When the sequence is exact, spaces are okay, since we presume that // any prefilter will match a space more quickly than the regex engine. // (When the sequence is exact, there's a chance of the prefilter being // used without needing the regex engine at all.) let mut p = seq([E("foobarfoo"), I("foo"), E(" "), E("foofoo")]); p.optimize_for_prefix_by_preference(); assert!(!p.is_finite()); } } <file_sep>/regex-test/Cargo.toml [package] name = "regex-test" version = "0.1.0" #:version authors = ["The Rust Project Developers", "<NAME> <<EMAIL>>"] description = """ Infrastructure for testing regexes. You probably don't want to use this crate unless you're working on a regex implementation. 
""" documentation = "https://docs.rs/regex-test" repository = "https://github.com/rust-lang/regex/tree/master/regex-test" readme = "README.md" keywords = ["regex", "regexp", "dfa", "automata", "test"] license = "MIT OR Apache-2.0" edition = "2021" [lib] name = "regex_test" bench = false path = "lib.rs" [dependencies] anyhow = "1.0.27" bstr = { version = "1.3.0", default-features = false, features = ["std", "serde"] } serde = { version = "1.0.105", features = ["derive"] } toml = { version = "0.7.3", default-features = false, features = ["parse"] } <file_sep>/regex-automata/tests/gen/mod.rs mod dense; mod sparse; <file_sep>/regex-capi/test #!/bin/sh set -e # cd to the directory containing this crate's Cargo.toml so that we don't need # to pass --manifest-path to every `cargo` command. cd "$(dirname "$0")" cargo build --verbose (cd ctest && ./compile && LD_LIBRARY_PATH=../../target/debug ./test) (cd examples && ./compile && LD_LIBRARY_PATH=../../target/debug ./iter)
e1f5ca2d5a0606c6b8a8712b72bbf2534d141e8b
[ "TOML", "Shell", "C", "Rust", "Markdown", "YAML" ]
220
TOML
rust-lang/regex
cdc0dbd3547462aedb6235197c2b743ec4ea75e5
aaa8e0762fa9f26344deb39e32d4251a31dfa70b
refs/heads/master
<repo_name>simplybusiness/oneday<file_sep>/src/oneday/domain.clj (ns oneday.domain (:require [cheshire.core :as json] [clojure.string :as str] [clojure.java.io :as io] [jdbc.core :as jdbc])) (def proposal-sql (slurp (io/resource "sql/proposal-frag.sql"))) (defn get-proposal-by-id [db id] (first (jdbc/fetch db [(str "select * from (" proposal-sql " ) proposal where id = ?") id]))) (defn may-update? [subscriber proposal] (or (= (:id subscriber) (:proposer_id proposal)) #_ (= (:id subscriber) 1))) (defn post-proposal [db p] (let [res (jdbc/fetch db ["insert into proposal (title,description,complexity,status,proposer_id) values (?,?,?, ?::proposal_status,?) returning id" (:title p) (:description p) (:complexity p) (:status p) (:proposer-id p)])] (:id (first res)))) (defn update-proposal [db id p] (let [res (jdbc/fetch db ["update proposal set title=?,description=?,complexity=?,status=?::proposal_status,updated=now() where id=? returning id" (:title p) (:description p) (:complexity p) (:status p) id])] (:id (first res)))) (defn add-kudosh [db proposal-id points sponsor-id] (:created_at (first (jdbc/fetch db ["insert into kudosh (proposal_id, points, sponsor_id) values (?,?, ?) returning created_at" proposal-id points sponsor-id])))) (defn add-comment [db proposal-id comment] (let [res (jdbc/fetch db ["insert into comment (proposal_id, text, interested, demo, author_id) values (?,?,?,?, ?) returning id" proposal-id (:text comment) (:interested comment) (:demo comment) (:author-id comment)])] (when-let [id (:id (first res))] (and (or (not (:sponsor comment)) (add-kudosh db proposal-id (:kudosh comment) (:author-id comment))) id)))) (defn get-subscriber-from-id-token [db id-token] ;; if wrap-auth works correctly it will set subscriber in the session, ;; so ths function is only going to be called once per session (let [iss (:iss id-token) sub (:sub id-token) display-name (:name id-token) handle (str/replace (.toLowerCase display-name) #"[^a-zA-Z0-9-]" "") payload (json/generate-string id-token) subscriber (or (first (jdbc/fetch db ["select s.* from subscriber s join authentication a on s.id=a.subscriber_id where a.iss=? and a.sub=?" iss sub])) ;; this is possibly racy, if new subscriber ;; is logging in from two browsers at once (first (jdbc/fetch db ["insert into subscriber (handle, display_name) values (?,?) returning *", handle display-name]))) subscriber-id (:id subscriber)] (jdbc/execute db ["insert into authentication (subscriber_id,iss,sub,display_name,payload) values (?,?,?,?,?::jsonb) on conflict on constraint authentication_pkey do update set subscriber_id=?, iss=?, sub=?, display_name=?, payload=?::jsonb" subscriber-id iss sub display-name payload subscriber-id iss sub display-name payload]) subscriber)) <file_sep>/resources/placeholder-description.md # What should be done? [ Describe the change that needs to be made ] # Why does it need doing? [ What are the consequences if nobody picks it up? What are the cool new things we'll be able to do if someone *does* pick it up? ] # How will we tell when it has been done? [ Also known as "acceptance criteria" ] # What else do you want to say? * here is where you can add * links to related resources, like * github URLs * trello cards * etc <file_sep>/README.md # Oneday Have you ever said "one day we ought to do _thing_"? Or, "someone could do _thing_ in one day"? ## Recognising technical excellence How do we recognise and reward technical excellence? 
Oneday is "like BountySource.com but for the apps we write at Simply Business" * Found some area of our codebase that needs a little love and attention? Write up a proposal and attach a Kudosh reward. * Browse through other people's proposals to find something different or interesting to pick up. # How it works _[ this next bit is a description of how things will be and has not yet been implemented ]_ ## Posting a proposal 1. Give it a title and description. Make it sound interesting and attractive so that developers will want to pick it up. We expect that small well-defined proposals (*something you can do in One Day*) will be more successful than large, open-ended or vague ones. 2. Estimate its complexity, Cynefin-style. If it's Complex or Chaotic you may no get many takers 3. Decide how important it is and how much you value it. 4. Post it. For great justice! Optionally you might want to link to Trello cards or Github issues in your description for background. That's cool, if the description itself (or the reward) is compelling enough that people are going to click through. You might also want to link from the project source code to the Oneday page so that anyone looking at that part of the codebase will find your offer of reward-for-refactoring. ## Browsing You can see what's previously been offered, comment, and even add your own rewards. If you find one you want to work on, add a comment and check the "I am interested in working on this" sign. Nothing stops you from working on the same proposal as somebody else, but it wil be up to you and them and the sponsors collectively to co-ordinate and to decide how to split the rewards. ## Claim credit If you've done the work you deserve the recognition. To tell the world and claim the reward, click on the medal icon, and write a note which includes a link (e.g. the branch or PR URL) to your work. This will notify the sponsors to go and look: it is then up to you and them to decide whether the work meets the requirement, and award the points. # How to develop/run it, etc etc ## First-time setup ### Dependencies If you are using the Nix package manager, you can run `nix-shell` in this directory to get a development environment instead of resorting to Homebrew. If not, you will need `leiningen` and `postgresql` brew install leiningen postgresql ### Postgresql You will need to have a Postgresql server running and configured. initdb -D var/postgresql foreman start # takes over your terminal, so open another (psql -d template1 -c "\du" |grep onedayuser) || createuser onedayuser (psql -d template1 -c "\l" |grep oneday) || createdb oneday ### Google Identity Platform Oneday is tied to the Google Identity PLatform so that people can login and authenticate against our Google Apps domain. You need to obtain a "client id" and "client secret" by following the steps at * https://developers.google.com/identity/protocols/OpenIDConnect Once you have these, copy the file `dev.edn.sample` in this repo to `dev.edn` and update the `client-id` and `client-secret` keys. ## Starting background processes At the beginning of each development session, just run $ foreman start # takes over your terminal, so open another $ lein migratus migrate # run any unapplied migrations Then run $ lein repl ondeday.core> (-main "dev.edn") to get a REPL ("Read Eval Print Loop" - an interactive top level like the Rails console), and start the HTTP server on port 3000. 
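If you're not sure what goes in `dev.edn`, the shape that `oneday.config` expects looks roughly like the sketch below. It mirrors the example configuration used in the config tests; every value here is a placeholder, so substitute your own port, database URL and Google credentials (the `:session-secret` is used as the cookie-store key, which Ring's cookie store expects to be 16 bytes):

    {:http {:port 3000
            :session-secret "change-me-16byte"}
     :db   {:spec "postgresql://onedayuser:onedaypw@localhost/oneday"}
     :oidc {:google {:client-id     "your-client-id"
                     :client-secret "your-client-secret"
                     :discovery-uri "https://accounts.google.com/.well-known/openid-configuration"
                     :provider-name "google"
                     :redirect-uri  "http://localhost:3000/login/google/postauth"}}}
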
You can get fancier: if you speak Emacs my recommendation is to use Cider, but this README file is too short to teach you Emacs. ## Deployment This bit has yet to be written, but there's going to be a Dockerfile and stuff <file_sep>/src/oneday/config.clj (ns oneday.config (:require [clojure.edn :as edn] [clojure.string :as str] [clojure.test :as test :refer [is deftest with-test]])) ;; Guessing the type of a config attribute based on its value is ;; obviously (I *claim* this is obvious ...) unreliable and for that ;; reason not good style. Perhaps a future version of this software ;; will use core.spec to define the allowable configuration robustly (defn guess-type [v] (cond (= v "true") true (= v "false") false (re-matches #"-?[0-9]+" v) (Integer/parseInt v) true v)) (defn nest-map [h] "Turns a single-level map with entries L1__L2__L3=v into a nested map {:l1 => { :l2 => { :l3 => v } } }, transforming underscores in map keys into hyphens" (let [kwize (fn [s] (keyword (str/replace (.toLowerCase s) #"_" "-")))] (reduce (fn [a [k v]] (assoc-in a (map kwize (str/split k #"__")) (guess-type v))) {} h))) (deftest nest-map-test (is (= {:l1 { :l2 {:l3 "v"}}} (nest-map {"L1__L2__L3" "v"}))) (is (= {:l1-l2 {:l3 "v"}} (nest-map {"L1_L2__L3" "v" })))) (defn deep-merge [a b] (if (map? a) (merge-with deep-merge a b) b)) (deftest deep-merge-test (is (= {:a {:b 1} :c 2} (deep-merge {:a :foo :c 2} {:a {:b 1}})))) (defn read-env ([prefix] (read-env prefix (System/getenv))) ([prefix env] (get (nest-map env) prefix))) (defn read-file [config-file] (-> config-file slurp edn/read-string)) (defn read-config [config-file env-prefix] (deep-merge (read-file config-file) (read-env env-prefix))) (def secret-paths [[:http :session-secret] [:db :spec] [:oidc :google :client-secret]]) (defn redact-config [config] "Given a configuration map, replace the sensitive/secret values (as specified by `secret-paths` with the key `:redacted` so that it can safely be e.g. printed to a log file" (reduce (fn [c path] (assoc-in c path :redacted)) config secret-paths)) (deftest redact-config-test (let [config {:http {:port 3000 :session-secret '<KEY>' } :db {:spec "postgresql://onedayuser:onedaypw@localhost/oneday"} :oidc {:google { :client-id "1234567890" :client-secret "ZZZZZZZZzzzzzz" :discovery-uri "https://accounts.google.com/.well-known/openid-configuration" :provider-name "google" :redirect-uri "http://localhost:3000/login/google/postauth" } } } redacted (redact-config config)] (is (= :redacted (get-in redacted [:http :session-secret]))) (is (= :redacted (get-in redacted [:db :spec]))) (is (= :redacted (get-in redacted [:oidc :google :client-secret]))) (is (= (get-in redacted [:http :port]) (get-in config [:http :port]))))) <file_sep>/src/oneday/controllers/proposal.clj (ns oneday.controllers.proposal (require [oneday.domain :as d] [clojure.java.io :as io] [jdbc.core :as jdbc] [clojure.walk :refer [keywordize-keys]] [ring.util.response :as rsp] [oneday.helpers :as h] [oneday.views.proposal :as v])) ;; a controller ns chooses the view to render, and does any ;; lookups (local database or external services) to get the ;; data that view will need ;; THINKABOUTME there's quite a lot of sql and jdbc grunge in this ;; file which feels like it should be moved into a db interface of ;; some kind (def proposal-sql (slurp (io/resource "sql/proposal-frag.sql"))) (defn index [r _] (let [offset 0 limit 10 proposals (jdbc/fetch (:db r) [(str proposal-sql "order by created_at desc offset ? 
limit ?") offset limit])] {:view v/index :proposals proposals})) (defn show [r route] (let [id (-> route :route-params :id Integer/parseInt) proposal (d/get-proposal-by-id (:db r) id) editable? (d/may-update? {:id (h/request-subscriber-id r)} proposal) sponsors (jdbc/fetch (:db r) ["select sum(points),s.* from kudosh k join subscriber s on s.id=k.sponsor_id where proposal_id=? group by s.id" id]) comments (jdbc/fetch (:db r) ["select c.*,s.handle as author from comment c join subscriber s on c.author_id=s.id where proposal_id=? and text<>'' order by created_at " id]) ] {:view v/show :proposal proposal :sponsors sponsors :edit-url (and editable? (str (:uri r) "/edit")) :comments comments})) (defn post [r _] (let [p (and (= (:request-method r) :post) (keywordize-keys (:form-params r))) success (and p (let [p (assoc p :proposer-id (h/request-subscriber-id r))] (d/post-proposal (:db r) p)))] (if success {:redirect show :id success} ;; not happy about the value I'm sending into this view. It's ;; maybe a special case because there is yet no entity associated ;; with the view - just the stuff that the user keyed in but ;; which would not validate as a legitimate proposal {:view v/post :params p}))) (defn edit [r route] (let [id (-> route :route-params :id Integer/parseInt) p (and (= (:request-method r) :post) (keywordize-keys (:form-params r))) before (d/get-proposal-by-id (:db r) id) editable? (d/may-update? {:id (h/request-subscriber-id r)} before)] (if (d/may-update? {:id (h/request-subscriber-id r)} before) (if-let [success (and p (d/update-proposal (:db r) id p))] {:redirect show :id id} {:view v/edit :params (d/get-proposal-by-id (:db r) id)}) {:respond (-> "Current subscriber may not edit" rsp/response (rsp/status 403) (rsp/content-type "text/plain"))}))) <file_sep>/src/authomatic/oidc.clj (ns authomatic.oidc (:require [clj-http.client :as http] [clojure.string :as str] [ring.util.codec] [clojure.set] [clojure.walk :refer [keywordize-keys]] [ring.util.response :as resp] [cheshire.core :as json] [clojure.test :as test :refer [is deftest with-test]] ) (:import [org.apache.commons.codec.binary Base64 Hex])) (defn read-json-response [s] (keywordize-keys (json/decode (:body s)))) (defn discover-configuration [options] (if-let [uri (:discovery-uri options)] (merge (-> uri http/get read-json-response) options) options)) (defn validate-configuration [options] (let [actual (set (keys options)) expected #{:userinfo_endpoint :authorization_endpoint :client-id :redirect-uri :provider-name :issuer :token_endpoint}] (or (and (clojure.set/subset? expected actual) options) (throw (ex-info "missing keys in configuration" {:missing (clojure.set/difference expected actual)}))))) (defn gen-oauth-state-token [] (let [bytes (byte-array 32)] (with-open [r (java.io.FileInputStream. "/dev/urandom")] (.read r bytes 0 32)) (Base64/encodeBase64String bytes))) (def scopes-desired #{"openid" ;openid #_"sdps-r" ;yahoo "email" ;openid "public_profile" ;facebook "profile" ;openid }) (defn authorization-request-uri [options state] (let [{:keys [client-id scopes_supported redirect-uri authorization_endpoint]} options scopes (clojure.set/intersection (set scopes_supported) scopes-desired)] (str authorization_endpoint "?" 
(ring.util.codec/form-encode {"client_id" client-id "response_type" "code" "scope" (str/join " " scopes) "redirect_uri" redirect-uri "state" state})))) (defn redirect-to-idp [options request] (let [random (gen-oauth-state-token) state (str/join ":" [random (:uri request)])] (assoc-in (resp/redirect (authorization-request-uri options state)) [:session (:provider-name options) :state] state))) (defn decode-jwt [token] {:pre [(string? token) (.contains token ".")]} (let [[header payload sig] (str/split token #"\.")] (json/parse-string (String. (Base64/decodeBase64 payload)) true))) (defn decode-token-json [json] (if-let [decoded (and (:id_token json) (decode-jwt (:id_token json)))] (assoc json :id_token_decoded decoded) (throw (ex-info "Malformed OAuth id-token" json)))) (defn request-for-token-endpoint [options code] (let [body (ring.util.codec/form-encode {"code" code "grant_type" "authorization_code" "client_id" (:client-id options) "client_secret" (:client-secret options) "redirect_uri" (:redirect-uri options) })] {:method :post :url (:token_endpoint options) :as :json :headers {"Content-Type" "application/x-www-form-urlencoded" "Accept" "application/json"} :body body})) (defn request-token-for-code [options code] (->> code (request-for-token-endpoint options) http/request :body decode-token-json)) (defn handle-idp-callback [provider-name options request] (let [{state "state" code "code"} (:params request) required-state (get-in request [:session provider-name :state])] (if (= state required-state) (if-let [token (request-token-for-code options code)] (assoc-in (resp/redirect (second (str/split state #":"))) ;; make sure we wipe out state [:session provider-name] {:credentials token}) (throw (ex-info "can't get oauth token!" {}))) (throw (ex-info "received idp callback with incorrect state" {:expected required-state :actual state}))))) ;; wraps a ring handler with something that will check for ;; some credentials, and perform OpenID Connect authentication dance ;; if there are none present (defn wrap-oidc [handler options] (let [provider-name (:provider-name options) redirect-uri (:redirect-uri options) discovered (discover-configuration options) options (validate-configuration discovered)] (fn [request] (let [creds (get-in request [:session provider-name :credentials]) state (get-in request [:session provider-name :state])] (cond ;; already authed, pass through creds (handler request) ;; don't auth the health check XXX this is ugly (= (:uri request) "/health") (resp/response "OK") ;; handle the post-auth redirect from IdP (and (= (.getPath (java.net.URI. 
redirect-uri)) (:uri request)) state) (handle-idp-callback provider-name options request) ;; auth in progress for a different request that is not this ;; one, so don't start another state (resp/not-found "cannot serve resource while oauth request pending") ;; not authed, no auth in progress - begin the oauth dance true (redirect-to-idp options request)))))) (def test-options { :discovery-uri "https://accounts.google.com/.well-known/openid-configuration" :client-id "12345" :provider-name "example" :redirect-uri "/oidc/redirect" :authorization_endpoint "https://accounts.example.com/o/oauth2/v2/auth" }) (deftest redirect-to-idp-test (let [h (fn [_] (resp/response "hello world")) req {:uri "/proposals" :scheme :https :request-method :get} app (-> h (wrap-oidc test-options)) rsp (app req)] (is (= 302 (:status rsp))) (is (= "/proposals" (second (str/split (-> rsp :session (get "example") :state) #":")))) ;; would like to test also that the cookie is httponly/secure (is (re-find #"accounts.example.com/o/oauth2/v2/auth" (-> rsp :headers (get "Location")))))) (deftest auth-in-progress-test (let [h (fn [_] (resp/response "hello world")) session {"example" {:state "2345678908765432:/proposals/"}} req {:uri "/oidc/redirect" :scheme :https :request-method :get :session session} app (-> h (wrap-oidc test-options)) rsp (app req)] (is (nil? rsp)))) <file_sep>/TODO.org # TO DO * authentication (google openid connect) ** DONE authomatic.oidc - acts as middleware - handles /login/provider/redirect - needs an http library to do exchange-token-for-code - passes other requests through to next handler, after augmenting the request to include authn details https://developers.google.com/identity/protocols/OpenIDConnect#authenticatingtheuser ** DONE connect google authentication to local subscriber record - authentications table has iss, sub, subscriber-id, display-name and a json field for all the other crap - change the domain functions to use subscriber-id instead of handle ** TODO set hd param, check hd claim https://developers.google.com/identity/protocols/OpenIDConnect#authenticationuriparameters ** review https://tools.ietf.org/id/draft-ietf-oauth-security-topics-05.html * commenting: ** DONE add comments ** DONE view comments ** edit comments? nope, add another * DONE edit proposal ** DONE add 'status' field draft/open/complete/superseded ** DONE change description ** DONE change status (open/completed/withdrawn) ** DONE permissions: may subscriber S edit proposal P? * deploy ** DONE plausible dev deploy ** TODO hide the postgres password ** DONE find out why we're writing empty log lines - need to change "request" to something else ** live deploy * proposal lifetime management ** DONE allow changing proposal status ** draft proposal is unpublished & may not be commented/worked on ** open status is default ** completed/withdrawn status may not be demoed ** sponsors are notified about demos ** interesteds are notified about changes * make it look prettier * figure out notification channel ** maybe email ** maybe rss ** some other slack integration? 
** web notifications api - https://developer.mozilla.org/en-US/docs/Web/API/Notifications_API/Using_the_Notifications_API * personal profile page ** what they've done ** what they're interested in ** what they've sponsored <file_sep>/resources/migrations/20181108220715-comments-table.up.sql create table comment ( id serial primary key, proposal_id int references proposal, text varchar, author_id int references agent, interested boolean ) <file_sep>/resources/migrations/20181102115330-make-proposal-table.up.sql create table proposal ( title varchar, created_at timestamptz); <file_sep>/src/oneday/page.clj (ns oneday.page (:require [hiccup.core :as h] [hiccup.page :as p])) (defn page [title & content] (let [body (p/html5 {} [:head {} [:link {:rel "stylesheet" :href "/static/styles.css"}] [:title {} title]] [:body {} [:header.gradient [:a {:href "/proposals/"} [:h1 "One day"]]] [:div.contents content]])] {:status 200 :body body :headers {"content-type" "text/html"}})) <file_sep>/Procfile db: postgres -D var/postgresql -c log_destination=stderr -c log_min_duration_statement=0 logwatch: touch http.json.log && tail -f http.json.log <file_sep>/resources/migrations/20181107225537-add-proposal-id.up.sql alter table proposal add column id serial primary key; <file_sep>/resources/migrations/20181109091051-kudosh-table.down.sql drop table kudosh; <file_sep>/resources/migrations/20181102115330-make-proposal-table.down.sql drop table proposal; <file_sep>/resources/migrations/20181114155537-rename-agent-table.down.sql alter table subscriber rename to agent; alter table proposal rename column proposer_id to agent; <file_sep>/resources/migrations/20181122213551-comment-is-demo.up.sql alter table comment add column demo boolean; <file_sep>/src/oneday/http.clj ;; the majority of this file is about routes, not http per se ;; think about renaming it (ns oneday.http (:require [bidi.bidi :as bd] [bidi.ring :refer [->Resources]] [clojure.java.io :as io] [oneday.page :refer [page]] oneday.controllers.proposal oneday.controllers.static oneday.controllers.comment [oneday.domain :as domain] [ring.util.response :as rsp] [cheshire.core :as json] [authomatic.oidc :refer [wrap-oidc]] [ring.middleware.params :refer [wrap-params]] [ring.middleware.session :refer [wrap-session]] [ring.middleware.session.cookie :refer [cookie-store]] [ring.middleware.content-type :refer (wrap-content-type)] [oneday.ring-log :as ring-log] [ring.adapter.jetty :refer [run-jetty]] [clojure.test :as test :refer [is deftest with-test]])) (def routes ["/" {["static/" [ #"[a-zA-Z0-9/_\.-]+" :path]] #'oneday.controllers.static/send-resource "proposals/" {"" #'oneday.controllers.proposal/index "post" #'oneday.controllers.proposal/post [:id] #'oneday.controllers.proposal/show [:id "/edit"] #'oneday.controllers.proposal/edit [:id "/comments/new"] #'oneday.controllers.comment/new }}]) (defn app-handler [r] (let [route (bd/match-route routes (:uri r))] (if route (let [controller (:handler route) view-data (controller r route)] (if-let [view (:view view-data)] (rsp/charset (view (dissoc view-data :view)) "UTF-8") (if-let [handler (:redirect view-data)] (let [path (bd/unmatch-pair routes {:handler handler :params (dissoc view-data :redirect)})] (rsp/redirect path :see-other)) (:respond view-data)))) (rsp/content-type (rsp/not-found "not found") "text/plain")))) (defn wrap-auth [h] (fn [r] ;; find local user from foreign credentials, and put in session to ;; avoid database lookup on every single request (if-let [subscriber (get-in r [:session :subscriber])] 
(h r) (let [token (get-in r [:session "google" :credentials :id_token_decoded]) subscriber (domain/get-subscriber-from-id-token (:db r) token) response (h (assoc-in r [:session :subscriber] subscriber)) session (or (:session response) (:session r))] (assoc response :session (assoc session :subscriber subscriber)))))) (defn middlewares [config] (-> app-handler (wrap-auth) (wrap-oidc (-> config :oidc :google)) wrap-params wrap-content-type (wrap-session {:cookie-attrs {:secure (-> config :http :secure)} :store (cookie-store {:key (-> config :http :session-secret)})}) (ring-log/wrap-catch-and-log {:log-stream *err* :log-format :json}) )) (defn start [config] (let [db (-> config :db :connection) log-stream (if-let [f (-> config :http :log-file-name)] (io/writer (io/file f) :append true) *err*) pipeline (binding [*err* log-stream] (middlewares config)) wrap-db (fn [h] (fn [r] (h (assoc r :db db)))) server (run-jetty (wrap-db pipeline) (assoc (:http config) :join? false))] (binding [*out* log-stream] (println (json/generate-string {:timestamp (java.util.Date.) :message "HTTP server ready"}))) (assoc-in config [:http :server] server))) (defn stop [config] (.stop (-> config :http :server)) config) <file_sep>/src/oneday/ring_log.clj (ns oneday.ring-log (:require [ring.util.response :as rsp] [cheshire.core :as json] [clojure.test :as test :refer [is deftest with-test]])) (defn stacktrace-el->array [el] (let [{:keys [className fileName lineNumber methodName]} (bean el)] (map (fn [v] (and v (.toString v))) [className fileName lineNumber methodName]))) (defn error-attributes [ex] (let [m (Throwable->map ex)] {:cause (:cause m) :trace (if-let [trace (:trace m)] (map stacktrace-el->array trace)) :via (map (fn [{ :keys [type message at]}] {:type (pr-str type) :message message :at (pr-str at)}) (:via m))})) (defn log-entry-payload [request response exception] (let [value {:http-request (select-keys request [:uri :remote-addr :headers :server-port :server-name :query-string :scheme :request-method]) :status (:status response) :timestamp (java.util.Date.) :exception exception}] (into {} (filter val value)))) (defn json-log [request response & [exception]] (println (json/generate-string (log-entry-payload request response exception)))) (defn edn-log [request response & [exception]] (println (log-entry-payload request response exception))) (defn wrap-catch-and-log [h {:keys [log-stream log-format]}] (fn [request] (let [[response error] (try [(h request) nil ] (catch Throwable ex [(-> (rsp/response "Internal error. Clean up on aisle 5") (rsp/status 500) (rsp/content-type "text/plain")) (error-attributes ex)]))] (binding [*out* log-stream] ((get {:json json-log :edn edn-log} log-format edn-log) request response error)) response))) (deftest wrap-log-exception (let [s (java.io.StringWriter.) handler (wrap-catch-and-log (fn [r] (/ 1 0)) {:log-stream s})] (handler {:uri "/" :request-method :get}) (let [line (.toString s)] (is (re-find #":status 500" line)) (is (re-find #":exception" line)) (is (re-find #":cause Divide by zero" line))))) (deftest wrap-log-ok (let [s (java.io.StringWriter.) 
handler (wrap-catch-and-log (fn [r] (rsp/response "OK")) {:log-stream s})] (handler {:uri "/" :request-method :get}) (let [line (.toString s)] (is (re-find #":status 200" line)) (is (not (re-find #":exception" line)))))) ;; (let [handler (wrap-catch-and-log (fn [r] (rsp/response "dfgg")))] ;; (handler {:uri "/" :request-method :get})) <file_sep>/src/oneday/views/proposal.clj (ns oneday.views.proposal (:require [oneday.page :refer [page]] [clojure.java.io :as io] [oneday.helpers :as h] [markdown.core :as md] [hiccup.form :as f])) (def placeholder-descr (slurp (io/resource "placeholder-description.md"))) (defn proposal-form [p] (f/form-to [:post ""] [:div {} [:label {:for :title} "Title"] (f/text-field :title (:title p))] [:div {} [:label {:for :description :title "(markdown ok)"} "Description" ] (f/text-area :description (:description p))] [:div {} [:label {:for :tags} "Tags"] (f/text-field :tags (:tags p))] [:div {} [:label {:for :complexity} "Complexity"] (f/drop-down :complexity ["Obvious" "Complicated" "Complex" "Chaotic"] (:complexity p)) [:label {:for :status} "Status"] (f/drop-down :status ["Draft" "Open" "Completed" "Withdrawn"] (or (:status p) "Open"))] [:button {} "Post"])) (defn post [state] (let [p (merge {:description placeholder-descr} (or (:params state) {}))] (page "Post a proposal" [:div.proposal.post-proposal {} [:h2 "Post a proposal"] (proposal-form p) ]))) (defn edit [state] (let [p (merge {:description placeholder-descr} (or (:params state) {}))] (page "Edit proposal" [:div.proposal.post-proposal {} [:h2 "Edit proposal"] (proposal-form p) ]))) (defn link-to [p] [:a {:href (:id p)} (:title p)]) (defn dateline [prop & [edit-url]] [:div.dateline "Proposed " (if-let [c(:created_at prop)] (h/format-time c) "") " by " (or (:proposer prop) "a mystery guest") (if edit-url [:span " [ " [:a {:href edit-url} "edit" ] " ]"]) ]) (defn description [prop] (md/md-to-html-string (:description prop))) (defn proposal-summary [prop] [:div.proposal {:onclick (str "window.location=window.location.href+" (:id prop))} [:h2 (link-to prop)] (dateline prop) [:p {:align :right :style "font-weight: bold"} [:a {:href (:id prop)} (:sponsors_count prop) " sponsors"] ", " [:a {:href (:id prop)} (:comments prop) " comments"]] ]) (defn show-comment [c] (let [interested? (:interested c) demo? (:demo c) sponsor? (:sponsor c)] [:div.comment {:class (cond interested? "interested" demo? "demo" :else "drive-by")} [:div.attribution {} "At " (h/format-time (:created_at c)) ", " (:author c) (cond interested? " said they were interested in working on it" demo? 
" requested a review" :else " wrote") ":"] [:div.body (:text c)]])) (defn show [value] (let [prop (:proposal value) kudosh (fn [n] [:label {} (h/merge-attrs (f/radio-button :kudosh false n) {:onchange "if(this.checked) document.getElementById('sponsor').checked=true;"}) n])] (page (str "oneday - " (:title prop)) [:div.proposal {} [:h2 (:title prop)] (dateline prop (:edit-url value)) [:blockquote (description prop)] (when-let [sponsors (seq (:sponsors value))] [:div.sponsors {} [:h3 "Sponsored by"] [:ul (map (fn [s] [:li (:handle s) " (" (:sum s) " points)"]) sponsors)]]) [:div.comments {} [:h3 "Comments"] (map show-comment (:comments value)) [:p] [:form {:class :comment :method "POST" :action (str (:id prop) "/comments/new")} [:div.field (h/merge-attrs (f/text-area :text "") {:placeholder "What do you think?"})] [:div.field [:label {} (f/check-box :interested) " I am interested in working on this (no commitment)"]] [:div.field (h/merge-attrs (f/check-box :sponsor) {:onclick "this.checked||Array.prototype.map.call(document.getElementsByName('kudosh'), function(l) {l.checked=false;})"}) " I want to sponsor this work with " (kudosh "10") " " (kudosh "20") " " (kudosh "40") " kudosh"] [:div.field [:label {} (f/check-box :demo) " I have been working on this: please review my approach /solution!"]] [:div.field [:button {} "Add comment"]] ]]]))) (defn index [value] (page "One day ..." [:div.intro [:p "Ever said “<b>one day</b> we ought to do <i>thing</i>” ?"] [:p "or “I think we could do <i>thing</i> in <b>one day</b>”?"] [:p {} [:a {:href "post"} "Make your proposal"] " for the thing we need to do"]] [:div.proposals (map proposal-summary (:proposals value))])) <file_sep>/shell.nix { pkgs ? import <nixpkgs> {} } : with pkgs; stdenv.mkDerivation rec { name = "oneday"; buildInputs = [ ]; nativeBuildInputs = [ leiningen postgresql foreman ]; shellHook = '' echo "(setq cider-lein-command \"${pkgs.leiningen}/bin/lein\")" > lein.el ''; }<file_sep>/resources/migrations/20181107222351-add-description-complexity.up.sql alter table proposal add column description varchar; alter table proposal add column complexity varchar; <file_sep>/resources/migrations/20181107234252-add-proposal-ts.down.sql alter table proposal alter column created_at drop default; <file_sep>/project.clj (defproject oneday "0.1.0-SNAPSHOT" :description "FIXME: write description" :url "http://example.com/FIXME" :license {:name "MIT License" :url "https://opensource.org/licenses/MIT"} :dependencies [[org.clojure/clojure "1.8.0"] [org.slf4j/slf4j-log4j12 "1.7.25"] [com.layerware/hugsql "0.4.9"] [funcool/clojure.jdbc "0.9.0"] [markdown-clj "1.0.5"] [clj-http "3.9.1"] [hiccup "1.0.5"] [ring "1.7.1"] [bidi "2.1.4"] [cheshire "5.8.1"] [org.postgresql/postgresql "42.2.2"] [migratus "1.1.6"]] :plugins [[migratus-lein "0.6.7"]] :main ^:skip-aot oneday.core :target-path "target/%s" :profiles {:uberjar {:aot :all}}) <file_sep>/src/oneday/controllers/static.clj (ns oneday.controllers.static (:require [clojure.java.io :as io] [ring.util.response :refer [resource-response]])) (defn send-resource [req route] (let [res (str "public/" (-> route :route-params :path)) r (resource-response res)] {:respond r})) <file_sep>/resources/migrations/20181126231238-proposal-status.down.sql alter table proposal drop column status; alter table proposal_history drop column status; drop type proposal_status; <file_sep>/resources/migrations/20181107222351-add-description-complexity.down.sql alter table proposal drop column complexity; alter table proposal drop column 
description; <file_sep>/resources/migrations/20181109115349-add-comment-timestamp.up.sql alter table comment add column created_at timestamptz default now(); <file_sep>/resources/migrations/20181126105621-populate-proposal-history.up.sql insert into proposal_history (select * from proposal); <file_sep>/resources/migrations/20181114222036-create-authentication-table.up.sql create table authentication ( subscriber_id int references subscriber, iss varchar not null, sub varchar not null, display_name varchar, payload jsonb, primary key (iss, sub)); <file_sep>/resources/migrations/20181108220715-comments-table.down.sql drop table comment; <file_sep>/resources/migrations/20181107234252-add-proposal-ts.up.sql alter table proposal alter column created_at set default now(); <file_sep>/resources/migrations/20181126231238-proposal-status.up.sql create type proposal_status as enum ('Draft','Open','Completed', 'Withdrawn'); alter table proposal add column status proposal_status; alter table proposal_history add column status proposal_status; <file_sep>/resources/sql/proposal-frag.sql select p.*, s.handle as proposer, (select count(comment.id) from comment where nullif(comment.text,'') is not null and comment.proposal_id = p.id) as comments, (select count(kudosh.sponsor_id) from kudosh where kudosh.proposal_id = p.id) as sponsors_count from (proposal p left join subscriber s on s.id=p.proposer_id) where p.created_at is not null <file_sep>/resources/migrations/20181124230858-proposal-history.up.sql alter table proposal add column updated timestamptz default now(); create table proposal_history ( like proposal including all ); alter table proposal_history drop constraint proposal_history_pkey; alter table proposal_history add primary key (id, updated); create function copy_to_history() returns trigger as E' begin insert into proposal_history values(new.*) \x3b return new \x3b end\x3b ' language plpgsql; create trigger update_proposal_history after insert or update on proposal for each row execute procedure copy_to_history(); <file_sep>/resources/migrations/20181109115349-add-comment-timestamp.down.sql alter table comment drop column created_at; <file_sep>/src/oneday/db.clj (ns oneday.db (:require [migratus.core :as migratus] [jdbc.core :as jdbc])) (defn migration-config [config] {:store :database :migration-dir "migrations" :db (str "jdbc:" (-> config :db :spec))}) (defn migrate [config] (migratus/migrate (migration-config config))) (defn start [config] (migrate config) (let [conn (jdbc/connection (-> config :db :spec))] (assoc-in config [:db :connection] conn))) (defn stop [config] (if-let [c (-> config :db :connection)] (.close c)) config) <file_sep>/resources/migrations/20181124230858-proposal-history.down.sql drop trigger if exists update_proposal_history on proposal ; drop function if exists copy_to_history(); drop table if exists proposal_history; <file_sep>/src/oneday/core.clj (ns oneday.core (:require oneday.http) (:require oneday.db) (:require [oneday.config :as config]) (:require [clojure.edn :as edn]) (:gen-class)) (defonce system (atom nil)) (defn run [config] (->> config oneday.db/start oneday.http/start)) (defn stop [sys] (->> sys oneday.http/stop oneday.db/stop)) (defn -main [config-file & args] (org.apache.log4j.BasicConfigurator/configure) (. (. org.apache.log4j.Logger getRootLogger) setLevel org.apache.log4j.Level/INFO) (let [cfg (config/read-config config-file :oneday)] (println (config/redact-config cfg)) (reset! 
system (run cfg)) (println "Hello, World!"))) <file_sep>/resources/migrations/20181114155537-rename-agent-table.up.sql alter table agent rename to subscriber; alter table proposal rename column agent to proposer_id; <file_sep>/resources/migrations/20181126105621-populate-proposal-history.down.sql truncate table proposal_history; <file_sep>/resources/migrations/20181109091051-kudosh-table.up.sql create table kudosh ( points integer, created_at timestamptz default now(), proposal_id int references proposal, sponsor_id int references agent ); <file_sep>/resources/migrations/20181114222036-create-authentication-table.down.sql drop table authentication; <file_sep>/resources/migrations/20181107225537-add-proposal-id.down.sql alter table proposal drop column id; <file_sep>/src/oneday/helpers.clj (ns oneday.helpers) (defn credentials [request] (get-in request [:session "google" :credentials :id_token_decoded])) (defn format-time [time] (let [now (java.util.Date.) diff (- (.getTime now) (.getTime time))] (cond (< diff 10000) "a few seconds ago" (< diff 60000) (str (int (/ diff 1000)) " seconds ago") (< diff 300000) (str (int (/ diff 60000)) " minutes ago") :else (let [sdf (java.text.SimpleDateFormat. "yyyy-MM-dd hh:mm")] (.format sdf time))))) (defn merge-attrs [[tagname attrs & content] more-attrs] (into [tagname (merge attrs more-attrs)] content)) (defn request-subscriber-id [request] (-> request :session :subscriber :id)) <file_sep>/resources/migrations/20181102115644-make-agents-table.down.sql drop table agent; <file_sep>/resources/migrations/20181122213551-comment-is-demo.down.sql alter table comment drop column demo; <file_sep>/resources/migrations/20181102115644-make-agents-table.up.sql create table agent ( id serial primary key, handle varchar, display_name varchar ); --;; alter table proposal add column agent int references agent; <file_sep>/src/oneday/controllers/comment.clj (ns oneday.controllers.comment (require [oneday.domain :as d] [clojure.java.io :as io] [jdbc.core :as jdbc] [clojure.walk :refer [keywordize-keys]] [ring.util.response :as rsp] #_ [oneday.views.comment :as v])) (defn new [req route] (let [proposal-id (Integer/parseInt (-> route :route-params :id)) params (keywordize-keys (:form-params req)) fields (assoc params :interested (not (empty? (:interested params))) :sponsor (not (empty? (:sponsor params))) :demo (not (empty? (:demo params))) :kudosh (Integer/parseInt (or (:kudosh params) "0")) :author-id (-> req :session :subscriber :id))] (if-let [comment (d/add-comment (:db req) proposal-id fields)] {:respond (rsp/redirect (str "/proposals/" proposal-id) :see-other)} {:respond {:status 200 :headers {"content-type" "text/plain"} :body (pr-str fields)}})))
06b272de9c40be130a90a27467030ba4293b6767
[ "Clojure", "Markdown", "Procfile", "SQL", "Nix", "PLpgSQL", "Org" ]
48
Clojure
simplybusiness/oneday
95250077677941499c373997de921e6a1304f134
bc0c59f4d6b978d9a92ad66972a024ef59ef0936
refs/heads/main
<repo_name>leonardo-fabricio/Hangman_python<file_sep>/jogoDaForca.py # Hangman Game (Jogo da Forca) # Programação Orientada a Objetos # Import import random # Board (tabuleiro) board = [''' >>>>>>>>>>Hangman<<<<<<<<<< +---+ | | | | | | =========''', ''' +---+ | | O | | | | =========''', ''' +---+ | | O | | | | | =========''', ''' +---+ | | O | /| | | | =========''', ''' +---+ | | O | /|\ | | | =========''', ''' +---+ | | O | /|\ | / | | =========''', ''' +---+ | | O | /|\ | / \ | | ========='''] # Classe class Hangman: # Método Construtor def __init__(self,word): self.word = word self.cont = 0 # Método para verificar se o jogo terminou def hangman_over(self): if(self.cont == 7): return True def increment(self): self.cont += 1 def getCont(self): return self.cont def getWord(self): return self.word # Método para checar o status do game e imprimir o board na tela def print_game_status(self): print(board[self.cont]) # Método para adivinhar a letra def guess(self, letter): lista = list(self.word) for x in lista: if(x == letter): return True def __len__(self): return len(list(self.word)) # Método para verificar se o jogador venceu def hangman_won(self,word_aux): aux = 0 lista = list(word_aux) lista2 = list(self.word) for x in range(0,len(lista2)-1): for y in range(0,len(lista)): if lista2[x] == lista[y]: aux += 1 if(aux == len(lista2)-1): return True else: return False # # Método para não mostrar a letra no board # def hide_word(self): # Função para ler uma palavra de forma aleatória do banco de palavras def rand_word(): with open("palavras.txt", "rt") as f: bank = f.readlines() return bank[random.randint(0,len(bank)-1)].strip() # Função Main - Execução do Programa def main(): # Objeto game = Hangman(rand_word()) #print(rand_word()) TESTE # Enquanto o jogo não tiver terminado, print do status, solicita uma letra e faz a leitura do caracter # Verifica o status do jogo letra_certas = " " letra_errada = " " game.print_game_status() print("A PALAVRA CONTEM ",len(game), " LETRAS") while game.getCont() != 6: in_user_word = input("Digite uma letra: ") if not (game.guess(str(in_user_word))): game.increment() letra_errada += in_user_word else: letra_certas += in_user_word print("Letras Certas: "+ letra_certas) print("Letras Erradas: "+ letra_errada) # De acordo com o status, imprime mensagem na tela para o usuário if game.hangman_won(letra_certas): print('\nParabéns! Você venceu!!') break game.print_game_status() print("A PALAVRA CONTEM ",len(game), " LETRAS") print("Palavra Certa: ", game.getWord()) print ('\nFoi bom jogar com você! Agora vá estudar!\n') # Executa o programa if __name__ == "__main__": main() <file_sep>/README.md # Hangman_python # Hangman_python
c1b916a511702c1db6733f6ae4ecb1ecbc3f30ca
[ "Markdown", "Python" ]
2
Markdown
leonardo-fabricio/Hangman_python
4e503a78bb1fe99ed992bab6993f13ee875b6bdf
c74c33581d7868cd27b56ef0a5d59282a0a902ce
refs/heads/master
<repo_name>Riverface/Arrays-practice<file_sep>/js/scripts.js
var heckle = ["haha, look at this guy. <br> He likes ", ", what a loser. Really? <br> You like ", "? <br> Hahahaha. <br> Ooh, look at me, I'm ", "and I like ", "', <br> yeah that's you dude"];
var fullneg = [];
var twothings = [];
var things;
var personName;

$(document).ready(function() {
  $("#resultbutton").click(function(){
    splice();
    // join the fragments into one string before handing them to jQuery
    $("#results").html(fullneg.join(""));
  });
});

function splice() {
  // start from a clean slate so repeated clicks don't append duplicate heckles
  fullneg = [];
  personName = $("#name").val() + " ";
  things = ["thing1", "thing2", personName, "thing3"];
  twothings = things.slice(0, 3);
  console.log(heckle);
  things[0] = $("#thing1").val();
  things[1] = $("#thing2").val();
  things[3] = $("#thing3").val();
  console.log(things[0]);
  // interleave the heckle fragments with the user's answers:
  // even indexes take the next heckle fragment, odd indexes take the next answer
  var h = 0;
  var t = 0;
  for (var i = 0; i < (heckle.length + things.length); i++) {
    if (i % 2 == 0) {
      fullneg.push(heckle[h]);
      console.log(heckle[h] + " " + h);
      h++;
    } else {
      fullneg.push(things[t]);
      t++;
    }
  }
  console.log(fullneg);
  console.log(personName);
}
99a696dd135555987a6b056551ec88107bbdc460
[ "JavaScript" ]
1
JavaScript
Riverface/Arrays-practice
be4d17b86bae750d9e92bf6bb5d6bb7663efb98b
eb5693a366cba5b5c430535293f60308c506130b
refs/heads/master
<repo_name>monochromer/frontend-interview<file_sep>/README.md
# frontend-interview
Collection of [questions](https://github.com/monochromer/frontend-interview/issues) for interviewing frontend (Web UI) developers
7f8fe57dd95672b6d531fe117794583a2e9b7f94
[ "Markdown" ]
1
Markdown
monochromer/frontend-interview
e4496c003d9632f1ff0fb9ff285708801baa6bce
5f3d8f2ab349f92a0f63c8918fb3c7d84bec2229
refs/heads/main
<file_sep>const express = require('express'); const path = require('path') const http = require('http'); const socketio = require('socket.io'); const { Socket } = require('dgram'); const app = express(); const { reusableFxn, reusableLocationFxn } = require('./src/config/util/messages'); const server = http.createServer(app); const { addUser, removeUser, getUser, getUserWithInRoom } = require('./src/config/util/users') const io = socketio(server) const publicDirectoryPath = path.join(__dirname, '/public') app.use(express.static(publicDirectoryPath)) port = process.env.PORT; let count = 0; const Filter = require('bad-words') io.on('connection', (socket) => { socket.on('join', ({ userName, room }, callback) => { const { user, error } = addUser({ id: socket.id, userName, room }) if (error) { return callback(error) } socket.join(user.room) socket.emit('message', reusableFxn('Admin', 'Welcome')) socket.broadcast.to(user.room).emit('message', reusableFxn(`${user.userName} has joined`)); io.to(user.room).emit('roomData', { room: user.room, users: getUserWithInRoom(user.room) }) callback() }) socket.on('gettingMsg', (msg, callback) => { const user = getUser(socket.id) const filter = new Filter(); if (filter.isProfane(msg)) { return callback('Profanity is not allowed'); } io.to(user.room).emit('message', reusableFxn(user.userName, msg)) callback() }) socket.on('disconnect', () => { const user = removeUser(socket.id); if (user) { io.to(user.room).emit('message', reusableFxn('Admin', `${user.userName} has left`)) io.to(user.room).emit('roomData', { room: user.room, users: getUserWithInRoom(user.room) }) } }) socket.on('sendLocation', (data, callback) => { const user = getUser(socket.id) io.to(user.room).emit('senLoc', reusableLocationFxn(user.userName, `https://www.google.com/maps?q=${data.latitude},${data.longitude}`)) callback() }) }) server.listen(port, () => { console.log('server starting on ' + port); })<file_sep>const users = []; const addUser = ({ id, userName, room }) => { username = userName.trim().toLowerCase() room = room.trim().toLowerCase() //validate user if (!username || !room) { return { error: 'username and room are required' } } //checking the existing user const existingUser = users.find((user) => { return user.userName === username && user.room === room }) if (existingUser) { return { error: 'user already exist' } } //removing user const user = { id, userName, room } users.push(user); return { user } } const removeUser = (id) => { const index = users.findIndex((user) => { return user.id === id }) if (index !== -1) { return users.splice(index, 1)[0] } } const getUser = function (id) { return users.find((user) => user.id === id) } const getUserWithInRoom = function (room) { return users.filter((user) => user.room === room) } // addUser({ // id: 22, // userName: 'Andrew', // room: '123' // }) // addUser({ // id: 23, // userName: 'James', // room: '123' // }) // addUser({ // id: 23, // userName: 'kimmu', // room: '1234' // }) // const res = addUser({ // id: 24, // userName: '', // room: '' // }) module.exports = { getUser, getUserWithInRoom, addUser, removeUser }<file_sep>const socket = io() // document.querySelector('#increment').addEventListener('click', () => { // socket.emit('increment') // }) const messageForm = document.querySelector('#message-form') const messageFormInput = messageForm.querySelector('input'); const messageFormButton = messageForm.querySelector('button'); const messageTemplate = document.querySelector('#message-template').innerHTML const loadMessages = 
document.querySelector('#loadMessages') const locationTemplate = document.querySelector('#location-template').innerHTML const sendLocation = document.querySelector('#sendLocation') const sideBarTemplate = document.querySelector('#sideBar-template').innerHTML const autoScroll = () => { const newMessage = loadMessages.lastElementChild; const newMessageStyle = getComputedStyle(newMessage); const newMessageMargin = parseInt(newMessageStyle.marginBottom) const newMessageHeight = newMessage.offsetHeight + newMessageMargin; //visibe height const visibleHeight = loadMessages.offsetHeight //container height const containerHeight = loadMessages.scrollHeight; // scroll height const scrollOffSetHeight = loadMessages.scrollTop + visibleHeight if (containerHeight - newMessageHeight <= scrollOffSetHeight) { loadMessages.scrollTop = loadMessages.scrollHeight } } //option const { userName, room } = Qs.parse(location.search, { ignoreQueryPrefix: true }) socket.on('message', (message) => { const html = Mustache.render(messageTemplate, { loadMessages: message.text, createdAt: moment(message.createdAt).format('LT'), userName: message.username }) loadMessages.insertAdjacentHTML('beforeend', html) autoScroll() }) socket.on('senLoc', (senLoc) => { const html = Mustache.render(locationTemplate, { sendLocation: senLoc.url, createdAt: moment(senLoc.createdAt).format('LT'), userName: senLoc.username }) loadMessages.insertAdjacentHTML('beforeend', html) autoScroll() }) socket.on('roomData', ({ room, users }) => { const html = Mustache.render(sideBarTemplate, { room, users }) document.querySelector('#sideBar').innerHTML = html; }) messageForm.addEventListener('submit', (e) => { e.preventDefault() messageFormButton.setAttribute('disabled', 'disabled') const msg = e.target.elements.message.value socket.emit('gettingMsg', msg, (error) => { messageFormButton.removeAttribute('disabled'); messageFormInput.value = '' messageFormInput.focus() if (error) { return console.log(error) } console.log('Message has been delivered!') }) }) const locationButton = document.querySelector('#sendLoc') locationButton.addEventListener('click', () => { if (!navigator.geolocation) { return alert("Geo Location not supported by browser"); } locationButton.setAttribute('disabled', 'disabled'); navigator.geolocation.getCurrentPosition((position) => { const data = { latitude: position.coords.latitude, longitude: position.coords.longitude } socket.emit('sendLocation', data, () => { locationButton.removeAttribute('disabled') console.log("Location shared") }) }) }) socket.emit('join', { userName, room }, (error) => { if (error) { alert(error); location.href = '/'; } })
f557306eccf2a3a22ed3bf2398a4e76f498eb213
[ "JavaScript" ]
3
JavaScript
Soodharshal/chatApp
89d82ee5775b75ae675f760e6e8f474db9bc0d13
361bef61285d4a301614caf763d5dba0a9aa001c
refs/heads/master
<file_sep># one-sided-pyramid Minimal pyramid application for testing the installation process. <file_sep>from pyramid.view import view_config from pyramid.response import Response @view_config(route_name='home',) def my_view(request): return Response("This pyramid has only one side.") <file_sep>one-sided-pyramid README
f12e231ac5484e906c5c7feaf6ee00420bc1e107
[ "Markdown", "Text", "Python" ]
3
Markdown
kratenko/one-sided-pyramid
9673317f5245c8e3ee2a5698cd1acdd0b8738f0f
cc23866e232cc6fef5dda1cb49ffe97d7a512dbf
refs/heads/master
<file_sep><div class="row"> <div class="col-md-1"></div> <div class="col-md-10"> <div class="jumbotron"> <%= form_for(bus) do |f| %> <% if @user.errors.any? %> <div class="form-group alert alert-error alert-block" id="error_explanation"> <button type="button" class="close" data-dismiss="alert">x</button> <% @user.errors.full_messages.each do |message| %> <h6 class="heading_error"><%= message%></h6> <% end %> </div> <% end %> <% if bus.errors.any? %> <div class="form-group alert alert-error alert-block" id="error_explanation"> <button type="button" class="close" data-dismiss="alert">x</button> <% bus.errors.full_messages.each do |message| %> <h6 class="heading_error"><%= message%></h6> <% end %> </div> <% end %> <div class="row" style="border: 2px solid #333"> <div class="col-md-12"> <center><h3>Registration for Driver</h3></center> </div> <div class="col-md-12"> <div class="form-group"> <%= f.label :email %><br /> <%= f.email_field :email, autofocus: true, autocomplete: "email", class:"form-control" %> </div> <div class="form-group"> <%= f.label :password %> <%= f.password_field :password, autocomplete: "off", class:"form-control" %> </div> <div class="form-group"> <%= f.label :password_confirmation %><br /> <%= f.password_field :password_confirmation, autocomplete: "off", class:"form-control"%> </div> </div> </div> <br> <div class="row" style="border: 2px solid #333"> <div class="col-md-12"> <center><h3>Driver's Bus Detail</h3></center> </div> <div class="col-md-12"> <div style='width: 100%;'> <div id="map" style='width: 100%; height: 400px;'></div> </div> </div> <div class="col-md-12"> <div class="form-group"> <%= f.label :bus_number %> <%= f.text_field :bus_number, class:"form-control" %> </div> <div class="form-group"> <%= f.label :current_lat %> <%= f.number_field :current_lat, class:"form-control", step:"0.00000000001" %> </div> <div class="form-group"> <%= f.label :current_lan %> <%= f.number_field :current_lan, class:"form-control", step:"0.00000000001" %> </div> <div class="form-group"> <%= f.label :evening_start_time %><br> <%= f.time_select :evening_start_time,{prompt: true}, {class:"form-control", style:"font-size: 0.9em;width:49.5%;display:inline-block;", required: true}%> </div> <div class="form-group"> <%= f.label :evening_end_time %><br> <%= f.time_select :evening_end_time,{prompt: true}, {class:"form-control", style:"font-size: 0.9em;width:49.5%;display:inline-block;", required: true}%> </div> <div class="form-group"> <%= f.label :morning_start_time %><br> <%= f.time_select :morning_start_time,{prompt: true}, {class:"form-control", style:"font-size: 0.9em;width:49.5%;display:inline-block;", required: true}%> </div> <div class="form-group"> <%= f.label :morning_end_time %><br> <%= f.time_select :morning_end_time,{prompt: true}, {class:"form-control", style:"font-size: 0.9em;width:49.5%;display:inline-block;", required: true}%> </div> <div class="form-group"> <%= f.label :description %> <%= f.text_area :description, class:"form-control", style:"resize:none;", rows:3 %> </div> </div> </div> <br> <div class="form-group"> <%= f.submit class:"btn btn-success btn-block" %> </div> <% end %> </div> </div> <div class="col-md-1"></div> </div> <script src="//maps.google.com/maps/api/js?key=[your API key]"></script> <script src="//cdn.rawgit.com/mahnunchik/markerclustererplus/master/dist/markerclusterer.min.js"></script> <script src='//cdn.rawgit.com/printercu/google-maps-utility-library-v3-read-only/master/infobox/src/infobox_packed.js' type='text/javascript'></script> <!-- only if 
you need custom infoboxes --> <script> handler = Gmaps.build('Google'); handler.buildMap({ provider: {}, internal: {id: 'map'}}, function(){ markers = handler.addMarkers([ { "lat": 0, "lng": 0, "picture": { "url": "https://maps.googleapis.com/maps/api/js?key=<KEY>&callback=initialize", "width": 32, "height": 32 }, "infowindow": "hello!" } ]); handler.bounds.extendWith(markers); handler.fitMapToBounds(); }); </script> <file_sep> <div class="container"> <div class="row"> <div class="col-md-12"> <div class="jumbotron"> <div class="row"> <div class="col-md-12"><center><h2>Bus Detail</h2></center></div> </div> <div class="row"> <div class="col-md-6"> <label class="my_label">Driver Email:</label> </div> <div class="col-md-6"> <label class="my_value"> <%= @bus.user.email %> </label> </div> </div> <div class="row"> <div class="col-md-6"> <label class="my_label">Bus Number:</label> </div> <div class="col-md-6"> <label class="my_value"> <%= @bus.bus_number %> </label> </div> </div> <div class="row"> <div class="col-md-6"> <label class="my_label">Current Longitude:</label> </div> <div class="col-md-6"> <label class="my_value"> <%= @bus.current_lan %> </label> </div> </div> <div class="row"> <div class="col-md-6"> <label class="my_label">Current Latitude:</label> </div> <div class="col-md-6"> <label class="my_value"> <%= @bus.current_lat %> </label> </div> </div> <div class="row"> <div class="col-md-6"> <label class="my_label">Bus Status:</label> </div> <div class="col-md-6"> <% if @bus.status%> <label class="my_value"> Active</label> <%else%> <label class="my_value"> Un-Active</label> <%end%> </div> </div> <div class="row"> <div class="col-md-6"> <label class="my_label">Morning Start Time:</label> </div> <div class="col-md-6"> <% @mst = @bus.morning_start_time.to_s %> <% @mst = @mst.tr('UTC', '')%> <label class="my_value"> <%= @mst %> </label> </div> </div> <div class="row"> <div class="col-md-6"> <label class="my_label">Morning End Time:</label> </div> <div class="col-md-6"> <% @met = @bus.morning_end_time.to_s %> <% @met = @met.tr('UTC', '')%> <label class="my_value"> <%= @met %> </label> </div> </div> <div class="row"> <div class="col-md-6"> <label class="my_label">Evening Start Time:</label> </div> <div class="col-md-6"> <% @est = @bus.evening_start_time.to_s %> <% @est = @est.tr('UTC', '')%> <label class="my_value"> <%= @est %> </label> </div> </div> <div class="row"> <div class="col-md-6"> <label class="my_label">Evening End Time:</label> </div> <div class="col-md-6"> <% @eet = @bus.evening_end_time.to_s %> <% @eet = @eet.tr('UTC', '')%> <label class="my_value"> <%= @eet %> </label> </div> </div> <div class="row"> <div class="col-md-6"> <label class="my_label">Description:</label> </div> <div class="col-md-6"> <label class="my_value"> <%= @bus.description %> </label> </div> </div> <div class="row"> <div class="col-md-3"> </div> <div class="col-md-3"> <%= link_to 'Edit', edit_bus_path(@bus), class:"btn btn-warning btn-block" %> </div> <div class="col-md-3"> <%= link_to 'Back', buses_path, class:"btn btn-block btn-info"%> </div> <div class="col-md-3"> </div> </div> </div> </div> </div> </div> <file_sep>class BusesController < ApplicationController before_action :set_bus, only: [:show, :edit, :update, :destroy] # GET /buses # GET /buses.json def index @buses = Bus.all end # GET /buses/1 # GET /buses/1.json def show end # GET /buses/new def new @user = User.new @bus = Bus.new end # GET /buses/1/edit def edit @bus = Bus.find(params[:id]) @user = User.find(@bus.user_id) end # POST /buses # POST 
/buses.json def create @bus = Bus.new(bus_params) @user = User.new @user.email = params[:bus][:email] @user.password = <PASSWORD>[:<PASSWORD>] @user.password_confirmation = <PASSWORD>[:bus][:password_confirmation] @user.role = 1 if @user.save @bus.user_id = @user.id respond_to do |format| if @bus.save format.html { redirect_to buses_path, notice: 'Bus was successfully created.' } format.json { render :show, status: :created, location: @bus } else format.html { render :new } format.json { render json: @bus.errors, status: :unprocessable_entity } end end else respond_to do |format| format.html { render :new } format.json { render json: @user.errors, status: :unprocessable_entity } end end end # PATCH/PUT /buses/1 # PATCH/PUT /buses/1.json def update respond_to do |format| if @bus.update(bus_params) format.html { redirect_to @bus, notice: 'Bus was successfully updated.' } format.json { render :show, status: :ok, location: @bus } else format.html { render :edit } format.json { render json: @bus.errors, status: :unprocessable_entity } end end end # DELETE /buses/1 # DELETE /buses/1.json def destroy @bus.destroy respond_to do |format| format.html { redirect_to buses_url, notice: 'Bus was successfully destroyed.' } format.json { head :no_content } end end private # Use callbacks to share common setup or constraints between actions. def set_bus @bus = Bus.find(params[:id]) end # Never trust parameters from the scary internet, only allow the white list through. def bus_params params.require(:bus).permit(:bus_number, :email, :password, :password_confirmation, :current_lat, :current_lan, :description, :evening_start_time, :evening_end_time, :morning_start_time, :morning_end_time, :status, :user_id) end end <file_sep><div class="row"> <div class="col-md-12"> <center><h1>Shwoing All Buses</h1></center> </div> <div class="col-md-12"> <div class="table-responsive"> <table class="table table-striped table-bordered table-hover"> <thead class="thead-dark"> <tr> <th>Driver Email</th> <th>Current lat</th> <th>Current lan</th> <th>Morning start time</th> <th>Morning End Time</th> <th>Evening start time</th> <th>Evening end time</th> <th colspan="3" style="text-align:center;">Actions</th> </tr> </thead> <tbody> <% @buses.each do |bus| %> <tr> <td><%= bus.user.email%></td> <td><%= bus.current_lat %></td> <td><%= bus.current_lan %></td> <% @mst = bus.morning_start_time.to_s %> <% @mst = @mst.tr('UTC', '')%> <td><%= @mst %></td> <% @met = bus.morning_end_time.to_s %> <% @met = @met.tr('UTC', '')%> <td><%= @met %></td> <% @est = bus.evening_start_time.to_s %> <% @est = @est.tr('UTC', '')%> <td><%= @est %></td> <% @eet = bus.evening_end_time.to_s %> <% @eet = @eet.tr('UTC', '')%> <td><%= @eet %></td> <td><%= link_to 'Show', bus, class:"btn btn-success"%></td> <td><%= link_to 'Edit', edit_bus_path(bus) , class:"btn btn-info"%></td> <td><%= link_to 'Destroy', bus, method: :delete, data: { confirm: 'Are you sure?' 
}, class:"btn btn-danger" %></td> </tr> <% end %> </tbody> </table> </div> </div> </div> <file_sep>class Api::V1::RegistrationsController < ApplicationController #skip_before_action :authenticate_user_from_token!, :only => [:create, ], :raise => false def create begin if params[:password] == params[:password_confirmation] @user = User.new @user.email = params[:email] @user.password = params[:password] @user.password_confirmation = params[:password_confirmation] @user.role = 2 if @user.save @std = Student.new @std.name = params[:name] @std.roll_number = params[:roll_number] @std.batch_number = params[:batch_number] @std.semseter = params[:semseter] @std.department = params[:name] @std.user_id = @user.id if @std.save render json: { :user => @user.as_json(:except => [:created_at, :updated_at], :include => [:student])}, status: 200 else render json: {:errors => @std.errors.full_messages}, status: 200 end else render json: {:errors => @user.errors.full_messages}, status: 200 end else render json: "-1", status: 200 end rescue render json: "-2", status: 200 end end end <file_sep>class Bus < ApplicationRecord attr_accessor :email, :password, :password_confirmation belongs_to :user validates :bus_number, presence: true validates :current_lat, presence: true validates :current_lan, presence: true validates :description, presence: true validates :evening_start_time, presence: true validates :evening_end_time, presence: true validates :morning_start_time, presence: true validates :morning_end_time, presence: true end <file_sep>Rails.application.routes.draw do resources :students resources :buses devise_for :users, path: 'users', controllers: { sessions: 'users/sessions'} resources :homes namespace :api, defaults: {format: :json} do namespace :v1 do resources :sessions, :only => [:create] resources :registrations do collection do post :change_password end end end end root 'homes#index' # For details on the DSL available within this file, see http://guides.rubyonrails.org/routing.html end
5577934f1feef28ad3c13e4f8bfd1ed778920dcb
[ "HTML+ERB", "Ruby" ]
7
HTML+ERB
Maha1469/university_bus_tracking_system
05dc78f2cef6eb5c6f8737b7cd7052972c9a7f64
67275ac4e9687172af65b17dedfbc28123a7942d
refs/heads/master
<file_sep>from django.http import HttpResponse from django.contrib import messages from django.contrib.auth import login, logout from django.contrib.auth.decorators import login_required from django.core.urlresolvers import reverse from django.shortcuts import render, redirect from social.apps.django_app.utils import psa from newmusic.utils.soundcloud import client, get_user_avatar, get_user_permalink from newmusic.utils.opinions import sort_true, sort_false @login_required def user_root(request, username): """ view for user profile, pulls liked songs from database to populate template, as well as a few other pieces of data about user from soundcloud profile """ if request.user.username == username: true_list = sort_true(request.user) false_list = sort_false(request.user) avatar_url = get_user_avatar(request.user) permalink_url = get_user_permalink(request.user) return render(request, 'user_page.html', { 'username': username, 'true_list': true_list, 'false_list': false_list, 'avatar_url': avatar_url, 'permalink_url': permalink_url, }) else: return HttpResponse("not an active user!") @psa('social:complete') def register_by_access_token(request, backend): """ registers user through soundcloud social_auth """ code = request.GET.get('code') resource = client.exchange_token(code) user = request.backend.do_auth(resource.access_token) if user: redirect_url = reverse('explore') if "next" in request.session: redirect_url = request.session.pop("next") login(request, user) return redirect(redirect_url) else: return HttpResponse("error") def login_redirect(request): """ redirects to login page w/ soundcloud """ if "next" in request.GET: request.session["next"] = request.GET.get("next") return render(request, 'register.html', {"soundcloud_redirect" : client.authorize_url()}) def logout_view(request): """ sends successful logout message to about page """ logout(request) messages.success(request, "You have successfully logged out!") return redirect(reverse('about')) <file_sep>from newmusic.utils.soundcloud import get_artists, get_rand_track_for_artist from newmusic.main.models import Artist, Song def collect_artists(): """ Calls on soundcloud utils to populate database with artists """ artists = get_artists() for artist in artists: if not Artist.objects.filter(name=artist['permalink']).exists(): if artist.get('description'): #truncate description to description max length artist['description'] = artist['description'][:Artist._meta.get_field('description').max_length] Artist.objects.create( name=artist['permalink'], sc_id=artist['id'], url=artist['permalink_url'], avatar_url=artist['avatar_url'], country=artist['country'], city=artist['city'], website=artist['website'], description=artist['description'], followers_count=artist['followers_count'] ) def collect_songs(artists=None): """ Calls on soundcloud utils to populate database with songs based on collected artists """ artists = artists if artists else Artist.objects.all() for artist in artists: song_dict = get_rand_track_for_artist(artist) if ( song_dict and not Song.objects.filter(artist_id=song_dict['artist_id']).exists() ): Song.objects.create( name=song_dict['permalink'], url=song_dict['permalink_url'], playback_count=song_dict['playback_count'], artist_id=song_dict['artist_id'] ) <file_sep>from django.forms import ModelForm from django import forms from newmusic.main.models import Opinion class OpinionForm(ModelForm): opinion = forms.BooleanField(required=False, widget=forms.HiddenInput()) class Meta: model = Opinion fields = ['opinion', 'artist'] 
widgets = {'artist': forms.HiddenInput()} class DeleteOpinionForm(forms.ModelForm): class Meta: model = Opinion fields = ['opinion'] widgets = {'opinion': forms.HiddenInput()} <file_sep>import random import logging import soundcloud from django.conf import settings from django.core.exceptions import ObjectDoesNotExist from newmusic.main.models import Artist, Song logger = logging.getLogger(__name__) # create client object with app credentials client = soundcloud.Client( client_id=settings.SOCIAL_AUTH_SOUNDCLOUD_KEY, client_secret=settings.SOCIAL_AUTH_SOUNDCLOUD_SECRET, redirect_uri=settings.SOUNDCLOUD_REDIRECT_URI) def get_artists(): """ Returns artists_list from Soundcloud API with specific follower count. Collects various information about artist to be saved into database """ page_size = 100 artists_list = [] url = '/users' counter = 0 max_artists = 200 while url and len(artists_list) < max_artists: logger.warning( 'retrieving artists, currently have %s, on loop %s', len(artists_list), counter) response = client.get(url, limit=page_size, linked_partitioning=1) for resource in response.collection: if ( resource.followers_count >= 1000 and resource.followers_count <= 5000 and resource.track_count > 0 ): artists_list.append({ "permalink": resource.permalink, "permalink_url": resource.permalink_url, "followers_count": resource.followers_count, "avatar_url": resource.avatar_url, "id": resource.id, "country": resource.country, "city": resource.city, "website": resource.website, "description": resource.description, }) if len(artists_list) == max_artists: break counter = counter + 1 url = getattr(response, 'next_href', None) artists_list = sorted(artists_list, key=lambda artist: artist["followers_count"]) return artists_list def get_rand_track_for_artist(artist_obj): """ Returns random song from artist object using soundcloud API, collects information about song to be saved into database. 
""" response = client.get('users/{}/tracks'.format(artist_obj.sc_id)) track = random.choice(response) artist_id = artist_obj.id permalink = track.obj['permalink'] permalink_url = track.obj['permalink_url'] playback_count = track.obj['playback_count'] return ({ 'permalink':permalink, 'permalink_url':permalink_url, 'playback_count':playback_count, 'artist_id':artist_id }) def get_user_avatar(user): """ Returns user avatar from Soundcloud profile """ try: sc = user.social_auth.get(provider='soundcloud') except ObjectDoesNotExist: return None return sc.extra_data.get('avatar_url') def get_user_permalink(user): """ Returns user permalink from Soundcloud profile """ try: sc = user.social_auth.get(provider='soundcloud') except ObjectDoesNotExist: return None return sc.extra_data.get('permalink_url') <file_sep>{% extends '__base.html' %} {% load staticfiles %} {% block 'content' %} {% block 'scripts' %} {{ block.super }} <script type="text/javascript" src="{% static 'js/user_page.js' %}"></script> {% endblock %} <div class="container-fluid"> <div class="row"> <div class="col-lg-12"> <div class="well"> <a href='{{ permalink_url }}'><img src='{{ avatar_url }}'></a> <h1><a href='{{ permalink_url }}'>{{ username }}</a></h1> </div> </div> <div class="row"> <div class="col-xs-12"> <p class="text-muted" id="delete_notice"><span class="glyphicon glyphicon-ok"></span> Deleted</h6> </div> </div> </div> {% if true_list %} <div class="row"> <div class="col-lg-12"> </div> </div> <div class="row user_row"> {% for opinion in true_list %} <div class="col-lg-3 col-md-4 col-sm-6"> <div class="well"> <form class="form delete hover-btn" method='POST' action="{% url 'opinion_delete' opinion.pk %}">{% csrf_token %} <div class="form-group"> <button type='submit' id="delete_button" class="btn btn-default"> <span class="glyphicon glyphicon-remove-circle"> </span> </button> </div> </form> <h5><a href="{% url 'artist_page' opinion.artist.name %}">{{ opinion.artist.name }}</a> <br> <a href="{% url 'artist_page' opinion.artist.name %}"><img src="{{ opinion.artist.avatar_url }}"></a> <br> Followers: {{ opinion.artist.followers_count }} <br> </h5> </div> </div> {% endfor %} {% else %} <div class="row"> <div class="col-lg-12"> <div class="well"> <h4>You haven't liked any artists yet!</h4> <br> <h5><a href="{% url 'explore' %}">click here to start</a></h5> </div> </div> </div> {% endif %} </div> </div> {% endblock %} <file_sep> from django.contrib.auth.decorators import login_required from django.http import HttpResponse, HttpResponseBadRequest from django.shortcuts import render, redirect, get_object_or_404 from django.utils.decorators import method_decorator from django.views.generic import View from newmusic.main.forms import OpinionForm from newmusic.main.models import Artist from newmusic.utils.opinions import get_unique_artist @method_decorator(login_required, name='dispatch') class ArtistIndex(View): template_name = "explore.html" def get(self, request): """ pulls unique artist from database, sends artist along with song and all important information about both to template """ artist = get_unique_artist(request.user) if artist is None: return render(request, "no_artist.html") user = request.user song = artist.song_set.first() if song is None: return HttpResponse("No Song") song_url = song.url song_pb_count = song.playback_count like_form = OpinionForm({'artist': artist.id, 'opinion': True}) dislike_form = OpinionForm({'artist': artist.id, 'opinion': False}) return render(request, self.template_name, { 'artist': artist, 
'song_url': song_url, 'song_pb_count': song_pb_count, 'user': user, 'like_form': like_form, 'dislike_form': dislike_form, }) def post(self, request): """ Saves user opinion on artist with hidden form """ form = OpinionForm(request.POST) if form.is_valid(): opinion = form.save(commit=False) opinion.user = request.user opinion.save() else: print(form.errors) return HttpResponseBadRequest("Form is invalid") return redirect('explore') @method_decorator(login_required, name='dispatch') class OpinionDelete(View): def post(self, request, pk): """ Deletes user opinion on artist with hidden form """ opinion = get_object_or_404(request.user.opinion_set, pk=pk) opinion.delete() if request.is_ajax(): return HttpResponse(status=204) return redirect("user_page", request.user.username) class AboutIndex(View): template_name = "about.html" def get(self, request): """ View for basic about page """ return render(request, self.template_name) class ArtistPage(View): template_name = "artist_page.html" def get(self, request, artist): """ Pulls info from artist in database to populate specific artist page, as well as song info tied to that artist """ artist_object = get_object_or_404(Artist, name=artist) song = artist_object.song_set.first() return render(request, self.template_name, {'artist': artist_object, 'song': song}) <file_sep><nav class="navbar navbar-default navbar-static-top"> <div class="container-fluid"> <div class="row"> <div class="col-xs-12"> <ul class="nav navbar-nav"> <div class="navbar-header"> <strong class="navbar-brand">newmusic</strong> </div> <li><a href="{% url 'explore' %}">Explore</a></li> <li><a href="{% url 'about' %}">About</a></li> {% if request.user.is_authenticated %} <li><a href="{% url 'user_page' user.username %}">{{ user.username }}</a></li> <li><a href="{% url 'logout' %}">Logout</a></li> {% else %} <li><a href="{% url 'login' %}">Login</a> {% endif %} </ul> </div> </div> </div> </nav> <file_sep>from django.conf.urls import url, patterns from . import views # from django.contrib import admin # admin.autodiscover() urlpatterns = patterns('', # the view to register our user with a third party token # the backend is the python social auth backend e.g. facebook url(r'^register-by-token/(?P<backend>[^/]+)/$', views.register_by_access_token), url(r'^login$', views.login_redirect, name="login"), url(r'^logout$', views.logout_view, name="logout"), url(r'^(?P<username>[\w.@+-]+)/$', views.user_root, name="user_page"), ) <file_sep>from django.conf import settings from django.contrib.auth.models import User from newmusic.main.models import Artist, Opinion def sort_true(user): """ Returns true values from user's opinion table """ true_list = user.opinion_set.filter(opinion=True) return true_list def sort_false(user): """ Returns false values from user's opinion table """ false_list = user.opinion_set.filter(opinion=False) return false_list def get_unique_artist(user): """ Return a random artist that a user has no opinion on """ seen_ids = user.opinion_set.values_list("artist_id", flat=True) return Artist.objects.exclude(id__in=seen_ids).order_by('?').first() <file_sep>#New Music This app uses the Soundcloud API to bring upcoming artists to the user. It displays artists with low follower counts along with a song to sample. A user can like an artist to view their profile later. 
## Installation: ### System requirements Newmusic requires [python3](https://www.python.org/download/releases/3.0/), [pip](https://pypi.python.org/pypi/pip), [virtualenvwrapper](https://virtualenvwrapper.readthedocs.org/en/latest/) and [postgresql](http://www.postgresql.org/). To install system dependencies on OS X using [Homebrew](http://brew.sh/): ```bash # Install system dependencies brew install python3 postgresql # pip will be installed by python3 pip install virtualenvwrapper # virtualenvwrapper must be added to your shell profile to work cat <<EOF >> ~/.bash_profile export WORKON_HOME=~/.virtualenvs [ -f /usr/local/bin/virtualenvwrapper.sh ] && . /usr/local/bin/virtualenvwrapper.sh EOF ``` ### Installing newmusic ```bash # Clone the repository git clone <EMAIL>:postlight/newmusic.git cd newmusic # Create a virtualenv and install dependencies via pip mkvirtualenv newmusic pip install -r requirements.txt # Migrate your database python manage.py migrate # Run the development server python manage.py runserver ``` You should now have a dev instance running at https://localhost:8000 ## Deploying newmusic currently lives at https://mighty-everglades-78445.herokuapp.com/ To deploy your own version on Heroku: First create a [free Heroku account](https://signup.heroku.com/dc) if you don't have one. Download the [Heroku Toolbelt](https://devcenter.heroku.com/articles/getting-started-with-python#set-up). ```bash # User your credentials to log in to Heroku from terminal heroku login # After successfully logging in, create your new Heroku project from the newmusic directory heroku create # Deploy the code through git git push heroku master ``` A Procfile and requirements.txt are necessary for deployment. Heroku uses postgresql as its database as well. These things should be handled within your cloned version of the app. [Learn more](https://devcenter.heroku.com/articles/getting-started-with-python#introduction) about Heroku and Python. 
###Using Heroku via Command Line ```bash # Check your logs heroku logs # Or heroku logs --tail # To run app locally heroku local web # To migrate heroku run python manage.py migrate # To open a shell through heroku heroku run python manage.py shell ``` <file_sep>from social.backends.soundcloud import SoundcloudOAuth2 as OriginalOauth class SoundcloudOAuth2(OriginalOauth): EXTRA_DATA = OriginalOauth.EXTRA_DATA + [('avatar_url', 'avatar_url')] + [('permalink_url', 'permalink_url')] <file_sep>from django.db import models from django.conf import settings class Artist(models.Model): name = models.CharField(max_length=100) sc_id = models.IntegerField(unique=True) url = models.URLField(max_length=100, blank=True, null=True) avatar_url = models.URLField(max_length=100, blank=True, null=True) followers_count = models.IntegerField(default=0) country = models.CharField(max_length=100, null=True, blank=True) city = models.CharField(max_length=100, null=True, blank=True) website = models.URLField(max_length=100, null=True, blank=True) description = models.CharField(max_length=400, null=True, blank=True) def __str__(self): return self.name class Song(models.Model): name = models.CharField(max_length=100) url = models.CharField(max_length=100) playback_count = models.IntegerField() artist = models.ForeignKey('main.Artist', on_delete=models.CASCADE) class Opinion(models.Model): opinion = models.BooleanField() user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE) artist = models.ForeignKey('main.Artist', on_delete=models.CASCADE) class Meta: unique_together = ("user", "artist") <file_sep>var animationDone = 'webkitAnimationEnd mozAnimationEnd MSAnimationEnd oanimationend animationend'; $(function() { $('#next').hide(); $('#success').hide(); $('#like').on('submit', function(e) { e.preventDefault(); var form = $(this); $.ajax({ type: form.attr('method'), url: e.target.action, data: form.serialize() }) .done(function() { $('form').hide(); $('#next').show(); $('#success').show(); }); }); $('#dislike').on('submit', function(e) { e.preventDefault(); var form = $(this); $.ajax({ type: form.attr('method'), url: e.target.action, data: form.serialize() }) .done(function() { $('#artist_explore') .addClass('animated fadeOutRight') .one(animationDone, function() { location.reload(true); }); }); }); $('#next').on('click', function(e) { e.preventDefault(); $('#artist_explore') .addClass('animated fadeOutRight') .one(animationDone, function() { location.reload(true); }); }); }); <file_sep>Django==1.9.2 Jinja2==2.8 MarkupSafe==0.23 PyJWT==1.4.0 appnope==0.1.0 coverage==4.0.3 decorator==4.0.6 defusedxml==0.4.1 dj-database-url==0.4.0 django-braces==1.8.1 django-frontend==1.7.0 django-oauth-toolkit==0.10.0 django-oauth2-provider==0.2.6.1 django-simple-menu==1.2.0 django-social-auth==0.7.28 fudge==1.1.0 gnureadline==6.3.3 gunicorn==19.4.5 httplib2==0.9.2 ipdb==0.8.1 ipython-genutils==0.1.0 ipython==4.0.1 lxml==3.5.0 oauth2==1.9.0.post1 oauthlib==1.0.3 path.py==8.1.2 pexpect==4.0.1 pickleshare==0.5 psycopg2==2.6.1 ptyprocess==0.5 python-openid==2.2.5 python-social-auth==0.2.13 python3-openid==3.0.9 requests-oauthlib==0.6.0 requests==2.9.1 shortuuid==0.4.2 simplegeneric==0.8.1 simplejson==3.8.1 six==1.10.0 soundcloud==0.5.0 traitlets==4.0.0 wheel==0.24.0 whitenoise==2.0.6 <file_sep>web: gunicorn newmusic.wsgi --log-file -<file_sep>from django.core.management.base import BaseCommand from newmusic.utils.populate import collect_artists, collect_songs class Command(BaseCommand): help = "Collects Artists and 
Songs from Soundcloud API and saves into database" def handle(self, **options): collect_artists() collect_songs() self.stdout.write(self.style.SUCCESS('Successfully saved artists and songs')) <file_sep>from mock import Mock, patch from django.test import TestCase from django.contrib.auth import get_user_model from django.core.urlresolvers import reverse from newmusic.main.models import Artist, Song from newmusic.main.forms import OpinionForm from newmusic.utils.populate import collect_artists, collect_songs from newmusic.utils.soundcloud import get_user_avatar, get_user_permalink User = get_user_model() class ArtistPageViewTestCase(TestCase): def setUp(self): self.user = User.objects.create_user(username='testusername', password='<PASSWORD>') self.artist = Artist.objects.create(name="artistname", sc_id=1) def test_artist_page_anonymous(self): url = reverse('artist_page', args=[self.artist.name]) response = self.client.get(url) self.assertEqual(response.status_code, 200) def test_artist_page_authed(self): self.client.force_login(self.user) url = reverse('artist_page', args=[self.artist.name]) response = self.client.get(url) self.assertEqual(response.status_code, 200) class AboutViewTestCase(TestCase): def test_about(self): response = self.client.get('/about/') self.assertEqual(response.status_code, 200) class UserViewTests(TestCase): def setUp(self): self.user = User.objects.create_user(username='testusername', password='<PASSWORD>') def test_user_page_anonymous(self): url = reverse('user_page', args=[self.user.username]) response = self.client.get(url) self.assertEqual(response.status_code, 302) def test_user_page_authed(self): self.client.force_login(self.user) url = reverse('user_page', args=[self.user.username]) response = self.client.get(url) self.assertEqual(response.status_code, 200) def test_user_page_inactive_user(self): self.client.force_login(self.user) url = reverse('user_page', args=['fdafdc']) response = self.client.get(url) self.assertContains(response, "not an active user!") class HomeViewTests(TestCase): def setUp(self): self.user = User.objects.create_user(username='testusername', password='<PASSWORD>') def test_home_anonymous(self): response = self.client.get('') self.assertEqual(response.status_code, 302) def test_home_authed(self): self.client.force_login(self.user) response = self.client.get('') self.assertEqual(response.status_code, 200) def test_view_with_no_artists(self): self.client.force_login(self.user) response = self.client.get(reverse('explore')) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, 'no_artist.html') def test_form_submit(self): artist = Artist.objects.create(name="artistname", sc_id=1) self.client.force_login(self.user) response = self.client.post(reverse('explore'), {'artist': artist.id, 'opinion': True}) self.assertEqual(response.status_code, 302) def test_form_submit_invalid_artist(self): # artist = Artist.objects.create(name="artistname", sc_id=1) self.client.force_login(self.user) response = self.client.post(reverse('explore'), {'opinion': True}) self.assertEqual(response.status_code, 400) def test_form_submit_invalid_opinion(self): artist = Artist.objects.create(name="artistname", sc_id=1) self.client.force_login(self.user) response = self.client.post(reverse('explore'), {'artist': artist.id}) self.assertEqual(response.status_code, 400) class PopulateTests(TestCase): def test_no_song(self): """ tests that we don't save songs that have a value of None """ with patch( 
'newmusic.utils.populate.get_rand_track_for_artist', return_value={} ): collect_songs([{}]) self.assertEqual(Song.objects.count(), 0) def test_already_saved_song(self): """ tests that we don't save a song that is already in db """ Song.objects.create(name='example', url='exmaple.org', playback_count=33, artist_id=1) with patch( 'newmusic.utils.populate.get_rand_track_for_artist', return_value= ({ 'permalink':'example', 'permalink_url': 'example.org', 'playback_count': 33, 'artist_id': 1 }) ): collect_songs([{}]) self.assertEqual(Song.objects.count(), 1) class FormTests(TestCase): def test_form_error(self): form_data = {'whatever':'whatever'} form = OpinionForm(data=form_data) self.assertFalse(form.is_valid()) <file_sep>from django.conf.urls import url, patterns from newmusic.main import views urlpatterns = patterns('', url(r'^$', views.ArtistIndex.as_view(), name="explore"), url(r'^about/$', views.AboutIndex.as_view(), name="about"), url(r'^artist/(?P<artist>[\w.@+-]+)/$', views.ArtistPage.as_view(), name="artist_page"), url(r'^opinion/delete/(?P<pk>\d+)$', views.OpinionDelete.as_view(), name="opinion_delete"), )
5bc8ada4ac97f6dde0d01740e0d3b0515f7436ea
[ "HTML", "Markdown", "Procfile", "JavaScript", "Python", "Text" ]
18
HTML
emottk/newmusic
532e400123b7ab59017420fd9363592fd2ca2685
89538b89a829b1d4a4364b0a5746519e26f56da5
refs/heads/master
<repo_name>hiwepy/shiro-freemarker<file_sep>/README.md # shiro-freemarker ### 说明 > 基于[shiro-freemarker-tags](https://github.com/zhoushuaichang/shiro-freemarker-tags) 添加一些新的逻辑 ### Maven ``` xml <dependency> <groupId>${project.groupId}</groupId> <artifactId>shiro-freemarker</artifactId> <version>${project.version}</version> </dependency> ``` ### Sample 普通的项目整合: ```java cfg.setSharedVeriable("shiro", new ShiroTags()); ``` Spring Boot整合: ```java @Configuration public class FreemarkerConfig { @Autowired private freemarker.template.Configuration configuration; /** * Shiro整合Freemarker模板标签,使用方式参考:https://www.sojson.com/blog/143.html * @throws TemplateModelException */ @PostConstruct public void setSharedVariable() throws TemplateModelException { configuration.setSharedVariable("shiro", new ShiroTags()); } } ``` ###标签使用 [参考](https://www.sojson.com/blog/143.html): 1.guest(游客) ```xml <@shiro.guest> 您当前是游客,<a href="javascript:void(0);" class="dropdown-toggle qqlogin" >登录</a> </@shiro.guest> ``` 2.user(已经登录,或者记住我登录) ```xml <@shiro.user> 欢迎[<@shiro.principal/>]登录,<a href="/logout.shtml">退出</a> </@shiro.user> ``` 3.authenticated(已经认证,排除记住我登录的) ```xml <@shiro.authenticated> 用户[<@shiro.principal/>]已身份验证通过 </@shiro.authenticated> ``` 4.notAuthenticated(和authenticated相反) ```xml <@shiro.notAuthenticated> 当前身份未认证(包括记住我登录的) </@shiro.notAuthenticated> ``` 这个功能主要用途,识别是不是本次操作登录过的,比如支付系统,进入系统可以用记住我的登录信息,但是当要关键操作的时候,需要进行认证识别。 5.principal标签,这个要稍微重点讲讲。好多博客都是一下带过。 principal标签,取值取的是你登录的时候。在Realm实现类中的如下代码: ```xml .... return new SimpleAuthenticationInfo(user,user.getPswd(), getName()); ``` 在new SimpleAuthenticationInfo(第一个参数,....)的第一个参数放的如果是一个username,那么就可以直接用。 ```xml <!--取到username--> <@shiro.principal/> ``` 如果第一个参数放的是对象,比如我喜欢放User对象。那么如果要取username字段。 ```xml <!--需要指定property--> <@shiro.principal property="username"/> ``` 和Java如下Java代码一致 ```xml User user = (User)SecurityUtils.getSubject().getPrincipals(); String username = user.getUsername(); ``` 6.hasRole标签(判断是否拥有这个角色) ```xml <@shiro.hasRole name="admin"> 用户[<@shiro.principal/>]拥有角色admin<br/> </@shiro.hasRole> ``` 7.hasAnyRoles标签(判断是否拥有这些角色的其中一个) ```xml <@shiro.hasAnyRoles name="admin,user,member"> 用户[<@shiro.principal/>]拥有角色admin或user或member<br/> </@shiro.hasAnyRoles> ``` 8.lacksRole标签(判断是否不拥有这个角色) ```xml <@shiro.lacksRole name="admin"> 用户[<@shiro.principal/>]不拥有admin角色 </@shiro.lacksRole> ``` 9.hasPermission标签(判断是否有拥有这个权限) ```xml <@shiro.hasPermission name="user:add"> 用户[<@shiro.principal/>]拥有user:add权限 </@shiro.hasPermission> ``` 10.lacksPermission标签(判断是否没有这个权限) ```xml <@shiro.lacksPermission name="user:add"> 用户[<@shiro.principal/>]不拥有user:add权限 </@shiro.lacksPermission> ```<file_sep>/src/main/java/com/github/vindell/shiro/freemarker/tags/HasRoleTag.java package com.github.vindell.shiro.freemarker.tags; /** * <p>Equivalent to {@link com.github.vindell.shiro.freemarker.tags.web.tags.HasRoleTag}</p> */ public class HasRoleTag extends RoleTag { protected boolean showTagBody(String roleName) { return getSubject() != null && getSubject().hasRole(roleName); } } <file_sep>/src/main/java/com/github/vindell/shiro/freemarker/tags/HasPermissionTag.java package com.github.vindell.shiro.freemarker.tags; /** * <p>Equivalent to {@link com.github.vindell.shiro.freemarker.tags.web.tags.HasPermissionTag}</p> * * @since 0.1 */ public class HasPermissionTag extends PermissionTag { protected boolean showTagBody(String p) { return isPermitted(p); } }
94441fa3b0c90dc48429e475dae66e6a5d85afdc
[ "Java", "Markdown" ]
3
Java
hiwepy/shiro-freemarker
bb6f18aaec147730bae52a76b37a713eea6a6572
3216f18a6169d2d142fcf6c3c9b758beb8620688
refs/heads/master
<repo_name>Klymyk/mysite<file_sep>/js/script.js
$(document).ready(function () {
    var button_done = "#done";
    var button_sort = "#sort";

    // Append the textarea content as a new list item, then clear and refocus the field.
    $(button_done).click(function () {
        var comment = $("textarea").val();
        $("<li>" + comment + "</li>").appendTo(".ulcom");
        $("textarea").val('');
        $("textarea").focus();
    });

    // Re-render the .ulcom list items in order of ascending text length.
    function sortUnorderedList() {
        var listArray = [];
        var list = $(".ulcom").children('li');
        $(".ulcom").children('li').each(function (n, v) { listArray.push($(this).text()); });
        var vals = BubbleSort(listArray);
        for (var i = 0, l = list.length; i < l; i++) {
            list[i].innerHTML = vals[i];
        }
    }

    // Bubble sort of the strings by length.
    function BubbleSort(A) {
        var n = A.length;
        for (var i = 0; i < n - 1; i++) {
            for (var j = 0; j < n - 1 - i; j++) {
                if (A[j + 1].length < A[j].length) {
                    var t = A[j + 1];
                    A[j + 1] = A[j];
                    A[j] = t;
                }
            }
        }
        return A;
    }

    $(button_sort).click(function () {
        sortUnorderedList();
    });
});
8fd7abe9acf9dd9689cb1ab26fe909333074d113
[ "JavaScript" ]
1
JavaScript
Klymyk/mysite
ff5a959f8f5b1430e62727719bc7e3531ac6618a
c995e0cd96398fa426baacab9aea0fc3eb174d88
refs/heads/master
<file_sep>#%% import pandas as pd import matplotlib.pyplot as plt import seaborn as sns; sns.set() import matplotlib.pyplot as plt # %% data_volby = pd.read_excel("./data/parlgov.xlsx", sheet_name="election") data_party = pd.read_excel("./data/parlgov.xlsx", sheet_name="party") #%% # %% data = data_volby.merge(data_party[['party_id', 'family_name']], on="party_id", how="left") #%% data.election_date = pd.to_datetime(data.election_date, format='%Y-%m-%d') #%% data['election_year'] = data.election_date.apply(lambda x: int(str(x).split('-')[0])) #%% # druhy graf cesko x = data[(data.country_name_short == 'CZE') & (data.election_year >= 1996) & (data.election_type == "parliament")] xd = pd.DataFrame(x.groupby(['election_year', 'family_name']).vote_share.sum().reset_index()) #%% xd #%% family_cze = {'Christian democracy': 'křesťanská demokracie', 'Communist/Socialist': 'komunisti', 'Conservative': 'konzervativci', 'Liberal': 'liberálové', 'Right-wing': 'pravice', 'Social democracy': 'sociální demokracie', 'Green/Ecologist': 'zelení', 'Special issue': 'jedno téma'} out = [] for party in xd.family_name.unique(): tmp = xd[xd.family_name == party] out.append({ 'name': family_cze[party], 'data': list(map(lambda x: [x[0], x[2]], tmp.values)), 'step': 'right' }) #%% pd.DataFrame(data[(data.election_type == "parliament") & (data.election_year >= 1996 ) & (data.country_name_short == 'CZE')].groupby('family_name').party_name.unique()).to_dict() #%% chci=["Social democracy"] staty = ['Czech Republic', 'Germany', 'France', 'Hungary', 'Poland', 'Slovakia', 'Sweden', 'Netherlands'] staty_cze = ['Česká republika', 'Německo', 'Francie', 'Maďarsko', 'Polsko', 'Slovensko', 'Švédsko', 'Nizozemsko'] staty_trans = dict(zip(staty, staty_cze)) data = data[ (data.election_type == "parliament") & (data.election_year >= 1996 ) & data.family_name.isin(chci) & data.country_name.isin(staty)] #%% for country in sorted(list(data.country_name.unique())): tmp = data[data.country_name == country] chrt = sns.lineplot(x="election_year", y="vote_share", hue="family_name", data=tmp, ci=None) chrt.set_title(country) plt.legend(loc='upper left') plt.show() #%% d = pd.DataFrame(data.groupby(['country_name', 'election_year']).vote_share.sum().reset_index()) # %% out = [] for cntry in d.country_name.unique(): tmp = d[d.country_name == cntry] out.append({ 'name': staty_trans[cntry], 'data': list(map(lambda x: [x[1], x[2]], tmp.values)), 'step': 'right' })
df077ab5a07196f06598b5f9a40eb1aec70e0464
[ "Python" ]
1
Python
DataRozhlas/upadek-stran-plus
baa31251157a109b920e9f4ee1a051aa33d20780
12a1b91e1d3b3d3f0d8b61f55dd78a59bd4b5225
refs/heads/master
<file_sep> MYGAME.controller = (function () { // declare objects here var score , level , exploder , multiplier , baseScore ; function init () { MYGAME.isActive = true; multiplier = 1; baseScore = 100; score = 0; level = 1; exploder = particleInit(); MYGAME.ShadedField.init(); MYGAME.Marker.init(); MYGAME.Keyboard.Down = []; MYGAME.Keyboard.Up = []; } function run () { MYGAME.isActive = true; multiplier = 1; baseScore = 100; score = 0; level = 1; exploder = particleInit(); MYGAME.ShadedField.init(); MYGAME.Marker.init(); MYGAME.Keyboard.Down = []; MYGAME.Keyboard.Up = []; } function update (elapsedTime) { MYGAME.ShadedField.update(level); MYGAME.Marker.update(elapsedTime); exploder.update(elapsedTime); } function render () { MYGAME.Graphics.clear(); drawPlayingField(); MYGAME.ShadedField.render(); MYGAME.Marker.render(); renderStats(); exploder.render(); } function particleInit () { return MYGAME.particleEmitter([ { density: 6, direction: Random.PointOnUnitCircle(), image: MYGAME.images['img/redstar.png'], size: { mean: 12, stdev: 3 }, speed: { mean: 100, stdev: 70 }, lifetime: { mean: 0.25, stdev: 0.1 } }], 2 ); } function drawPlayingField () { MYGAME.Graphics.context.beginPath(); MYGAME.Graphics.context.lineWidth="10"; MYGAME.Graphics.context.strokeStyle = "black"; MYGAME.Graphics.context.rect(110, 350, 780, 300); MYGAME.Graphics.context.stroke(); } function renderStats () { MYGAME.Graphics.drawObject({ font: '30px Arial', color: '#FF0000', text: 'Score: ' + score, x: 800, y: 75 }); MYGAME.Graphics.drawObject({ font: '30px Arial', color: '#FF0000', text: 'Level: ' + level, x: 800, y: 150 }); } function levelUp (x) { score += multiplier * baseScore; if (level < 6) { level++; multiplier += 0.2; } exploder.explode({x:x, y:100}); var bell = new Audio('sounds/bell.mp3'); bell.play(); } function endGame () { MYGAME.isActive = false; } function getScore () { return score; } return { init : init, run : run, update : update, render : render, levelUp : levelUp, endGame : endGame, getScore: getScore } }())<file_sep> MYGAME.Nav = (function () { "use strict"; var showing; function hideAll () { $('.display-board').slideUp(); $('.navers').removeClass('selected'); } function disableNav () { $("#show-game").attr("disabled", "disabled"); $("#show-scores").attr("disabled", "disabled"); $("#show-credits").attr("disabled", "disabled"); } function enableNav () { $("#show-game").removeAttr("disabled"); $("#show-scores").removeAttr("disabled"); $("#show-credits").removeAttr("disabled"); } function showGame () { if (showing !== 'game') hideAll(); showing = 'game'; disableNav(); $('#show-game').addClass('selected'); $('#game').show('slow'); MYGAME.GameLoop.start(); } function showScores () { enableNav(); var scoresList = MYGAME.Scores.Get() , scoresEl = $('#scores'); ; scoresEl.empty(); if (scoresList.length == 0) { scoresEl.append('<li>Be the first to leave a score!</li>') } else { scoresList.forEach(function (s) { scoresEl.append('<li>' + s.name + ': ' + s.score + '</li>'); }) } if (showing !== 'scores') hideAll(); showing = 'scores'; $('#show-scores').addClass('selected'); $('#high-scores').show('slow'); } function showCredits () { if (showing !== 'credits') hideAll(); showing = 'credits'; $('#show-credits').addClass('selected'); $('#credits').show('slow'); } return { disableNav : disableNav, enableNav : enableNav, showGame : showGame, showScores : showScores, showCredits : showCredits, showing : showing } }());<file_sep><!DOCTYPE html> <html lang="en"> <head> <title>Make it Rain</title> <meta 
charset="utf-8"/> <link href="bootstrap.min.css" rel="stylesheet"> <link rel="stylesheet" href="style.css"> <script src = "http://code.jquery.com/jquery-1.9.1.js"></script> <script src = "scripts/modernizr.js"></script> <script src = "scripts/loader.js"></script> </head> <body> <div id="header" class="navbar-fixed-top container-fluid bg-primary"><h1 class="text-center">Make it RAIN</h1></div> <!-- NAVIGATION --> <div id="togler" class="btn-group btn-group-justified"> <div class="btn-group"> <button id="show-game" class="btn navers" onclick="MYGAME.Nav.showGame()">START GAME</button> </div> <div class="btn-group"> <button id="show-scores" class="btn navers" onclick="MYGAME.Nav.showScores()">HIGH SCORES</button> </div> <div class="btn-group"> <button id="show-credits" class="btn navers" onclick="MYGAME.Nav.showCredits()">CREDITS</button> </div> </div> <!-- GAME CANVAS --> <div id="game" class="display-board"> <canvas id="canvas" width="1000" height="800">Sorry not supported in this browser.</canvas> </div> <!-- HIGH SCORES --> <div id="high-scores" class="display-board"> <h1 class="centered">Congrats You just won a medal</h1> <ul id="scores"></ul> </div> <!-- CREDITS --> <div id="credits" class="display-board"> <h1 class="centered">Special Thanks to:</h1> <h4><NAME>............................Creator/Programmer</h4> <h4><NAME>.....................................................Instructor</h4> <h4><NAME>...............................Inspirational Advicer</h4> <h4><NAME>ensen.........................Chaotic Support</h4> <h4><NAME>en..............................Chaotic Support</h4> <h4><NAME>ensen........................Chaotic Support</h4> </div> <div id="footer" class="navbar-fixed-bottom container bg-primary"><h3>Created for CS 5410 by <NAME><h3></div> </body> </html><file_sep> MYGAME.Marker = (function () { var width , height , x , y , goRight , speed , canLevel ; function init () { width = 5; height = 300; x = 100; y = 350; goRight = true; speed = 1000; canLevel = true; elapsedTime = 0; } function update (elapsedTime) { move(elapsedTime); if (MYGAME.Keyboard.Down.indexOf(32) > -1 && canLevel) { var range = MYGAME.ShadedField.clickRange(); if (range.min < x && range.max > x + width) { speed -= 40; // canLevel = false; MYGAME.controller.levelUp(x + width/2); } else { MYGAME.controller.endGame(); } } MYGAME.Keyboard.Down = []; } function move (elapsedTime) { if (!elapsedTime) elapsedTime = 0; var distance = 800 * elapsedTime / speed; if (x + distance > 880) { goRight = false; // canLevel = true; } else if (x - distance < 100) { goRight = true; // canLevel = true; } if (goRight) { x += distance; } else { x -= distance; } } function getPosition () { return x + (width/2); } function render () { MYGAME.Graphics.context.beginPath(); MYGAME.Graphics.context.fillStyle = 'red'; MYGAME.Graphics.context.fillRect(x, y, width, height); MYGAME.Graphics.context.stroke(); } return { init : init, update : update, render : render, getPosition: getPosition } }())<file_sep> /* particleTypes = [ { density: 6, image: MYGAME.images['images/<NAME>.png'], center: { x: 300, y: 300 }, size: { mean: 12, stdev: 3 }, speed: { mean: 300, stdev: 70 }, lifetime: { mean: 0.25, stdev: 0.1 } } ] emitTime = time the particle emitter emits particles */ MYGAME.particleEmitter = function (particleTypes, emitTime) { 'use strict'; var systems = [] , explosionPoints = [] ; particleTypes.forEach(function (type) { systems.push(particleSystem(type, MYGAME.graphics)); }) function explode(point) { explosionPoints.push({point: point, 
timeleft: emitTime}); }; function update(time) { var elapsedSeconds = time / 1000; systems.forEach(function (s) { s.update(elapsedSeconds); }) explosionPoints.filter(function (p) { systems.forEach(function (s) { for(var i = 0; i < s.density; ++i) { s.create(JSON.parse(JSON.stringify(p.point))); } }) p.timeleft -= elapsedSeconds; return p.timeleft > 0 }) }; function render() { systems.forEach(function (s) { s.render(); }) }; return { explode: explode, update: update, render: render }; }; <file_sep>/*jslint browser: true, white: true, plusplus: true */ /*global Tools */ function particleSystem(spec, graphics) { 'use strict'; var that = {} , particles = [] // Set of all active particles ; that.density = spec.density; //------------------------------------------------------------------ // This creates one new particle //------------------------------------------------------------------ that.create = function (point) { debugger; var size = Random.Gaussian(spec.size.mean, spec.size.stdev) , p = { density: spec.density, image: spec.image, height: size, width: size, center: point, direction: spec.direction, speed: Random.Gaussian(spec.speed.mean, spec.speed.stdev), // pixels per second rotation: 0, lifetime: Random.Gaussian(spec.lifetime.mean, spec.lifetime.stdev), // How long the particle should live, in seconds alive: 0 // How long the particle has been alive, in seconds } ; p.size = Math.max(1, p.size); // Ensure we have a valid size - gaussian numbers can be negative p.lifetime = Math.max(0.01, p.lifetime); // Same thing with lifetime particles.push(p); }; //------------------------------------------------------------------ // Update the state of all particles. This includes remove any that // have exceeded their lifetime. //------------------------------------------------------------------ that.update = function (elapsedTime) { debugger; particles = particles.filter(function (particle, i, arr) { // update partical data arr[i].alive = particle.alive + elapsedTime; arr[i].center.x = particle.center.x + (elapsedTime * particle.speed * particle.direction.x); arr[i].center.y = particle.center.y + (elapsedTime * particle.speed * particle.direction.y); arr[i].rotation = particle.rotation + (particle.speed / 500); // remove dead particles return (particle.alive > particle.lifetime); }) }; //------------------------------------------------------------------ // Render all particles //------------------------------------------------------------------ that.render = function () { debugger; particles.forEach(function (p) { MYGAME.Graphics.drawObject(p); }) }; return that; }
f49b94bc70119a8194ec07a96f46a562242583d0
[ "HTML", "JavaScript" ]
6
HTML
freddyC/testgame
144a1672b50998568444cbb96a7f8a3239901e87
fc043f3bdf42eae23abe09042149019fc4ddb023
refs/heads/master
<repo_name>japhyf/mom<file_sep>/README.md # valer # mom
13f112026265e448eb69f9b2730e5efbf422a2a7
[ "Markdown" ]
1
Markdown
japhyf/mom
49c672ffccd29c92e6dca49741e6638412434c86
806a5de82eb8411ee635571e16f6a10a8842751d
refs/heads/master
<file_sep>FROM python:2.7
LABEL maintainer="<NAME>"

EXPOSE 3111

WORKDIR /app
COPY techtrends ./

RUN pip install -r requirements.txt
RUN python init_db.py

CMD ["python", "app.py"]
dce678d488e00cd1638b5fdfd88162f5ea80cb52
[ "Dockerfile" ]
1
Dockerfile
mathewfrancis/techtrends
eba1854997895def2118d31d8eddd7545140b97e
ac69720178f25d871dce73689e8e8c3213b8f598
refs/heads/master
<file_sep> class GameEvent { constructor() { // this.initializePrivateFields(options); } // initializePrivateFields(options) { // } } export default GameEvent; <file_sep>/* eslint-disable import/no-extraneous-dependencies */ /* eslint-disable no-console */ import gulp from 'gulp'; import babel from 'gulp-babel'; import del from 'del'; import eslint from 'gulp-eslint'; import webpack from 'webpack-stream'; import mocha from 'gulp-mocha'; import webpackConfig from './webpack.config.babel'; const paths = { serverJs: 'server/**/*.js', clientEntryPoint: 'src/javascripts/app.js', gulpFile: 'gulpfile.babel.js', webpackFile: 'webpack.config.babel.js', clientBundle: 'dist/client-bundle.js?(.map)', allSrcTests: 'src/test/**/*.js', libDir: 'lib', distDir: 'public/js', }; // Clean task // ----------- // Deletes the auto generated lib folder // gulp.task('clean', () => { return del([paths.libDir, paths.distDir]); }); // Build task // ----------- // First runs clean, then babel compiles code // gulp.task('build', ['lint', 'clean'], () => { return gulp.src(paths.serverJs) .pipe(babel()) .pipe(gulp.dest(paths.libDir)); }); // Lint task // ----------- // First runs build, then runs `node lib` // gulp.task('lint', () => { return gulp.src([ paths.serverJs, paths.gulpFile, paths.webpackFile, ]).pipe(eslint()) .pipe(eslint.format()) .pipe(eslint.failAfterError()); }); // Lint task // ----------- // First runs build, then runs `node lib` // gulp.task('test', ['build'], () => { return gulp.src(paths.allSrcTests) .pipe(mocha()); }); // Webpack task // ----------- // First runs build, then runs `node lib` // gulp.task('webpack', () => { return gulp.src(paths.clientEntryPoint) .pipe(webpack(webpackConfig)) .pipe(gulp.dest(paths.distDir)); }); // Main task // ----------- // First runs build, then runs `node lib` // gulp.task('main', ['test', 'webpack']); // Watch task // ----------- // Watches for code changes and then runs main task // gulp.task('watch', () => { gulp.watch(paths.serverJs, ['main']); }); // Default task // ----------- // Starts the watch, and then runs main to get things started // gulp.task('default', ['watch', 'main']); <file_sep>/* eslint-disable import/no-extraneous-dependencies, no-unused-expressions */ import path from 'path'; import chai from 'chai'; import { stub } from 'sinon'; import sinonChai from 'sinon-chai'; import { describe, it, beforeEach, before } from 'mocha'; import GameLog from '../../javascripts/entities/gameLog'; // lets us use chai assertions chai.should(); chai.use(sinonChai); // used for our game log object let gameLog; describe('GameLog Object', () => { describe('Constructor', () => { it('should set the file path with a given filePath option', () => { const thisGameLog = new GameLog({ filePath: 'Sample/File/Path', }); thisGameLog.getFilePath().should.equal('Sample/File/Path'); }); }); describe('Parse', () => { beforeEach(() => { gameLog = new GameLog({ filePath: path.join(__dirname, '..', 'data', 'testGameLog.csv') }); }); it('should show as parsed after parsing', () => { return gameLog.parse() .then(() => { gameLog.isParsed().should.be.true; }); }); }); describe('GetEvents', () => { before(() => { stub(GameLog.prototype, 'parse', function parseStub() { this.setEvents([{ type: 'shot', player: 'curry', }, { type: 'shot', player: 'durant', }, { type: 'rebound', player: 'durant', }]); }); }); beforeEach(() => { // putting an arbitrary filepath because it will fail if not gameLog = new GameLog({ filePath: 'needsfilepath' }); gameLog.parse(); }); it('should return an array of 
events', () => { gameLog.getEvents().should.be.instanceof(Array); }); it('should return an empty array when passing a fake event', () => { const events = gameLog.getEvents({ type: 'Fake event type' }); events.should.be.instanceof(Array); events.should.be.empty; }); it('should return events of a given type', () => { const events = gameLog.getEvents({ type: 'shot' }); events.should.deep.equal([{ type: 'shot', player: 'curry', }, { type: 'shot', player: 'durant', }]); }); it('should return events for a given player', () => { const events = gameLog.getEvents({ player: 'durant' }); events.should.deep.equal([{ type: 'shot', player: 'durant', }, { type: 'rebound', player: 'durant', }]); }); it('should return all events when no argument is passed', () => { const events = gameLog.getEvents(); events.should.deep.equal([{ type: 'shot', player: 'curry', }, { type: 'shot', player: 'durant', }, { type: 'rebound', player: 'durant', }]); }); }); });
55eef1861ca705ddfe3ae9e7ae718e428e1cd940
[ "JavaScript" ]
3
JavaScript
tcosentino/nba-d3
6ce26a7bbd74e00e8da8892a745deaf8fac92767
702ab9efb24de4df8ea5792ee8fbff2c15a569f8
refs/heads/master
<repo_name>SampsaL/NOOB_VLC<file_sep>/src/org/opencv/samples/colorblobdetect/DetectionActivity.java package org.opencv.samples.colorblobdetect; import org.opencv.android.BaseLoaderCallback; import org.opencv.android.CameraBridgeViewBase.CvCameraViewFrame; import org.opencv.android.LoaderCallbackInterface; import org.opencv.android.OpenCVLoader; import org.opencv.android.CameraBridgeViewBase; import org.opencv.android.CameraBridgeViewBase.CvCameraViewListener2; import org.opencv.core.Mat; import android.animation.ObjectAnimator; import android.app.Activity; import android.content.Intent; import android.os.Bundle; import android.util.Log; import android.view.MotionEvent; import android.view.View; import android.view.WindowManager; import android.view.View.OnTouchListener; import android.view.animation.DecelerateInterpolator; import android.widget.ProgressBar; import android.widget.TextView; import android.widget.Toast; import java.util.ArrayList; import java.util.Arrays; public class DetectionActivity extends Activity implements OnTouchListener, CvCameraViewListener2 { private static final String TAG = "OCVSample::Activity"; private boolean mIsColorSelected = false; private Mat mRgba; private Detector mDetector; private CameraBridgeViewBase mOpenCvCameraView; int count = 0; private BaseLoaderCallback mLoaderCallback = new BaseLoaderCallback(this) { @Override public void onManagerConnected(int status) { if (status == LoaderCallbackInterface.SUCCESS) { Log.i(TAG, "OpenCV loaded successfully"); mOpenCvCameraView.enableView(); mOpenCvCameraView.setOnTouchListener(DetectionActivity.this); } else { super.onManagerConnected(status); } } }; public DetectionActivity() {} @Override public void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); getWindow().addFlags(WindowManager.LayoutParams.FLAG_KEEP_SCREEN_ON); setContentView(R.layout.detection_surface_view); mOpenCvCameraView = findViewById(R.id.detection_activity_surface_view); mOpenCvCameraView.enableFpsMeter(); mOpenCvCameraView.setMaxFrameSize(800,600); //800x600 for nokia 5.1 mOpenCvCameraView.setCvCameraViewListener(this); } @Override public void onPause() { super.onPause(); if (mOpenCvCameraView != null) mOpenCvCameraView.disableView(); } @Override public void onResume() { super.onResume(); if (!OpenCVLoader.initDebug()) { Log.e("INIT", "Internal OpenCV library not found. Using OpenCV Manager for initialization"); OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_3_4_0, this, mLoaderCallback);//(OpenCVLoader.OPENCV_VERSION_3_0_0, this, mLoaderCallback); } else { Log.e("INIT", "OpenCV library found inside package. 
Using it!"); mLoaderCallback.onManagerConnected(LoaderCallbackInterface.SUCCESS); } } public void onDestroy() { super.onDestroy(); if (mOpenCvCameraView != null) mOpenCvCameraView.disableView(); } public void onCameraViewStarted(int width, int height) { mDetector = new Detector(); //Toast.makeText(getApplicationContext(), "Detection running...", Toast.LENGTH_LONG).show(); //Toast.makeText(getApplicationContext(), "Tap screen to detect message", Toast.LENGTH_LONG).show(); mIsColorSelected = true; } public void onCameraViewStopped() { mRgba.release(); } public boolean onTouch(View v, MotionEvent event) { if (!mIsColorSelected){ Toast.makeText(getApplicationContext(), "Detection running", Toast.LENGTH_SHORT).show(); mIsColorSelected = true; } else { Toast.makeText(getApplicationContext(), "Detection paused", Toast.LENGTH_SHORT).show(); mIsColorSelected = false; } return false; } public Mat onCameraFrame(CvCameraViewFrame inputFrame) { mRgba = inputFrame.rgba(); TextView txt = findViewById(R.id.textView2); StringBuilder sb = new StringBuilder(); StringBuilder bb = new StringBuilder(); StringBuilder data = new StringBuilder(); StringBuilder xrt = new StringBuilder(); StringBuilder tooManyBuilders = new StringBuilder(); if (mIsColorSelected) { Log.e("msg", "------------------- Frame captured -------------------"); mDetector.process(mRgba); int max = mDetector.detected.length; for(int x = 0; x < max; x++) { int detections = mDetector.detected[x]; if (detections == 1) { sb.append("1"); } else { sb.append("0"); } if (sb.length() == max) { for (int j = 0; j < sb.length(); j++){ char str = sb.charAt(j); if ((j - 1) > 0) { char str1 = sb.charAt(j - 1); if (str1 == '1' && str == '0'){ bb.append(" "); } if (str1 == '0' && str == '1'){ bb.append(" "); } } if (str == '0'){ bb.append("0"); } if (str == '1'){ bb.append("1"); } } String[] dat = bb.toString().split(" "); for (String temp : dat) { if (temp.contains("1")) { data.append("1"); if (temp.length() >= 10) { data.append("1"); if (temp.length() >= 20) { data.append("11"); } } } if (temp.contains("0")) { data.append("0"); if (temp.length() >= 7) { data.append("0"); if (temp.length() > 17) { data.append("0"); if (temp.length() > 19) { data.append("0"); } } } } } String[] testing = data.toString().split("011110"); for (String s : testing) { if (s.length() == 28) { String[] new_f = s.replaceAll("..(?!$)", "$0 ").split(" "); // decoding Manchester code for (String temp : new_f) { if (temp.equals("10")) { xrt.append("1"); continue; } if (temp.equals("01")) { xrt.append("0"); continue; } if (temp.equals("00")) { xrt.append("-"); break; } if (temp.equals("11")) { xrt.append("-"); break; } } } } if (xrt.length() >= 14 && !xrt.toString().contains("-") ) { int decimalValue = Integer.parseInt(xrt.substring(0,7), 2); int charC = Integer.parseInt(xrt.substring(7, 14), 2); tooManyBuilders.append(decimalValue).append(" ").append((char) charC).append(" "); } if (xrt.length() >= 28 && !xrt.toString().contains("-") ) { int decimalValue1 = Integer.parseInt(xrt.substring(14,21), 2); int charC = Integer.parseInt(xrt.substring(21, 28), 2); tooManyBuilders.append(decimalValue1).append(" ").append((char) charC).append(" "); } if (xrt.length() >= 42 && !xrt.toString().contains("-") ) { int decimalValue2 = Integer.parseInt(xrt.substring(28,35), 2); int charC = Integer.parseInt(xrt.substring(35, 42), 2); tooManyBuilders.append(decimalValue2).append(" ").append((char) charC).append(" "); } addText(txt, tooManyBuilders.toString()); } } } return mRgba; } private void addText(final 
TextView text, final String value){ runOnUiThread(new Runnable() { @Override // TODO: fix (contains) because of possible future errors.. WIP for now.. // TODO: 'toggle' now waits for signal start (slows msg received down a bit) public void run() { String txt = text.getText().toString(); ProgressBar progress = findViewById(R.id.progressBar2); StringBuilder url_string = new StringBuilder(); ObjectAnimator animation = ObjectAnimator.ofInt(progress, "progress",count); progress.setMax(103); // #TODO: implement method for recording the end-of-signal, instead of a fixed value. String[] values = value.split(" "); String[] textview = txt.split(" "); int match = 0; for (int i = 0; i < values.length; i++) { for (int y = 0; y < textview.length; y++){ if (values[i].equals(textview[y])){ match = 1; break; } } if (match != 1 && values[i].length() >= 2){ Log.e("test", "adding value " + values[i]); text.append(values[i]); text.append(" "); match = 0; count++; progress.setProgress(count); } animation.setDuration(200); animation.setInterpolator(new DecelerateInterpolator()); animation.start(); } if (textview.length >= 103) { ArrayList<String> list = new ArrayList<>(); for (int i = 0; i <= 103; i++){ list.add(i," "); } for (int i = 0; i <= 102 ; i++){ if (Integer.parseInt(textview[i].split(" ")[0]) < 103){ Log.e("test", "attempting to remove index: "+ Integer.parseInt(textview[i].split(" ")[0])); list.remove(Integer.parseInt(textview[i].split(" ")[0])); list.add(Integer.parseInt(textview[i].split(" ")[0]), Arrays.toString(textview[i].split(" "))); Log.e("test", "looped list contains: " + list.toString()); } } url_string.append("https://"); for (int j = 0; j < list.size() - 1; j++){ url_string.append(list.get(j).split(" ")[1].replace("[", "").replace("]", "")); Log.e("test", "list to url: " + url_string.toString()); } text.setText(url_string); progress.setVisibility(View.GONE); startOOB(null); } } }); } public void startOOB(View view) { Intent intent = new Intent(this, oob.class); TextView txt = findViewById(R.id.textView2); String message = txt.getText().toString(); intent.putExtra("OOB", message); Log.e("Message", String.format("Starting OOB with: " + message)); count = 0; startActivity(intent); finish(); } }
f8ee3ad122b8890ee8b775768ac218467706a302
[ "Java" ]
1
Java
SampsaL/NOOB_VLC
4573ae9faced4ba15b3df5511b728d1957048768
ec1c6be04c8ddc41226c8046115a82576953e6e3
refs/heads/master
<repo_name>tanyav2/say_bye<file_sep>/say_bye.go package say_bye import "fmt" func SayBye() { fmt.Println("BYE YOU Crooked DINGUSI") } <file_sep>/go.mod module github.com/picowar/say_bye go 1.14
60313a99d2661055cc787b17f19256e20bb6004b
[ "Go Module", "Go" ]
2
Go Module
tanyav2/say_bye
309b6406ac7b9670fb30cb0b04528c2dc661b7b8
19515ec0fac0a28bc294dbbb8dea190d4309ed2d
refs/heads/master
<repo_name>dilip-dmk/vdsc<file_sep>/qwe.py
import os
import re

from nltk.corpus.reader.plaintext import PlaintextCorpusReader
from nltk import word_tokenize

corpusdir = 'python/'  # Directory of the corpus.
newcorpus = PlaintextCorpusReader(corpusdir, '.*')

print(newcorpus.fileids()[0])
print(type(newcorpus))
# print(newcorpus.raw())
print(newcorpus.words(newcorpus.fileids()[0]))
print(len(newcorpus.words()))

tokens = word_tokenize(newcorpus.raw())
# type(tokens)
print(len(tokens))
print(tokens[:50])
# tokens[:10]
print(newcorpus.sents())
print()


# Strip C-style comments from a string.
def removeComments(string):
    string = re.sub(re.compile(r"/\*.*?\*/", re.DOTALL), "", string)  # remove all occurrences of streamed comments (/* COMMENT */)
    string = re.sub(re.compile(r"//.*?\n"), "", string)  # remove all occurrences of single-line comments (// COMMENT\n)
    return string


print(removeComments(newcorpus.raw()))
29ac2f42402d4cc5f03154a33b5f4457ea966279
[ "Python" ]
1
Python
dilip-dmk/vdsc
20a4d46f5d5028d68e5e460bc24d8e30807f95a5
cb82e21c00a68b71606a38cb74418fabe9003aef
refs/heads/master
<file_sep>package com.yu.service; import java.io.IOException; import org.springframework.context.support.ClassPathXmlApplicationContext; import com.yu.service.TestService; /** * Created by ymz_pc on 2017/7/11. */ public class ConsumerServiceTest { public static void main(String[] args) { ClassPathXmlApplicationContext context = new ClassPathXmlApplicationContext( new String[] { "application.xml" }); context.start(); TestService testService = (TestService) context.getBean("testService"); System.out.println(testService.getName()); try { System.in.read(); } catch (IOException e) { e.printStackTrace(); } } }
d6ac29992df3e7fe3eecea427b3676e6013e5969
[ "Java" ]
1
Java
yumingzhou94/consumer
bbfce7ba52fccb3423fe18dab4510510367e0af4
11c6623e4cff0ba8bae3cd53c44925d49dcd8a15
refs/heads/master
<file_sep># Investigation

## Describe the different data analysis architectures.

### Hadoop / MapReduce

#### MapReduce is a software framework and programming model used for processing huge amounts of data. A MapReduce program works in two phases, namely Map and Reduce: Map tasks deal with splitting and mapping the data, while Reduce tasks shuffle and reduce it. (A minimal sketch of this model is shown at the end of this section.)

#### Hadoop is an open-source software framework for storing data and running applications on clusters of commodity hardware. It provides massive storage for any type of data, enormous processing power, and the ability to handle a virtually unlimited number of concurrent jobs or tasks.

### NoSQL databases

#### When we talk about NoSQL databases, also known as "Not only SQL", we are referring to a wide class of data management systems (mechanisms for data storage and retrieval) that differ, in important respects, from the classic relational model of entities (or tables) used in relational database management systems; the most prominent difference is that they do not use SQL as the main query language.

### Extended RDBMS

#### A relational database management system (RDBMS) is a program that allows you to create, update, and manage a relational database. Most commercial RDBMS use the Structured Query Language (SQL) to access the database, although SQL was invented after the development of the relational model and is not strictly necessary for its use; such extended systems are a common part of the landscape.
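To make the Map and Reduce phases described above concrete, here is a minimal, self-contained sketch of the MapReduce flow as a plain-Python word count. It is only an illustration of the model, not Hadoop's actual API; the function names and the in-memory "shuffle" step are assumptions made for this example.

```python
from collections import defaultdict

# Map phase: turn each input record (a line of text) into (key, value) pairs.
def map_phase(line):
    for word in line.split():
        yield word.lower(), 1

# Shuffle step: group together all the values emitted for the same key.
def shuffle(mapped_pairs):
    groups = defaultdict(list)
    for key, value in mapped_pairs:
        groups[key].append(value)
    return groups

# Reduce phase: collapse each key's list of values into a single result.
def reduce_phase(key, values):
    return key, sum(values)

if __name__ == "__main__":
    lines = ["big data needs big storage", "map and reduce split the work"]
    mapped = [pair for line in lines for pair in map_phase(line)]
    counts = dict(reduce_phase(k, v) for k, v in shuffle(mapped).items())
    print(counts)  # {'big': 2, 'data': 1, 'needs': 1, ...}
```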
## Characteristics of a data center

#### 1. Security: Data is the fundamental support of any business model in the modern era, so ensuring the security of information is one of the main priorities of companies. On the one hand, it is necessary to take into account the protection measures that will be implemented in the physical environment; in this sense, access control measures (identification cards, biometrics, etc.) stand out, so that only authorized personnel can reach the data center. On the other hand, it is essential to have protection tools such as data encryption, whether in hardware or in software, as well as security solutions against the loss or corruption of information during power failures.

#### 2. Cooling: Achieving an ideal temperature environment is also very important in order to obtain the best possible performance from the data center. Since it must be in continuous operation, it is essential to establish an optimal cooling system so that high temperatures do not adversely affect the performance, efficiency, and useful life of the components. In this scenario, the cooling system should not only focus on removing heat and maintaining optimal operating temperatures, but should also control humidity levels.

#### 3. Storage capacity: As ever larger amounts of data are generated, companies demand storage solutions with higher capacities. Thanks to technological advances, companies have at their disposal storage units (either conventional hard drives or solid-state drives) with increasingly large capacities that allow this high volume of information to be stored without problems.

#### 4. Data transfer: Storage capacity is useless if it cannot be accessed quickly. In this sense, it is essential that data transfers (reads and writes) happen quickly, since this promotes accessibility to the information. Likewise, it is essential that the speed of information transmission between the data center and the rest of the devices is optimal.

## Cloud and Fog data analysis features

#### Scalability and elasticity: your computing resources are not limited to a static capacity. With this functionality of cloud platforms, your systems adapt to the load they are being subjected to, so the storage or computing capacity of your application will not be exhausted.

#### Independence between device and location: Equipment and tedious data center rooms are no longer needed. Cloud computing is characterized by providing administration consoles and multiple work environments that can be accessed from a mobile device, your favorite code editor, or your computer, regardless of where you are located.

#### Security: In cloud computing, security is just as good as, and can even outperform, traditional systems. This is, in part, because providers are able to dedicate resources to solving security issues that many customers cannot afford to address. The cloud user is responsible for security at the application level; the cloud provider is responsible for physical security.

#### Cost: costs are greatly reduced. A cloud server converts capital expenditures into operating expenses, which results in lower barriers to entry, as the infrastructure is typically provided by a third party and does not have to be purchased for one-off or infrequent intensive computing tasks.

#### Performance: performance is a key part of this technological model, since all the resources are available to optimize the final result. Multiple integrations are created so that the user is able to constantly monitor and implement corrections that extract even more capacity from the same resources.

#### Maintenance: In the case of cloud computing applications, this process is reduced to allocating trained personnel to handle monitoring services. The platform takes care of the rest, since system maintenance can be configured to happen automatically. This reduces implementation times, allowing a focus on software production.

## Data engineering

#### Its importance is crucial, since it is the phase in which we prepare the data for the subsequent, analytical phase. Data engineering establishes the standards that any company needs in order to have its data in a unified, clean, and accessible form, responding to the requirements of each business.

## Define the concepts of data acquisition, management, processing and administration.

### Data management

#### Data management is the practice of collecting, maintaining, and using data in a safe, efficient, and cost-effective manner. The goal of data management is to help people, organizations, and connected objects optimize the use of data within the limits of policy and regulation, so that they can make decisions and take actions that maximize the benefit to the organization. A robust data management strategy is more important than ever, as organizations increasingly rely on intangible assets to create value.

### Data processing

#### Data processing occurs when data is collected and translated into usable information. Data scientists are often involved, alone or in teams, and it is important that the processing is done correctly so as not to adversely affect the final product or the results obtained from the data.

#### Processing begins with data in its raw form and converts it into a more readable format (graphics, documents, etc.), giving it the form and context necessary for computers to interpret it and for employees across an organization to use it.
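As a small, hypothetical illustration of that raw-to-usable step, the sketch below parses a few invented raw readings, discards the unusable one, and produces a simple summary; the record format and field names are assumptions made only for this example.

```python
# Hypothetical raw records as they might arrive from acquisition: messy strings.
raw_records = [
    " 2016-08-19, server-1, 80 ",
    "2016-08-20, server-1,  95",
    "2016-08-20, server-2, n/a",   # a bad reading that should be filtered out
]

def clean(record):
    # Parse one raw record into (date, host, value), or None if it is unusable.
    date, host, value = [field.strip() for field in record.split(",")]
    return (date, host, int(value)) if value.isdigit() else None

# Processing: parse, filter out unusable rows, then aggregate into a summary.
cleaned = [row for row in (clean(r) for r in raw_records) if row is not None]
peak_by_host = {}
for date, host, value in cleaned:
    peak_by_host[host] = max(value, peak_by_host.get(host, 0))

print(peak_by_host)  # {'server-1': 95}
```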
### Data acquisition

#### Acquiring the data means determining which data are available. We must spare no effort in researching the appropriate data sources. We want to identify the data related to our problem and take advantage of all the data that is relevant to its analysis; omitting even a small amount of important data can lead to incorrect conclusions.

## Describe the Lambda and Kappa architectures and the layers of each architecture

### Lambda architecture

#### The Lambda Architecture, represented by the Greek letter, appeared in 2012 and is attributed to <NAME>. He defined it based on his experience with distributed data processing systems during his time as an employee at the Backtype and Twitter companies, and it is inspired by his article "How to beat the CAP theorem".

#### The goal was to have a robust system, tolerant to both human and hardware faults, that was linearly scalable and allowed low-latency writes and reads.

#### The data entering the system is dispatched to both the batch layer and the speed layer. The batch layer writes the data to the master dataset and prepares the batch views, passing them to the serving layer, which indexes the batch views so that it can answer queries with very low latency. The problem is that the process of writing the data and then indexing it is slow, so it is not available instantly; this is where the speed layer comes into play, dedicated to exposing only the most recent data, without worrying about writing it to a permanent record. The result of any query can bring together data from both the batch-layer and speed-layer views.

### Kappa architecture

#### The idea is to handle both real-time data processing and continuous reprocessing in a single stream processing engine; reprocessing happens from the stream itself. This requires that the incoming data stream can be replayed (very quickly), either in its entirety or from a specific position. If there are any code changes, a second stream process replays all previous data through the latest real-time engine and replaces the data stored in the serving layer.

#### This architecture attempts to simplify things by keeping only one code base, rather than managing one for each of the batch and speed layers of the Lambda Architecture. In addition, queries only need to look at a single serving location instead of going against both batch and speed views.

#### The complication of this architecture mostly revolves around having to process the data as a stream: handling duplicate events, cross-referencing events, or maintaining order are operations that are generally easier to do in batch processing.
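To illustrate the layers described above, here is a schematic, in-memory sketch of how a Lambda-style serving layer merges a precomputed batch view with a speed view of recent events, and how a Kappa-style setup would instead rebuild its view by replaying a single event stream. All of the names and data structures are simplifying assumptions for the example, not any particular framework's API.

```python
# Batch layer output: a precomputed view over the (slow, complete) master dataset.
batch_view = {"page/home": 1200, "page/about": 310}

# Speed layer output: counts only for events that arrived after the last batch run.
speed_view = {"page/home": 17, "page/contact": 4}

def lambda_query(key):
    # Serving layer: merge the batch view with the speed view at query time.
    return batch_view.get(key, 0) + speed_view.get(key, 0)

def kappa_view(event_stream):
    # Kappa: one streaming code path builds (and can rebuild) the whole view
    # by replaying the event log from the beginning.
    view = {}
    for key in event_stream:
        view[key] = view.get(key, 0) + 1
    return view

print(lambda_query("page/home"))            # 1217: batch result plus recent events
print(kappa_view(["page/home", "page/home", "page/contact"]))
```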
## Contrast the advantages and disadvantages of each big data architecture with respect to incremental architectures.

#### Fully incremental architectures present problems such as compaction, the difficulty of keeping highly available data consistent, and the lack of tolerance to human failures, all of which require complex solutions; using the Lambda architecture achieves solutions with higher performance while avoiding that complexity.

#### One of the problems of fully incremental architectures that can be solved without a total change of architecture is tolerance to human failure. This is achieved through the use of a store of immutable events, giving what we call a fully incremental architecture with logging, and it works for both synchronous and asynchronous architectures. Although human fault tolerance is fixed, the fully incremental architecture with logging fails to solve the other complexities or problems. The Lambda architecture presents a better solution to this type of problem.
<file_sep># tareas2

###### Links to the sources of the datasets I used (https://github.com/datasets/sea-level-rise/blob/master/data/epa-sea-level.csv)(https://www.aprendemachinelearning.com/articulos_ml/)
39e390a112a9338bdbc94fa2caa484f86ea50d9b
[ "Markdown" ]
2
Markdown
roger5410/tareas2
ac77c47bab8d2eac432a2e3655677ee78f640699
c975b5ae21e9d5f6b30656862395cbeb3646d622
refs/heads/master
<repo_name>abdulqadirfaruk/ML-Iris-flower-dataset<file_sep>/README.md # ML-Iris-flower-dataset Machine Learning with Iris flower dataset "the hello world of machine learning" :-) <file_sep>/IrisFlower.py from pandas import read_csv from pandas.plotting import scatter_matrix from matplotlib import pyplot from sklearn.model_selection import train_test_split from sklearn.model_selection import cross_val_score from sklearn.model_selection import StratifiedKFold from sklearn.metrics import classification_report from sklearn.metrics import confusion_matrix from sklearn.metrics import accuracy_score from sklearn.linear_model import LogisticRegression from sklearn.tree import DecisionTreeClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.naive_bayes import GaussianNB from sklearn.svm import SVC url = "https://raw.githubusercontent.com/jbrownlee/Datasets/master/iris.csv" # declare/name data headers attributes = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'class'] dataset = read_csv(url, names=attributes) # load data & specify custom header names # dimension of dataset print(dataset.shape) # peek dataset print(dataset.head(20)) # statistical summary print(dataset.describe()) # class distribution - instances of each class print(dataset.groupby('class').size()) # univariate plot to analyse each attribute - box and whisker plot dataset.plot(kind='box', subplots=True, layout=(2, 2), sharex=False, sharey=False) pyplot.show() dataset.hist() pyplot.show() # multvariate plots - analyze interactions betwn the attributes- scatter plot scatter_matrix(dataset) pyplot.show() # creating a validation dataset-split data, 80%(train,evaluate & choose among models), 20% for validating the accuracy array = dataset.values x = array[:, 0:4] y = array[:, 4] x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.20, random_state=1, shuffle=True) # Spot Check Algorithms models = [('LR', LogisticRegression(solver='liblinear', multi_class='ovr')), ('KNN', KNeighborsClassifier()), ('CART', DecisionTreeClassifier()), ('NB', GaussianNB()), ('SVM', SVC(gamma='auto'))] # evaluate each model in turn results = [] names = [] for name, model in models: kfold = StratifiedKFold(n_splits=10, random_state=1, shuffle=True) cv_results = cross_val_score(model, x_train, y_train, cv=kfold, scoring='accuracy') results.append(cv_results) names.append(name) print('%s: %f (%f)' % (name, cv_results.mean(), cv_results.std())) # compare the algorithms pyplot.boxplot(results, labels=names) pyplot.title('ALGORITHM COMPARISON') pyplot.show() # make predictions on the dataset using the model with best accuracy model = SVC(gamma='auto') model.fit(x_train, y_train) # fit linear model (training values, target iris-classes) predictions = model.predict(x_test) # predict iris-classes using iris values(x_test) # EVALUATE PREDICTIONS accuracy = accuracy_score(y_test, predictions) * 100 # by comparing true y_test and predicted y_test print('Prediction accuracy: ' + str("%.4f" % accuracy) + '%') acc = accuracy_score(y_test, predictions, normalize=False) print('Number of correctly classified samples: ' + str(acc) + ' of ' + str(y_test.size)) print('Confusion matrix: \n' + str(confusion_matrix(y_test, predictions))) print('Classification report: \n' + str(classification_report(y_test, predictions))) # print predicted y_test values print('Predicted iris-classes:') for x in predictions: print(x)
a6103ffdb1ab1b29faf725f0b8087b0d8f00b1b5
[ "Markdown", "Python" ]
2
Markdown
abdulqadirfaruk/ML-Iris-flower-dataset
3c9d04a543713629b807019a8991d121e0179416
17c9ede3939c8fb8bffc3d677b67e12c865a2c69
refs/heads/master
<file_sep>.button { @include size(28px, 22px); border: 1px solid transparent; border-radius: 4px; margin: 0 auto; &:hover, &:active { border: 1px solid #777; box-shadow: rgba(255, 255, 255, .4) 0 1px; } &:hover { background-image: linear-gradient(#eee, #959595); } &:active { background-image: linear-gradient(#959595, #d5d5d5); box-shadow: inset 0 3px 7px #757575, rgba(white, 0.4) 0 1px; } } #refresh-icon .icon-btn { opacity: .8; &:active { opacity: .9; } } .btn-side-menu-add { $btn-color: #777; position: absolute; right: 0; padding-left: 10px; padding-right: 10px; color: $btn-color; font-size: 16px; font-weight: 600; visibility: hidden; &:active { color: $icon-hover-color; } } .menu-desc:hover .btn-side-menu-add { visibility: visible; } .btn-edit-sub:hover { color: $icon-hover-color; } .comments-button, .btn-general { color: #555; background-image: linear-gradient(whiteSmoke 0%, #CCC 100%); box-shadow: rgba(255, 255, 255, .4) 0 1px 0 inset, rgba(255, 255, 255, .3) 0 25px 30px -12px inset, rgba(0, 0, 0, .6) 0 1px 2px; } .btn-general { background-color: #bbb; padding: 3px 10px; border-radius: 4px; font-weight: 700; text-align: center; border: 0; &:active { background-image: linear-gradient(#3f3f3f 50%, #4b4b4b 100%); box-shadow: 0 2px 7px black inset, rgba(255, 255, 255, .4) 0 1px 0; } &:hover, &:visited { color: #555; } &:active { color: #f5f5f5; } } #btn-save-data { width: 70%; margin: 0 auto; line-height: 20px; } #btn-import-data { width: 175px; } .btn-footer { position: absolute; } .btn-share { @include size(32px, 22px); -webkit-box-flex: 1; margin-right: 10px; display: inline-block; background-image: url(../img/share.png); background-size: 18px; background-repeat: no-repeat; background-position: center; &:active { background-image: url("../img/share.png"); } } .replies-button:active { background-image: linear-gradient(#ccc 0%, #777 100%) !important; color: white !important; } .btn-load-more { padding: 5px 10px; margin: 20px auto; text-align: center; width: 80%; border-radius: 5px; font-size: 14px; font-weight: 700; } .btn-edit-channel, .btn-remove-channel, .btn-remove-subreddit, .btn-add-sub { &:active { background-color: #ddd; } } .comments-button { padding: 3px 10px; border-radius: 4px; font-size: 11px; font-weight: 700; display: block; text-align: center; width: 90px; margin: 0 auto; } <file_sep># Reeddit for Mac This is a version of [Reeddit](https://github.com/berbaquero/reeddit/), built for OS X, using [node-webkit](https://github.com/rogerwang/node-webkit). [Download Reeddit.app](https://github.com/berbaquero/Reeddit-app/releases/download/v1.9.6/Reeddit.app.zip). For now, you will need to update manually - download each new version. I hope to work on a way to auto-update later. # Reeddit * #### Minimalist While aiming for simplicity, Reeddit cares to show you only the most important information from the posts and comments. No upvotes or points. * #### Elastic Reeddit has 3 different elastic layouts - you can use it on any window size. So it's comfortable to use on a smartphone, tablet or desktop. * #### Read-only Being a personal side-project of mine, Reeddit can be used for browsing subreddits and viewing links/post and its comments, but not for voting or commenting... for now ;) -- However, the subreddits and channels you add are saved locally, so you don't lose any of that data. * #### Channels You can group different subreddits into a Channel, so you can browse their links in a single view, instead of having to browse each one separately. 
This is specially useful when you add subject-related subreddits. For screenshots and additional info, visit [Reeddit's Homepage](http://berbaquero.github.com/reeddit/about). ## Tools To build Reeddit, I used these awesome resources: * [pagedown](http://code.google.com/p/pagedown/) -- Client-side Markdown-to-HTML conversion. * [Mustache.js](https://github.com/janl/mustache.js/) -- Lightweigth client-side templating. * [reziseend.js](https://github.com/porada/resizeend) -- Better 'resize' handler. * [Iconmonstr](http://iconmonstr.com/) -- Awesome icons. ### Compatibility Reeddit.app works on OS X 10.7 and up. ### License Licensed under the [MIT License](http://berbaquero.mit-license.org/). <file_sep>doctype html html head title &#8203; meta(charset="utf-8") link(rel="stylesheet", href="css/normalize.css") link(rel="stylesheet", href="css/reeddit.css") link(rel="stylesheet", href="css/desk.css") body header div#main-title p#sub-title section#nav-back.corner.invisible div#back-arrow.button.btn-to-main div.icon-btn div#title-head div#header-icon p#title section.corner div#refresh-icon.button.btn-refresh div.icon-btn nav#menu-container div.view div#edit-subs span#btn-add-subs.btn-edit-sub + span#btn-edit-subs.btn-edit-sub ••• div#main-menu.wrapper div.menu-desc Subreddits span#btn-new-sub.btn-side-menu-add + ul#subs div.menu-desc Channels span#btn-new-channel.btn-side-menu-add + ul#channels div.menu-desc Sorting div#sorting p.sort-option.sorting-choice hot p.sort-option new p.sort-option controversial p.sort-option top div.menu-desc Options div#imp-exp.option Import & Export Data section.view.main-view.show-view section#main-wrap.wrapper div#main-footer.view-footer p#footer-sub div(class="btn-footer footer-refresh btn-refresh", data-origin="footer-main") section.view.detail-view section#detail-wrap.wrapper div#detail-footer.view-footer div#detail-close.btn-footer.hide.btn-to-main p#footer-post div(class="btn-footer footer-refresh btn-refresh hide", data-origin="footer-detail") script(src="js/libs.js") script(src="js/dom.js") script(src="js/reeddit.js") script(src="js/sharing.js") <file_sep>$gray-yos: #d2d2d2; $header-height-yos: 36px; .yosemite { header { background: { color: $gray-yos; image: none; } border: { top: 0; bottom: 1px solid #b3b3b3; } box-shadow: none; height: $header-height-yos; } #title-head { -webkit-box-align: baseline; } #main-title { top: 7px; } #title { margin-top: 5px; } #sub-title { line-height: 20px; &:hover { background-color: #f2f2f2; } &:active { box-shadow: none; background-color: #bbb; } } .view { top: $header-height-yos; } .corner { -webkit-box-align: baseline; } .button { background-image: none; border: 0; margin-top: 4px; &:hover { background-color: white; box-shadow: 0 1px 0 #aaa; } &:active { background-color: #ddd; box-shadow: inset 0 1px 0 #f2f2f2, 0 1px 0 #aaa; } } .comments-button { box-shadow: none; border: 1px solid #bbb; background-image: none; color: #999; font-weight: 400; &:hover { background-color: #eee; } &:active { background: { image: none; color: #bbb; } } } .replies-button:active { background-image: none !important; } .comment-poster::before { border-radius: 3px; color: white; background-color: #cf4f5b; } &.inactive { header { background-color: #f5f5f5; } } @media only screen and (min-width: 1000px) { .view { top: 0; } } } <file_sep>var doc = window.document; // Selection function $id(id) { // id: String - element id return doc.getElementById(id); } function $q(q) { // q: String - selector query return doc.querySelector(q); } function $qAll(q) { 
// q: String - selector query return doc.querySelectorAll(q); } // Manipulation function $el(el, c, id) { // el: String - html element var n = doc.createElement(el); if (c) n.className = c; if (id) n.id = id; return n; } function $append(par, chi) { // par: DOM Node - the container/parent, chi: DOM Node or String - the appendee/child if (typeof chi === "string") { var el = $el("div"); el.innerHTML = chi; while (el.childNodes.length > 0) { par.appendChild(el.childNodes[0]); } } else { par.appendChild(chi); } } function $prepend(par, chi) { // par: DOM Node - the container/parent, chi: DOM Node or String - the prependee/child if (typeof chi === "string") { var el = $el("div"); el.innerHTML = chi; while (el.childNodes.length > 0) { par.insertBefore(el.childNodes[0], par.childNodes[0]); } } else { par.insertBefore(chi, par.childNodes[0]); } } function $html(el, html) { el.innerHTML = html; } function $empty(el) { $html(el, ""); } function $text(el, s) { el.textContent = s; } function $remove(el) { if (el) el.parentNode.removeChild(el); } // Classes function $addClass(el, c) { if (el) el.classList.add(c); } function $removeClass(el, c) { if (el) el.classList.remove(c); } // Events function clickable(el, delegatedSelector, callback) { el.addEventListener("click", function(e) { var del = delegation(e, delegatedSelector); if (del.found) { e.preventDefault(); callback(del.target); } }); } function delegate(el, delegatedSelector, eventName, callback) { el.addEventListener(eventName, function(e) { var del = delegation(e, delegatedSelector); if (del.found) { callback(del.ev, del.target); } }); } function delegation(e, selector) { var currentTarget = e.target, found = false, isID = selector.charCodeAt(0) === 35, // # keyword = selector.substring(1); while (currentTarget) { var discriminator = false; if (isID) { discriminator = currentTarget.id === keyword; } else { if (currentTarget.className) { discriminator = currentTarget.className.split(" ").indexOf(keyword) >= 0; } } if (discriminator) { found = true; break; } else { currentTarget = currentTarget.parentNode; } } return { "found": found, "target": currentTarget, "ev": e }; } <file_sep>var sharing = (function() { var scriptString = 'tell application "Safari" to add reading list item "{{URL}}"', applescript, clipboard; var getAppleScript = function() { if (!applescript) { applescript = require('applescript'); } return applescript; }; var getClipboard = function() { if (!clipboard) { var gui = require('nw.gui'); clipboard = gui.Clipboard.get(); } return clipboard; }; var addToReadingList = function(url, next) { var script = scriptString.replace('{{URL}}', url); getAppleScript().execString(script, function(err, succ) { if (next) next(); }); }; var copyToClipboard = function(url) { getClipboard().set(url, 'text'); }; return { readingList: addToReadingList, clipboard: copyToClipboard }; })();<file_sep>.inactive { header, .view-footer { background-image: linear-gradient(#e5e5e5, #d5d5d5); box-shadow: none; } header { border-color: #999; } #header-icon, #main-title, #title, #footer-sub, #footer-post, .btn-footer, .icon-btn { opacity: .7; } #detail-close { opacity: .5; } } <file_sep>.notification { background-color: rgba(0, 0, 0, .8); color: white; border-radius: 4px; text-align: center; line-height: 30px; width: 200px; padding: 0 10px; position: relative; top: 50px; left: 50%; margin-left: -100px; margin-bottom: 10px; z-index: 11; }
0621015992ab45500d909a5685ceebc2b07ec17c
[ "Markdown", "SCSS", "JavaScript", "Pug" ]
8
Markdown
berbaquero/Reeddit-app
eb353f24f1b8a57210b5bfdd8935358369b05743
1beffc16dffb9b84af4f812bc9243c9395838f57
refs/heads/master
<repo_name>fervargas94/ann<file_sep>/percepton.py import fileinput import random ''' /******************************** Activation function treshold @params List inputs @return 0 or 1 ********************************* ''' def tresholdFunction(inputs): tres = 0 #print(inputs) for i in range(0, len(inputs)): tres += inputs[i] * w[i] return 1 if tres > treshold else 0 ''' /******************************** Update weights @params List inputs @return error ********************************* ''' def getWeight(train): output = train[1] calculated = tresholdFunction(train[0]) #print("calculated", calculated, "outpu", output) error = output - calculated for index, val in enumerate(w): w[index] += error * train[0][index] return error training = [] test = [] w = [] #Parser count = 0 for line in fileinput.input(): if count == 0: d = int(line) elif count == 1: m = int(line) elif count == 2: n = int(line) elif count >= 3 and count < 3 + m: line = (line.rstrip('\n').rstrip('\r')).replace(" ", "") line = line.split(",") arr = ([float(value) for value in line[0:-1]]) data = ((arr), float(line[-1])) training.append(data) else: line = (line.rstrip('\n').rstrip('\r')).replace(" ", "") line = line.split(",") arr = ([float(value) for value in line]) test.append(arr) count += 1 #Generate random weights for i in range(d): w.append(random.randrange(0, 1)) rounds = 100 treshold = random.randrange(1, 10); #Until 100 rounds or error = 0 while (rounds >= 0): rounds -= 1 error = 0 for train in training: #http://lcn.epfl.ch/tutorial/english/perceptron/html/learning.html error += pow(getWeight(train), 2) if (error == 0): break if error >= 1: print("no solution found") else: for val in test: print(tresholdFunction(val))
597752c70e3807704e016db6d0662b086987f274
[ "Python" ]
1
Python
fervargas94/ann
299185d6671a155f851f54f45fff93282d292710
21b2797af4c72a70b3213a779cdca4052312fe10
refs/heads/master
<repo_name>sugiantodenny/mysql-data-cuti<file_sep>/prosescuti.php <?php session_start(); if (isset($_SESSION['user'])){ $SERVER = 'localhost'; $DATABASE= '2016'; $username = 'root'; $pass = ''; $conn= mysqli_connect($SERVER,$username,$pass,$DATABASE); if (!$conn) { die("Connection failed :" . mysqli_connect_error()); }else{ $tglawal=$_POST["tglawal"]; $tglakhir=$_POST["tglakhir"]; $berhasil = 0; echo $tglawal." ".$tglakhir; $user=$_SESSION["user"]; $cek_data = "SELECT * FROM datacuti WHERE tglawal='$tglawal' OR tglakhir='$tglakhir'"; $query_check = mysqli_query($conn, $cek_data); $res = mysqli_num_rows($query_check); echo $res; if ($res <= 0){ $query="INSERT INTO datacuti VALUES ('','$user','$tglawal','$tglakhir')ON duplicate KEY UPDATE tglawal=$tglawal AND tglakhir=$tglakhir"; $result=mysqli_query($conn,$query); $berhasil = 1; } if ($berhasil == 1){ echo "Data berhasil disimpan"; }else{ echo "Data redundant"; } } }else{ echo "<a href=index.php>Silahkan Login</a>"; } //2016-08-19 //2016-08-26 <file_sep>/2016.sql -- phpMyAdmin SQL Dump -- version 4.5.1 -- http://www.phpmyadmin.net -- -- Host: 127.0.0.1 -- Generation Time: Nov 14, 2016 at 11:07 AM -- Server version: 10.1.16-MariaDB -- PHP Version: 5.6.24 SET SQL_MODE = "NO_AUTO_VALUE_ON_ZERO"; SET time_zone = "+00:00"; /*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */; /*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */; /*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */; /*!40101 SET NAMES utf8mb4 */; -- -- Database: `2016` -- -- -------------------------------------------------------- -- -- Table structure for table `datacuti` -- CREATE TABLE `datacuti` ( `id` int(20) NOT NULL, `user` varchar(50) NOT NULL, `tglawal` datetime NOT NULL, `tglakhir` datetime NOT NULL ) ENGINE=InnoDB DEFAULT CHARSET=latin1; -- -- Dumping data for table `datacuti` -- INSERT INTO `datacuti` (`id`, `user`, `tglawal`, `tglakhir`) VALUES (1, 'windra', '2016-11-18 00:00:00', '2016-11-25 00:00:00'), (2, 'windra', '2016-11-18 00:00:00', '2016-11-25 00:00:00'); -- -------------------------------------------------------- -- -- Table structure for table `username` -- CREATE TABLE `username` ( `user` varchar(50) NOT NULL, `password` varchar(50) NOT NULL, `namalengkap` varchar(50) NOT NULL ) ENGINE=InnoDB DEFAULT CHARSET=latin1; -- -- Dumping data for table `username` -- INSERT INTO `username` (`user`, `password`, `<PASSWORD>`) VALUES ('windra', '123', '<PASSWORD>'); -- -- Indexes for dumped tables -- -- -- Indexes for table `datacuti` -- ALTER TABLE `datacuti` ADD PRIMARY KEY (`id`); -- -- Indexes for table `username` -- ALTER TABLE `username` ADD PRIMARY KEY (`user`); -- -- AUTO_INCREMENT for dumped tables -- -- -- AUTO_INCREMENT for table `datacuti` -- ALTER TABLE `datacuti` MODIFY `id` int(20) NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=3; /*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */; /*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */; /*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */; <file_sep>/index.php <!DOCTYPE html> <html lang="en"> <head> <meta charset="UTF-8"> <title>Title</title> </head> <style> </style> <body> <h2>Form Login</h2> <form method="post" action="proseslogin.php"> <table> <tr> <td>User: </td> <td><input type="text" name="user"></td> </tr> <tr> <td>Password: </td> <td><input type="text" name="pass"></td> </tr> </table> <input type="submit" name="submit"> </form> </body> </html> <file_sep>/cuti.php <!DOCTYPE html> <html lang="en"> <head> <meta charset="UTF-8"> 
<title>Cuti</title> </head> <style> </style> <body> <?php session_start(); if (!isset($_SESSION['user'])){ echo "Harap Login"; echo "<a href='index.php'>"; }else{ echo " ".($_SESSION['user']); } ?> <h2>Form Login</h2> <form method="post" action="prosescuti.php"> <table> <tr> <td>Tanggal awal: </td> <td><input type="text" name="tglawal"></td> </tr> <tr> <td>Tanggal akhir: </td> <td><input type="text" name="tglakhir"></td> </tr> </table> <input type="submit" name="submit" value="simpan"> </form> </body> </html> <file_sep>/proseslogin.php <?php session_start(); $SERVER = 'localhost'; $DATABASE= '2016'; $username = 'root'; $pass = ''; $conn= mysqli_connect($SERVER,$username,$pass,$DATABASE); if (!$conn){ die("Connection failed :" .mysqli_connect_error()); }//else // echo "koneksi berhasil"; echo "<br>"; $user=$_POST["user"]; $pass=$_POST["pass"]; $query="SELECT * FROM username WHERE user='".$user." ' "; $result=mysqli_query($conn,$query); $a=mysqli_num_rows($result); if($result){ $row=mysqli_fetch_row($result); if ($row[1]== $pass){ echo "login berhasil". $row[2]; echo "<br>"; echo "Menu"; echo "<br>"; echo "<a href='cuti.php' >1.Masukkan data cuti</a>"; echo "<br>"; echo "<a href='history.php'>2.Tampilkan History</a>"."<br>"; echo "<a href='update.php'>3.update</a>"; $_SESSION['user']=$user; }else{ echo "Kombinasi username dan password salah";//user ada tapi password salah } }else{ echo "Kombinasi username dan password salah"; } ?>
cbcbaac2f0322e9be27cdc284f248e09b015befa
[ "Hack", "SQL", "PHP" ]
5
Hack
sugiantodenny/mysql-data-cuti
1b745b7ad3b75d496a0a77ac8eb86712d60a0a9a
70a32309a3228c0183a34db01cae6d59f84db450
refs/heads/master
<repo_name>cicelle/tvmaze<file_sep>/episodes-count-watchlist.user.js // ==UserScript== // @name Count episodes Watchlist // @namespace TVMaze // @version 1.5.6 // @description Reorganize the complete the most|least order. Also change the display // @author cicelle // @match http://www.tvmaze.com/watchlist* // @match https://www.tvmaze.com/watchlist* // @downloadURL https://raw.githubusercontent.com/cicelle/tvmaze/master/episodes-count-watchlist.user.js // @license The MIT License (MIT) // ==/UserScript== (function() { $('head').append('<style>'+ '#site-header+div+div>div{display:flex;flex-direction:column}'+ '#site-header+div+div>div>br,.watchlist-show+hr{display:none}'+ '#site-header+div+div>div>br+div{order:0}'+ '#site-header+div+div>div>div+div{order:1}'+ '.episode-list{position:relative;margin-bottom:30px}'+ '.episode-list .watched-eps{max-width:105px;width:105px;font-size:14px;line-height:14px;margin-bottom:10px;padding-right:0;text-align:left}'+ '.episode-list .watched-eps span{float:right}'+ '.episode-list .progress{position:absolute;top:3px;left:110px;right:65px;width:auto;height:10px;margin-bottom:0}'+ '.season+a.button.negative{position:absolute;top:0px;right:0;font-size:.9em;border:none;padding:0;border-radius:0;}'+ 'h2{color:#ccc}'+ 'h2 a{font-weight:400}'+ 'h2 a:last-child{font-size:.5em}'+ '@media screen and (max-width:400px){'+ 'h2{margin-bottom:30px;}'+ '.episode-list .progress{top:-20px;left:0;right: 0;max-width: 100%;}'+ 'h2 a:last-child{font-size:.6em}'+ '}'+ '</style>'); var sort = $('[name=sort] :selected').html(); var tunseen = [], tdate = [], tf = [], t; /********************** COUNTER : set unseen count to each show **********************/ function counter(selector){ var c = $(selector).html().trim().split(' / '); var unseen = parseInt(c[1]) - parseInt(c[0]); var percent = ((parseInt(c[0]) / parseInt(c[1]) )*100) + '%'; $(selector).append('<span>['+unseen+']</span>').parent().parent().attr('data-unseen', unseen); $(selector).next().find('.progress-meter').css('width', percent); } /********************** DATADATE : set the date of the first episode as an attribute for each show **********************/ function datadate(selector){ var y = $(selector).find('header + .episode-row > div:nth-child(2)').text().trim().split(', '); var m = y[0].split(' '); var d = parseInt(m[1]); if(d<10) d = '0'+d; var monthLabel = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']; m = monthLabel.indexOf(m[0]); if(m<10) m = '0'+m; $(selector).attr('data-date', y[1]+m+d); } /********************** set the order for one show **********************/ function position(){ var num = 1; tf.forEach(function(e){ $('[data-show_id='+e+']').parent().css({ 'order': num++ }); }); } /********************** to each show : - add 'episode list' link - use datadate (set the date of the first episode as an attribute for each show) - use counter (set unseen count to each show) - build arrays sorted by unseen and date **********************/ $('#filter').nextAll().each(function(){ $(this).find('h2').append(' | <a href="'+$(this).find('h2 a').attr('href')+'/episodes?all=1">Episodes list</a>'); datadate( $(this).find('.episode-list') ); counter( $(this).find('.watched-eps') ); var id = parseInt($(this).find('.episode-list').attr('data-show_id')); var i = parseInt($(this).find('.episode-list').attr('data-unseen')); var j = parseInt($(this).find('.episode-list').attr('data-date')); if(!tunseen[i]) tunseen[i] = []; tunseen[i].push(id); if(!tdate[j]) tdate[j] = []; tdate[j].push(id); 
}); /********************** change the display order to match logical unseen order **********************/ if( sort == 'Completed the most' || sort == 'Completed the least' || sort == 'Aired least recently' || sort == 'Aired most recently'){ if (sort == 'Completed the most' || sort == 'Completed the least') t = tunseen; else t = tdate; t.forEach(function(e){ e.forEach(function(f){ tf.push(f); }); }); if(sort == 'Completed the least' || sort == 'Aired most recently' ) tf.reverse(); position(); } /********************** create the observer **********************/ var MutationObserver = window.MutationObserver || window.WebKitMutationObserver || window.MozMutationObserver; var observer = new MutationObserver(function(mutations) { var oSelector = $(mutations[0].target).find('.watched-eps'); var oC = $(oSelector).html().split(' / '); var oUnseen = parseInt(oC[1]) - parseInt(oC[0]); var oPercent = ((parseInt(oC[0]) / parseInt(oC[1]) )*100) + '%'; $(oSelector).next().find('.progress-meter').css('width', oPercent); if($('[data-unseen]').length != 0){ $(oSelector).attr('data-unseen', oUnseen).append('<span>['+oUnseen+']</span>'); } }); /********************** add the observers **********************/ for(var j = 0; j < $('.episode-list').length; j++){ observer.observe($('.episode-list')[j], { childList: true }); } })(); <file_sep>/watchlist-buttons.user.js // ==UserScript== // @name Buttons Watchlist // @namespace TVMaze // @version 1.5 // @description Adding buttons for status on watchlist and shows episodes page // @author cicelle // @match http://www.tvmaze.com/watchlist* // @match http://www.tvmaze.com/shows/*/episodes* // @match https://www.tvmaze.com/watchlist* // @match https://www.tvmaze.com/shows/*/episodes* // @downloadURL https://raw.githubusercontent.com/cicelle/tvmaze/master/watchlist-buttons.user.js // @license The MIT License (MIT) // ==/UserScript== /* Now useless, due to UI improvments. 
(function() { var path = window.location.pathname.split('/')[1]; var l = $('.watchlist-show').length; var unseen = false; $('head').append('<style>'+ '.buttons{padding:5px 10px; cursor:pointer;display:block;text-align:center;}'+ 'th:nth-child(2){min-width:110px !important;}'+ 'th:nth-last-child(-n+4){text-align:center;min-width: 40px !important;}'+ 'td:nth-last-child(-n+4){padding: 0;}'+ 'table tr td,.watch-list tr td:nth-child(3){padding:1px 5px;}'+ '.checked{background:#ccc;}'+ '</style>'); $('body').on('click', '.buttons', function(){ var type = $(this).attr('data-type'); var select = $(this).parents('tr').find('select'); select.val(type); $(select).trigger('change'); if(path == 'shows'){ $(this).parents('tr').find('.checked').removeClass('checked'); $(this).parent().addClass('checked'); } else if(type == 1 && path == 'watchlist'){ $(this).parents('tr').find('td + td + td +td').removeClass('checked'); $(this).parents('tr').find('td:nth-child(6)').addClass('checked'); } }); function addcolumns(selector){ $(selector).each(function(){ $(this).find('tbody tr').each(function(){ $(this).append('<td><span data-type="2" class="buttons"><i class="fa fa-ban"></i></span></td>'+ '<td><span data-type="" class="buttons"><i class="fa fa-eye-slash"></i></span></td>'+ '<td><span data-type="1" class="buttons"><i class="fa fa-cloud"></i></span></td>'+ '<td><span data-type="0" class="buttons"><i class="fa fa-eye"></i></span></td>'); }); $(this).find('thead tr').each(function(){ $(this).append('<th>S</th><th>U</th><th>A</th><th>W</th>'); }); $(this).find('tbody select').each(function(){ var val = $(this).val(); if(val == 1){ $(this).parent().next().next().next().addClass('checked'); }else if(val == ''){ $(this).parent().next().next().addClass('checked'); }else if(val == 2){ $(this).parent().next().addClass('checked'); }else if(val == 0){ $(this).parent().next().next().next().next().addClass('checked'); } }); }); } if(path == 'shows'){ addcolumns('table'); }else if(path == 'watchlist'){ $('[data-show_id]').each(function(){ var i = $(this).attr('data-show_id'); addcolumns('[data-show_id='+i+'] table'); }); } var MutationObserver = window.MutationObserver || window.WebKitMutationObserver || window.MozMutationObserver; var observer = new MutationObserver(function(mutations) { addcolumns($(mutations[0].target)); console.log($('[data-unseen]').length); var selector = $(mutations[0].target).find('.watched-eps'); var c = $(selector).html().split(' / '); var unseen = parseInt(c[1]) - parseInt(c[0]); var percent = ((parseInt(c[0]) / parseInt(c[1]) )*100) + '%'; $(selector).next().find('.meter').css('width', percent); if($('[data-unseen]').length != 0){ $(selector).attr('data-unseen', unseen).append('<span>['+unseen+']</span>'); } }); for(var j = 0; j < l; j++){ observer.observe($('.watchlist-show')[j], { childList: true }); } setTimeout(function(){ unseen = ($('[data-unseen]').length != 0); }, 500); })();*/<file_sep>/calendar-navigation.user.js // ==UserScript== // @name Calendar navigation // @namespace TVMaze // @version 1.1 // @description Navigation buttons // @author cicelle // @include http://www.tvmaze.com/calendar* // @include https://www.tvmaze.com/calendar* // @downloadURL https://raw.githubusercontent.com/cicelle/tvmaze/master/calendar-navigation.user.js // @license The MIT License (MIT) // ==/UserScript== (function() { 'use strict'; $('head').append(`<style> .calendar{margin-bottom: 0} #calendar-footer{background-color: #ebebeb; padding: 0.5rem 0;} #calendar-footer #month{font-size: 1.3rem;font-family: 
open sans,sans-serif;font-weight: 300} </style>`); /**************************** add another calendar navigation *****************************/ if($('#calendar-header').length){ let calfooter = '<article id="calendar-footer" class="grid-x align-middle align-center">'+$('#calendar-header').html()+'</article>'; $('#calendar-wrap .card').append(calfooter) } })();
5eccbd6460434463a343efb918bc9f56e0530c68
[ "JavaScript" ]
3
JavaScript
cicelle/tvmaze
aed11ac8c506da8a7f14e363ca0585013375ec82
81971e534e294dd6a896c4a31652d1236192aa2d
refs/heads/master
<repo_name>GiveUpAtFirst/CityChooseDialog<file_sep>/README.md
# CityChooseDialog
a dialog in which you can choose a province/city/town
An address-selection pop-up dialog, in the style of JD.com
<file_sep>/citychoise/src/main/java/www/com/citychoise/base/App.java
package www.com.citychoise.base;

import android.app.Application;
import android.content.Context;
import android.database.sqlite.SQLiteDatabase;

import mmt.mq.green.dao.DaoMaster;
import mmt.mq.green.dao.DaoSession;
import www.com.citychoise.helper.DataBaseOpenHelper;

/**
 * Created by hp on 2016/6/8.
 */
public class App extends Application {
    private static Context mContext;
    public DaoSession daoSession;
    public SQLiteDatabase db;
    public DaoMaster.DevOpenHelper helper;
    public DaoMaster daoMaster;

    public static Context getContext() {
        return mContext;
    }

    @Override
    public void onCreate() {
        // 1. Application context
        mContext = getApplicationContext();
        setupDatabase();
        super.onCreate();
    }

    private void setupDatabase() {
        // Open the database that holds the address data.
        helper = new DataBaseOpenHelper(this, "DB", null);
        db = helper.getWritableDatabase();
        // Note: this database connection belongs to DaoMaster, so multiple Sessions refer to the same connection.
        daoMaster = new DaoMaster(db);
        daoSession = daoMaster.newSession();
    }

    public DaoSession getDaoSession() {
        return daoSession;
    }

    public SQLiteDatabase getDb() {
        return db;
    }

    public static App getInstance(Context context) {
        return (App) context.getApplicationContext();
    }
}
52f4e58100d3d82914873c411050e6793fb126b8
[ "Java", "Markdown" ]
2
Java
GiveUpAtFirst/CityChooseDialog
9c93617438cadaee1532bd4d71ca346d9dea0d57
4726089721964bf8f79c5278daa04876b419d252
refs/heads/master
<file_sep>import React, { useState, useEffect, useCallback, useContext } from 'react'; import MuiAlert from '@material-ui/lab/Alert'; import { makeStyles, Divider, Snackbar, IconButton, List, Hidden, TextField, Fab } from '@material-ui/core'; import AddIcon from '@material-ui/icons/Add'; import ArrowBackIosIcon from '@material-ui/icons/ArrowBackIos'; import { FetchItemsContext } from '../../../context/fetch-items-context'; import axios from '../../../shared/backendAxios'; import { isUrl } from '../../../shared/validations'; import Spinner from '../../Spinner/Spinner'; import Item from './Item/Item'; import styles from './Drawer.module.css'; const useStyles = makeStyles(theme => ({ toolbar: { ...theme.mixins.toolbar, [theme.breakpoints.down('md')]: { display: 'flex', justifyContent: 'flex-end', alignItems: 'center', }, }, })); const updateTextField = func => ({ target: { value } }) => { func(value); }; const DrawerContent = props => { const classes = useStyles(); const [rssFeeds, setRssFeeds] = useState([]); const [rssFeedsIsChecked, setRssFeedsIsChecked] = useState([]); const [isFetched, setIsFetched] = useState(false); const fetchItemsContext = useContext(FetchItemsContext); const [isOpenSnackBar, setIsOpenSnackBar] = useState(false); const [alertMessage, setAlertMessage] = useState(''); const [snackBarColor, setSnackBarColor] = useState('success'); const [rssLink, setRssLink] = useState(''); const [isRssLinkValid, setIsRssLinkValid] = useState(true); const [rssLinkHelperText, setRssLinkHelperText] = useState(''); const setSnackBarOptions = useCallback((isOpen, message, snackBarType) => { setIsOpenSnackBar(isOpen); setAlertMessage(message); setSnackBarColor(snackBarType); }, []); const onCloseSnackBar = useCallback(() => { setIsOpenSnackBar(false); }, []); const isUrlValid = useCallback(() => { if (isUrl(rssLink)) { setIsRssLinkValid(true); setRssLinkHelperText(''); } else if (rssLink) { setIsRssLinkValid(false); setRssLinkHelperText('Invalid URL(link)'); } }, [rssLink]); const sendGetItemsRequests = useCallback( (isRssFeedsChecked, force) => { const feedIds = []; for (let i = 0; i < rssFeeds.length; i += 1) { const rssFeed = rssFeeds[i]; if (isRssFeedsChecked[i]) { feedIds.push(rssFeed.id); } } fetchItemsContext.changeFeedIds(feedIds.join(';'), force); }, [fetchItemsContext, rssFeeds], ); const onClickCheckBox = useCallback( index => { const newRssFeedsIsChecked = [...rssFeedsIsChecked]; newRssFeedsIsChecked[index] = !newRssFeedsIsChecked[index]; setRssFeedsIsChecked(newRssFeedsIsChecked); sendGetItemsRequests(newRssFeedsIsChecked); }, [rssFeedsIsChecked, sendGetItemsRequests], ); const fetchData = useCallback( async id => { try { await axios.get(`fetch/${id}`); sendGetItemsRequests(rssFeedsIsChecked, true); setSnackBarOptions(true, 'Data fetched successfully', 'success'); } catch (error) { setSnackBarOptions(true, 'Error getting feeds', 'error'); } }, [setSnackBarOptions, rssFeedsIsChecked, sendGetItemsRequests], ); const addLink = useCallback( async url => { try { setIsFetched(false); const response = await axios.post('/feed/add', { url, }); setRssFeeds(rssFeeds.concat({ id: response.data.id, url: url, title: response.data.title })); setRssFeedsIsChecked(rssFeedsIsChecked => rssFeedsIsChecked.concat(false)); sendGetItemsRequests(rssFeedsIsChecked); setSnackBarOptions(true, 'Successfully added', 'success'); setIsRssLinkValid(true); setRssLinkHelperText(''); setRssLink(''); } catch (error) { setIsRssLinkValid(false); setRssLinkHelperText(error.response.data.message); } finally { 
setIsFetched(true); } }, [rssFeeds, setSnackBarOptions, rssFeedsIsChecked, sendGetItemsRequests], ); const onClickDeleteButton = useCallback( async id => { try { const index = rssFeeds.findIndex(rssFeed => rssFeed.id === id); rssFeeds.splice(index, 1); rssFeedsIsChecked.splice(index, 1); setRssFeeds(rssFeeds); setRssFeedsIsChecked(rssFeedsIsChecked); sendGetItemsRequests(rssFeedsIsChecked); await axios.delete(`/feeds/${id}`); setSnackBarOptions(true, 'Successfully deleted', 'success'); } catch (error) { setSnackBarOptions(true, 'Error deleting feed', 'error'); } }, [setSnackBarOptions, rssFeeds, rssFeedsIsChecked, sendGetItemsRequests], ); const onClickAddButton = useCallback(() => { if (isUrl(rssLink)) { addLink(rssLink); } else { setIsRssLinkValid(false); setRssLinkHelperText('Invalid URL(link)'); } }, [rssLink, addLink]); const getFeeds = useCallback(async () => { try { const response = await axios.get('feeds'); const newRssFeedsIsChecked = Array(response.data.length).fill(false); setRssFeedsIsChecked(newRssFeedsIsChecked); setRssFeeds(response.data); setIsFetched(true); } catch (error) { setSnackBarOptions(true, 'Error getting feeds', 'error'); } }, [setSnackBarOptions]); useEffect(() => { getFeeds(); }, [getFeeds]); return ( <React.Fragment> <div className={classes.toolbar}> <Hidden mdUp implementation="css"> <div> <IconButton aria-label="refresh" color="primary" onClick={props.onClickIconButton}> <ArrowBackIosIcon /> </IconButton> </div> </Hidden> </div> <Divider /> <div className={styles.addContainer}> <TextField label="RSS feed link" type="url" value={rssLink} onBlur={isUrlValid} onChange={updateTextField(setRssLink)} error={!isRssLinkValid} helperText={rssLinkHelperText} /> <Fab color="primary" aria-label="add" onClick={onClickAddButton}> <AddIcon /> </Fab> </div> <Divider /> <List> {!isFetched && <Spinner />} {isFetched && rssFeeds.map((rssFeed, index) => { return ( <Item key={`item-${index}`} isChecked={rssFeedsIsChecked[index]} rssFeed={rssFeed} index={index} onClickCheckBox={onClickCheckBox} onClickRefresh={fetchData} onClickDelete={onClickDeleteButton} /> ); })} </List> <Snackbar open={isOpenSnackBar} autoHideDuration={6000} onClose={onCloseSnackBar}> <MuiAlert elevation={6} variant="filled" color={snackBarColor} onClose={onCloseSnackBar}> {alertMessage} </MuiAlert> </Snackbar> </React.Fragment> ); }; export default DrawerContent; <file_sep>import React from 'react'; import { ThemeProvider } from '@material-ui/core'; import theme from './theme'; import SideBar from './components/SideBar/SideBar'; import FeedItems from './components/FeedItems/FeedItems'; import styles from './App.module.css'; import FetchItemsContextProvider from './context/fetch-items-context'; function App() { return ( <div className={styles.container}> <ThemeProvider theme={theme}> <FetchItemsContextProvider> <SideBar /> <FeedItems /> </FetchItemsContextProvider> </ThemeProvider> </div> ); } export default App; <file_sep>## Installation Before Installation make sure that in the file 'src/shared/backendAxios.js' you have set your backend API URL: ```bash import Axios from 'axios'; export default Axios.create({ baseURL: 'http://localhost:2001/', }); ``` Steps to install: - Step 1, install all dependencies: ```bash npm install ``` - Step 2, run the project (the project is running on port 3000): ```bash npm start ``` <file_sep>import React from 'react'; import PropTypes from 'prop-types'; import { Typography, Card } from '@material-ui/core'; import styles from './FeedItem.module.css'; const FeedItem = props => 
{ const { title, date, content, link, rssFeedTitle } = props; return ( <Card className={styles.card}> <div className={styles.titleContainer}> <a href={link} className={styles.anchorTitle}> <span className={styles.title}>{title || 'No title'}</span> </a> <div className={styles.rssFeedTitle}>{rssFeedTitle || 'No rss feed title'}</div> </div> <span className={styles.date}>{date || 'no date'}</span> <Typography variant="body2">{content || 'No content'}</Typography> </Card> ); }; FeedItem.propTypes = { title: PropTypes.string.isRequired, date: PropTypes.string.isRequired, content: PropTypes.string.isRequired, link: PropTypes.string.isRequired, rssFeedTitle: PropTypes.string.isRequired, }; export default FeedItem; <file_sep>import React, { useState } from 'react'; export const FetchItemsContext = React.createContext({ feedIds: '', changeToFetch: () => {}, }); const FetchItemsContextProvider = props => { const [feedIds, setFeedIds] = useState(''); const [forceRefresh, setForceRefresh] = useState(true); const changeHandler = (feedIds, force) => { setFeedIds(feedIds); if (force) { setForceRefresh(!forceRefresh); } }; return ( <FetchItemsContext.Provider value={{ changeFeedIds: changeHandler, feedIds: feedIds }}> {props.children} </FetchItemsContext.Provider> ); }; export default FetchItemsContextProvider; <file_sep>import React, { useCallback, useState } from 'react'; import { makeStyles, Hidden, Drawer } from '@material-ui/core'; import { SIDEBAR_WIDTH } from '../../shared/constants'; import ApplicationBar from '../ApplicationBar/ApplicationBar'; import DrawerContent from './DrawerContent/DrawerContent'; const useStyles = makeStyles(theme => ({ drawerPaper: { width: SIDEBAR_WIDTH, [theme.breakpoints.down('sm')]: { width: SIDEBAR_WIDTH - 50, }, }, container: { width: SIDEBAR_WIDTH, flexShrink: 0, [theme.breakpoints.down('md')]: { width: 0, }, [theme.breakpoints.up('md')]: { width: SIDEBAR_WIDTH, }, }, })); const SideBar = () => { const classes = useStyles(); const [isOpenOnMobile, setIsOpenOnMobile] = useState(false); const toggleIsOpenOnModal = useCallback(() => { setIsOpenOnMobile(!isOpenOnMobile); }, [isOpenOnMobile]); return ( <div className={classes.container}> <ApplicationBar onItemButtonClick={toggleIsOpenOnModal} /> <Hidden smDown implementation="css"> <Drawer variant="permanent" open classes={{ paper: classes.drawerPaper }}> <DrawerContent onClickIconButton={toggleIsOpenOnModal} /> </Drawer> </Hidden> <Hidden mdUp implementation="css"> <Drawer variant="temporary" open={isOpenOnMobile} onClose={toggleIsOpenOnModal} classes={{ paper: classes.drawerPaper }} > <DrawerContent onClickIconButton={toggleIsOpenOnModal} /> </Drawer> </Hidden> </div> ); }; export default SideBar; <file_sep>import React, { useState, useCallback } from 'react'; import { makeStyles, ListItem, Checkbox, IconButton, Divider, Menu, MenuItem, Tooltip } from '@material-ui/core'; import MoreVertIcon from '@material-ui/icons/MoreVert'; import RefreshIcon from '@material-ui/icons/Refresh'; import DeleteIcon from '@material-ui/icons/Delete'; import styles from './Item.module.css'; const useStyles = makeStyles(theme => ({ listItemRoot: { padding: 8, }, iconRoot: { color: theme.palette.primary.main, }, listRoot: { display: 'flex', }, })); const Item = props => { const { rssFeed, onClickCheckBox, onClickRefresh, onClickDelete, index, isChecked } = props; const classes = useStyles(); const [anchorEl, setAnchorEl] = useState(null); const openMenu = useCallback(event => { setAnchorEl(event.currentTarget); }, []); const closeMenu = 
useCallback(() => { setAnchorEl(null); }, []); return ( <React.Fragment> <ListItem classes={{ root: classes.listItemRoot }}> <Checkbox color="primary" checked={isChecked} onClick={() => onClickCheckBox(index)} /> <div className={styles.contentContainer}> <div>{rssFeed.title}</div> <div> <IconButton aria-label="refresh" color="primary" onClick={openMenu}> <MoreVertIcon /> </IconButton> <Menu keepMounted open={Boolean(anchorEl)} anchorEl={anchorEl} onClose={closeMenu} classes={{ list: classes.listRoot }} > <MenuItem onClick={() => onClickDelete(rssFeed.id)}> <DeleteIcon classes={{ root: classes.iconRoot }} /> </MenuItem> <MenuItem onClick={() => onClickRefresh(rssFeed.id)}> <Tooltip title="Check for new Rss Feed Items" enterDelay={700}> <RefreshIcon classes={{ root: classes.iconRoot }} /> </Tooltip> </MenuItem> </Menu> </div> </div> </ListItem> <Divider /> </React.Fragment> ); }; export default Item; <file_sep>version: '3.5' services: rss_aggregator_frontend: build: context: . ports: - 80:3000
4628c9cf9029eb376a4948a1410a09ce248789c1
[ "Markdown", "JavaScript", "YAML" ]
8
Markdown
MariusParasca/rss-reader-frontend
1b5ad3855ef37e8a3f1382c4dee2927d904784af
4fbc8a32a823dac9cae711c45f04bd6c0b49123c
refs/heads/main
<file_sep>import { Router } from "https://deno.land/x/oak@v6.5.0/mod.ts";
import { booksController } from './controllers/booksController.ts'

const router = new Router();

router.get('/api/v1/books', booksController.getAll)
router.get('/api/v1/books/:id', booksController.get)
router.post('/api/v1/books', booksController.create)
router.put('/api/v1/books/:id', booksController.update)
router.delete('/api/v1/books/:id', booksController.delete)

export { router }
<file_sep>import { MongoClient } from "https://deno.land/x/mongo@v0.21.0/mod.ts";
import "https://deno.land/x/dotenv@v2.0.0/load.ts";

const client = new MongoClient();
await client.connect(Deno.env.get('MONGO_URI')!);

export const db = client.database("deno_rest_api");
<file_sep>### echo Hello World!
GET http://localhost:8080

### Get All Books
GET http://localhost:8080/api/v1/books

### Get Books by ID
GET http://localhost:8080/api/v1/books/603b0164087250327b35addb

### Create Book
POST http://localhost:8080/api/v1/books
Content-Type: application/json

{
  "title": "test book1",
  "author": "Joe",
  "price": 1200
}

### Update Book By Id
PUT http://localhost:8080/api/v1/books/603b0164087250327b35addb
Content-Type: application/json

{
  "title": "update book1",
  "author": "Joe",
  "price": 1200
}

### Delete Book By ID
DELETE http://localhost:8080/api/v1/books/603b0164087250327b35addb
9aaccc5c54f36af87cc306eff2ebc40cfc3bb23d
[ "TypeScript", "HTTP" ]
3
TypeScript
azukiazusa1/deno-rest-api
3a48ace78568ea6d5b4dd584c7b1ca0b86b384bd
cc5cf7f8794626324e0ef859883b23cd43781009
refs/heads/master
<file_sep># e300-app
84e5c1325cbc483b3a5257fb389f6d4edb756a75
[ "Markdown" ]
1
Markdown
huaninfo/e300-app
5b06d3c15c9e4959f54da258fc585888f500e1cd
d16993683204f8864d7c28d55f2e6326738a7707
refs/heads/master
<repo_name>edmundovanosdel/edmundovanosdel.github.com<file_sep>/README.md
edmundovanosdel.github.com
==========================
ndyhoohysylootwa
01ef3c2e41cd657a1e5962512eed1359671f064b
[ "Markdown" ]
1
Markdown
edmundovanosdel/edmundovanosdel.github.com
26592a6e6775c839d34c51764615ffe652d29f06
1f739e3b0858177726c136d79f4c9d4885d7bbbb
refs/heads/master
<file_sep># Dapp-Token-Farm
64ba7742f415b3a46fa4c27987978c8f7fd8cf60
[ "Markdown" ]
1
Markdown
skundu42/Dapp-Token-Farm
04e45662ffb42a38a2272b1c8f4223fbb92df2cb
a3463dbec7b0836a60203fe34998fa9cbecc782b
refs/heads/main
<file_sep># Lista_Animais O objetivo desse programa é inserir um nó com um nome de animal ou de qualquer ser vivo e também da espaço para colocar suas características, o programa também oferece várias opções como por exemplo: Inserir um novo nó, retirar um nó, buscar um nó, trocar um nó na posição anterior e posterior e etc. <file_sep>#include <iostream> #include <sstream> #include <cstdlib> #include <ctime> #include <cstdio> #include <iomanip> using namespace std; struct noAnimal { string nome; string especie; string porte; string dieta; float peso; }; const int N = 10; noAnimal vAnimal[N]={ {"Bob", "Lobo-Cinzento", "M","Carnivoro", 80}, {"Fifi", "Camundongo", "P", "Carnivoro", 0.019}, {"Espiriti", "Cavalo", "G", "Herbivoro", 1000}, {"Dumbo", "Elefante-da-savana", "GG", "Herbivoro", 6000}, {"Jaja", "Urso", "G", "Onivoro", 600} }; int final = 4; noAnimal val; char conf; bool sinal; void inseirirNoFim(); void removerNoPosicaoK(); void alterarNoPosicaoAnterior(); void inserirNoPosteriorAoNoK(); void consultarNo(); void removerNoPosterior(); void alterarConteudoDoNoAnteriorAX(); void quantidadeValorMaiorQue80(); void classificarOrdemDecrescente(); void alterarConteudoDaPosicaoK(); void procurarNoInserirNovoNoAnterior(); void imprimirLista(); void imprimirListaOrdemInversa(); int main() { // Configurações do Programa setlocale(LC_ALL, "Portuguese"); system("color 02"); char op = 'X'; while (op != 'O') { system("cls"); cout << "\n\n\t\t\tPrograma Lista de Animais\n\n"; //imprimir opções do menu cout << "\n\t\tO - Sair"; cout << "\n\t\tA - Inserir um nó no fim"; cout << "\n\t\tB - Remover um nó na posição K"; cout << "\n\t\tC - Alterar o conteúdo de um nó na posição K-1"; cout << "\n\t\tD - Procurar um nó e inserir um novo nó após o nó encontrado"; cout << "\n\t\tE - Consultar um nó com valor igual a X"; cout << "\n\t\tF - Remover o nó posterior ao nó da posição K"; cout << "\n\t\tG - Alterar o conteúdo de um nó anterior ao nó com valor igual a X"; cout << "\n\t\tH - Verificar a quantidade de nós com valor(peso) maior que 80"; cout << "\n\t\tI - Classificar em ordem decrescente"; cout << "\n\t\tJ - Alterar o conteúdo do nó da posição K"; cout << "\n\t\tK - Procurar um nó e inserir um novo nó anterior ao nó encontrado"; cout << "\n\t\tL - Imprimir a lista"; cout << "\n\t\tM - Imprimir a lista em ordem inversa"; cout << "\n\n\t\tEscolha uma opção "; fflush(stdin); op= getchar(); op = toupper(op); // Se for usar Alfabeto; // executar as funções conforme a escolha switch(op) { case 'O': { cout << "\n\t\tObrigado por usar nosso Sistema\n\n"; break; } case 'A': { inseirirNoFim(); break; } case 'B': { removerNoPosicaoK(); break; } case 'C': { alterarNoPosicaoAnterior(); break; } case 'D': { inserirNoPosteriorAoNoK(); break; } case 'E': { consultarNo(); break; } case 'F': { removerNoPosterior(); break; } case 'G': { alterarConteudoDoNoAnteriorAX(); break; } case 'H': { quantidadeValorMaiorQue80(); break; } case 'I': { classificarOrdemDecrescente(); break; } case 'J': { alterarConteudoDaPosicaoK(); break; } case 'K': { procurarNoInserirNovoNoAnterior(); break; } case 'L': { imprimirLista(); break; } case 'M': { imprimirListaOrdemInversa(); break; } default: { cout << "\n\t\tOpção Invalida: " << op; break; } } } } //Inserir um nó no fim void inseirirNoFim() { sinal = false; system ("cls"); cout << "\t\t\tInserir um novo nó no fim da lista\n\n"; if (final < N-1) { cout << "\n\t\tInforme um nome: "; cin >> val.nome; fflush(stdin); cout << "\n\t\tInforme uma especie: "; cin >> val.especie; fflush(stdin); 
cout << "\n\t\tInforme a dieta: "; cin >> val.dieta; fflush(stdin); cout << "\n\t\tInforme o porte (P = Pequeno | M = Médio | G = Grande | GG = Gigante): "; cin >> val.porte; fflush(stdin); cout << "\n\t\tInforme o peso: "; cin >> val.peso; fflush(stdin); cout << "\n\t\tConfirma a operação ? (S/N)"; cin >> conf; fflush(stdin); conf = toupper (conf); if (conf == 'S') { final ++; vAnimal[final] = val; sinal = true; } else { cout << "\n\n\t\tDados não confirmados"; } } else { cout << "\n\n\t\tLista cheia - overflow\n"; } system ("pause"); } //Remover nó na posição K void removerNoPosicaoK() { int k, aux; system("cls"); sinal = false; cout << "\n\t\t\tRemover nó na posição K\n\n"; if (final > -1) { //verificar se tem lista cout << "\t\t\tInforme a posição: "; cin >> k; fflush(stdin); if ((k >= 0) && (k <= final)) { val = vAnimal[k]; for (int i = 0; i < 75; i++) { cout << "-"; } cout << "\n\t\tNome: " << val.nome; cout << "\n\t\tEspécie: " << val.especie; cout << "\n\t\tDieta: " << val.dieta; cout << "\n\t\tPorte: " << val.porte; cout << "\n\t\tPeso: " << val.peso << endl; for (int i = 0; i < 75; i++) { cout << "-"; } cout << "\n\t\tConfirma a operação (S/N)"; cin >> conf; fflush(stdin); conf = toupper (conf); if (conf == 'S') { aux = k; while (aux < final) { vAnimal[aux] = vAnimal[aux + 1]; aux ++; } final--; sinal = true; } else { cout << "\n\t\tDados não confirmados "; } } else { cout << "\n\t\tA posição " << k << " nao pertence a lista"; } } else { cout << "\n\t\tLista vazia"; system ("pause"); } } //Alterar o conteúdo de um nó na posição K-1 void alterarNoPosicaoAnterior() { int k, aux; sinal = false; system("cls"); cout << "\n\t\t\tAlterar o conteúdo de um nó na posição K-1\n\n"; if (final > -1) { cout << "\t\t\tInforme o valor um valor: "; cin >> k; fflush(stdin); if ((k > 0) && (k <= final)) { val = vAnimal[k]; aux = k; cout << "\n\t\t\tDigite um nome: "; cin >> val.nome; fflush(stdin); cout << "\n\t\t\tDigite uma especie: "; cin >> val.especie; fflush(stdin); cout << "\n\t\t\tDigite a dieta: "; cin >> val.dieta; fflush(stdin); cout << "\n\t\t\tDigite o porte (P = Pequeno | M = Médio | G = Grande | GG = Gigante): "; cin >> val.porte; fflush(stdin); cout << "\n\t\t\tDigite o peso(Kg): "; cin >> val.peso; fflush(stdin); cout << "\n\t\t\tConfirma a Inserção de dados(S/N): "; cin >> conf; conf = toupper(conf); // Converte para maiusculo if (conf == 'S') { vAnimal[aux-1] = val; sinal = true; } else { cout << "\n\t\t\tDados não confirmados para a remoção"; cout << "\n\t\t"; system("pause"); } } else { cout << "\n\n\t\tA posição " << k << " é inválida"; cout << "\n\t\t"; system("pause"); } } else { cout << "\n\n\t\tLista Vazia..."; cout << "\n\t\t"; system("pause"); } } //Procurar um nó e inserir um novo nó após o nó encontrado void inserirNoPosteriorAoNoK() { noAnimal auxLista; int aux, k; system("cls"); sinal = false; cout << "\n\t\t\tProcurar um nó e inserir um novo nó após o nó encontrado\n\n"; if (final < N-1) { cout << "\t\t\tInforme a posição: "; cin >> k; fflush(stdin); if ((k >= 0) && (k < final)) { for (int i = 0; i < 75; i++) { cout << "-"; } cout << "\n\t\t\tInforme um nome: "; cin >> val.nome; fflush(stdin); cout << "\n\t\t\tInforme uma especie: "; cin >> val.especie; fflush(stdin); cout << "\n\t\t\tInforme a dieta: "; cin >> val.dieta; fflush(stdin); cout << "\n\t\t\tInforme o porte (P = Pequeno | M = Médio | G = Grande | GG = Gigante): "; cin >> val.porte; fflush(stdin); cout << "\n\t\t\tInforme o peso: "; cin >> val.peso; fflush(stdin); cout << "\n\t\t\tConfirma a operação ? 
(S/N)"; cin >> conf; fflush(stdin); conf = toupper (conf); if (conf == 'S') { final++; aux = k; while (aux < final) { auxLista = vAnimal[aux+1]; vAnimal[aux+1] = val; val = auxLista; aux ++; } sinal = true; } else { cout << "\n\t\t\tDados não confirmados "; system("pause"); } } else { cout << "\n\t\t\tA posição " << k << " nao pertence a lista\n\n"; system("pause"); } } else { cout << "\n\t\t\tLista cheia - overflow"; system("pause"); } } /*Consultar um nó com valor igual a X*/ void consultarNo() { int aux; string nome; system("cls"); sinal = false; cout << "\n\t\t\tConsultar um nó pelo nome\n\n"; if (final>-1) { cout << "\t\t\tDigite um nome: "; cin >> nome; fflush(stdin); cout << "\n\t\tConfirma a Inserção de dados(S/N): "; cin >> conf; conf = toupper(conf); // Converte para maiusculo if (conf == 'S') { aux = 0; while((aux != final) && (vAnimal[aux].nome != nome)) { aux++; } val = vAnimal[aux]; if (val.nome == nome) { cout << "\n\t\tO registro no nó " << aux << " te as seguintes informações" << "\n\n\n"; cout << "\t\t" << setw(7) << "Nome" << setw(15) << "Especie" << setw(15) << "Porte" << setw(15) << "Dieta" << setw(15) << "Peso" << endl; cout << "\t\t" << setw(7) << val.nome << setw(15) << val.especie << setw(15) << val.porte << setw(15) << val.dieta << setw(15) << val.peso << "\n\n"; sinal = true; system("pause"); } else { cout << "\n\t\tNão existe nenhum registro com o nome de " << nome << " na lista" << "\n\n"; system("pause"); } } else { cout << "\n\t\tDados não confirmados para buscar" << "\n\n"; system("pause"); } } else { cout << "\t\t\tLista Vazia"; system("pause"); } } /*Remover o nó posterior ao nó da posição K*/ void removerNoPosterior() { int k, aux; system("cls"); sinal = false; cout << "\n\t\t\tRemover o nó posterior ao nó da posição K\n\n"; if (final > -1) { cout << "\t\t\tInforme o valor de K: "; cin >> k; fflush(stdin); if ((k >= 0) && (k <= final)) { val = vAnimal[k]; cout << "\t\t" << setw(7) << "Nome: " << val.nome << setw(15) << "Especie: " << val.especie << setw(15) << "Porte: " << val.porte << setw(15) << "Dieta: " << val.dieta << setw(15) << "Peso(kg): " << val.peso << endl; cout << "\n\t\tConfirma a Inserção de dados(S/N): "; cin >> conf; conf = toupper(conf); // Converte para maiusculo if (conf == 'S') { aux = k; while (aux < final) { vAnimal[aux] = vAnimal[aux+1]; aux++; } final--; sinal = true; } else { cout << "\n\t\tDados não confirmados para a remoção"; cout << "\n\t\t"; system("pause"); } } else { cout << "\n\n\t\tA posição " << k << " não pertence a lista"; cout << "\n\t\t"; system("pause"); } } else { cout << "\n\n\t\tLista Vazia..."; cout << "\n\t\t"; system("pause"); } } /*Alterar o conteúdo de um nó anterior ao nó com valor igual a X*/ void alterarConteudoDoNoAnteriorAX() { int x, aux; system("cls"); sinal = false; cout << "\n\t\t\tAlterar o conteúdo de um nó anterior ao nó com valor igual a X\n\n"; if (final > -1) { cout << "\t\t\tInforme o valor um valor: "; cin >> x; fflush(stdin); if ((x > 0) && (x <= final)) { val = vAnimal[x]; aux = x; cout << "\n\t\tDigite um nome: "; cin >> val.nome; fflush(stdin); cout << "\n\t\tDigite uma especie: "; cin >> val.especie; fflush(stdin); cout << "\n\t\tDigite a dieta: "; cin >> val.dieta; fflush(stdin); cout << "\n\t\tDigite o porte (P = Pequeno | M = Médio | G = Grande | GG = Gigante): "; cin >> val.porte; fflush(stdin); cout << "\n\t\tDigite o peso(Kg): "; cin >> val.peso; fflush(stdin); cout << "\n\t\tConfirma a Inserção de dados(S/N): "; cin >> conf; conf = toupper(conf); // Converte para maiusculo if 
(conf == 'S') { vAnimal[aux-1] = val; sinal = true; } else { cout << "\n\t\tDados não confirmados para a remoção"; cout << "\n\t\t"; system("pause"); } } else { cout << "\n\n\t\t\tA posição " << x << " é inválida"; cout << "\n\t\t"; system("pause"); } } else { cout << "\n\n\t\t\tLista Vazia..."; cout << "\n\t\t"; system("pause"); } } //Verificar a quantidade de nós com valor maior que 80 void quantidadeValorMaiorQue80() { int maior80 = 0, cont; system("cls"); cout << "\n\n\t\t\tQuantidade de animais que possuem o peso superior a 80Kg\n\n"; sinal = false; if (final > -1) { cont = 0; while (cont <= final) { if ((float)vAnimal[cont].peso > 80) { maior80++; } cont++; } cout << "\n\t\t\tTem " << maior80 << " animal(is) que possuem o peso maior que 80Kg\n\n"; cout << "\n\t\t"; system("pause"); } else { cout << "\n\n\t\tLista Vazia...\n\n"; cout << "\n\t\t"; system("pause"); } } //Classificar em ordem decrescente void classificarOrdemDecrescente() { sinal = false; system("cls"); cout << "\n\t\t\t Classificação em ordem decrescente\n\n"; if (final >= -1){ for (int x=0; x<final; x++) { for (int y=0; y<final; y++){ if (vAnimal[y].nome < vAnimal[y+1].nome) { val = vAnimal[y]; vAnimal[y] = vAnimal[y+1]; vAnimal[y+1] = val; sinal = true; } } } } else { cout << "\n\t\t\tLista Vazia"; } system("pause"); } //Alterar o conteúdo do nó da posição k void alterarConteudoDaPosicaoK() { int k; sinal = false; system("cls"); cout << "\n\t\t\tAlterar o conteúdo do nó da posição k\n\n"; if (final > -1){ cout << "\n\t\tInforme uma posição: "; cin >> k; if (k>-1 && k<= final){ val = vAnimal[k]; cout << "\n\t\tInforme o nome: "; cin >> val.nome; fflush(stdin); cout << "\n\t\tInforme a especie: "; cin >> val.especie; fflush(stdin); cout << "\n\t\tInforme o porte (P = Pequeno | M = Médio | G = Grande | GG = Gigante): "; cin >> val.porte; fflush(stdin); cout << "\n\t\tInforme a dieta: "; cin >> val.dieta; fflush(stdin); cout << "\n\t\tInforme o peso: "; cin >> val.peso; fflush(stdin); cout << "\n\t\tConfirma a Inserção de dados(S/N): "; cin >> conf; conf = toupper(conf); // Converte para maiusculo if (conf = 'S'){ vAnimal[k] = val; sinal = true; } else { cout << "\n\t\tDados não confirmados para a alteração\n\n"; } } else { cout << "\n\t\tA posição " << k << " não pertence a lista\n\n"; } } else { cout << "\n\t\tLista vazia\n\n"; } system("pause"); } //Procurar um nó e inserir um novo nó anterior ao nó encontrado void procurarNoInserirNovoNoAnterior() { string x; int aux, fim; system("cls"); cout << "\n\t\t\tProcurar um nó e inserir um novo nó anterior ao nó encontrado\n\n"; sinal = false; if (final>-1 && final<N-1){ cout << "\n\t\tInforme x: "; cin >> x; aux = 0; while (aux<final && vAnimal[aux].nome != x) { aux++; } if (vAnimal[aux].nome == x){ if (aux > 0){ cout << "\n\t\tInforme o nome: "; cin >> val.nome; fflush(stdin); cout << "\n\t\tInforme a especie: "; cin >> val.especie; fflush(stdin); cout << "\n\t\tInforme o porte: "; cin >> val.porte; fflush(stdin); cout << "\n\t\tInforme a dieta: "; cin >> val.dieta; fflush(stdin); cout << "\n\t\tInforme o peso: "; cin >> val.peso; fflush(stdin); cout << "\n\t\tConfirma a Inserção de dados(S/N): "; cin >> conf; conf = toupper(conf); // Converte para maiusculo if (conf = 'S'){ fim = final + 1; while(fim > aux){ vAnimal[fim] = vAnimal[fim-1]; fim = fim - 1; } vAnimal[aux] = val; sinal = true; final = final + 1; } else { cout << "\n\t\tNao confirmado"; } } else { cout << "\n\t\tNao existe no antes da posição 0"; } } else { cout << "\n\t\t " << x << " nao pertence a lista"; } } 
else { cout << "\n\t\tLista vazia\n"; } system("pause"); } //Imprimir Lista void imprimirLista() { int aux; system("cls"); sinal= false; cout << "\n\t\t\tImprimir Lista Senador\n\n"; if (final >- 1) { cout << setw(10) << "Nome" << setw(30) << "Especie" << setw(10) << "Porte" << setw(20) << "Dieta" << setw(10) << "Peso" << endl; for (int i = 0; i < 80; i++) { cout << "-"; } cout << endl; //IMPRIMIR A LISTA aux = 0; while (aux <= final) { val = vAnimal[aux]; cout << setw(10) << val.nome << setw(30) << val.especie << setw(10) << val.porte << setw(20) << val.dieta << setw(10) << val.peso << "\n\n"; aux++; } for (int i = 0; i < 80; i++) { cout << "-"; } cout << endl; sinal = true; system("pause"); } else { cout << "\nLista Senador Vazia..."; } } //Imprimir a lista em ordem inversa void imprimirListaOrdemInversa() { int x; sinal = false; system("cls"); cout << "\n\t\t\tImprimir lista inversa\n\n"; if (final > -1){ cout << setw(10) << "Nome" << setw(30) << "Especie" << setw(10) << "Porte" << setw(20) << "Dieta" << setw(10) << "Peso" << endl; for (int i = 0; i < 80; i++) { cout << "-"; } cout << endl; for (x=final; x>=0; x--){ val = vAnimal[x]; cout << setw(10) << val.nome << setw(30) << val.especie << setw(10) << val.porte << setw(20) << val.dieta << setw(10) << val.peso << "\n\n"; } for (int i = 0; i < 80; i++) { cout << "-"; } cout << endl; sinal = true; } else { cout << "\nLista Senador Vazia..."; } cout << "\n"; system("pause"); }
4eff6d47ed1949a0131ab7066ae63e5ff05c2732
[ "Markdown", "C++" ]
2
Markdown
Emanuel-Hashimoto/Lista_Animais
115e3b0f3d548581d7c5a601ca94f693ae58925b
80acdf42464ebdd367063b15a643a0f774f45ca0
refs/heads/main
<file_sep># Trippples

A recreation of the game Trippples in Racket.

Two client worlds may connect to a server at once. On startup of the client, a login window will show. This window closes when you attempt to log into the server. Additional clients are immediately booted off. No spectator mode here.

Clients send a packet of two integers to the server. The first represents the cursor location, the second the selected tile. The server sends the new world state after each valid update. This includes the board position, player token positions, unplaced tiles, and turn order.
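The README does not show the actual wire format (a Racket universe program would normally exchange S-expression messages), so the snippet below is only a minimal illustrative sketch of the "two integers" packet described above, done with Python's standard `struct` module. The field names `cursor` and `selected_tile`, and the big-endian 32-bit encoding, are assumptions, not the project's real protocol.

```python
# Hypothetical sketch only: pack/unpack the two-integer client message
# described above as big-endian unsigned 32-bit values. Names and encoding
# are assumptions taken from the README, not the actual Racket protocol.
import struct

def pack_move(cursor: int, selected_tile: int) -> bytes:
    """Client -> server: cursor location followed by the selected tile."""
    return struct.pack(">II", cursor, selected_tile)

def unpack_move(payload: bytes) -> tuple:
    """Server side: recover (cursor, selected_tile) from the packet."""
    return struct.unpack(">II", payload)

if __name__ == "__main__":
    packet = pack_move(12, 3)
    assert unpack_move(packet) == (12, 3)
    print(f"{len(packet)} bytes: {packet.hex()}")
```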
c33d0eecf1113e444f30390b1dc76b6d3aa58834
[ "Markdown" ]
1
Markdown
Skallos-s/Trippples
e3b4aa541eafc69a749ea08d09f2fb1070460a78
99927b346cf5bc6ac005ca0f835a2996ebfea75a
refs/heads/master
<repo_name>VictorGarritano/MetodosNumericos2<file_sep>/README.md
# Metodos Numéricos 2

This repository contains lecture notes for the *Métodos Numéricos 2* course.

The lectures are split into separate files, as are the exercises and solutions.
63b00000a1051b7340595c357a8e216377676d83
[ "Markdown" ]
1
Markdown
VictorGarritano/MetodosNumericos2
c7d665b0e5d4a33d69b651452e5639bcbc3d7b77
b554c37c9af2d7f0f2b41bf76a5444646ddd6d0d
refs/heads/master
<file_sep>#!/bin/bash
#
# make a branch
git checkout -b "$1";
CURRENT_BRANCH="$(git status --branch --porcelain | head -n 1 | sed 's/## //')"
#
# check that the new branch is actually checked out
if [ "$CURRENT_BRANCH" = "$1" ] ; then
  # empty commit (use the optional second argument as the commit message)
  if [ -n "$2" ] ; then
    git commit --allow-empty -m "$2";
  else
    git commit --allow-empty -m "$1 init.";
  fi
  # log
  git log
  read -p "Push? (y/n): " answer
  case "$answer" in
    [yY]*)
      git push origin HEAD;
      hub pull-request -m "$1" -l meeting task: feature -a ayanakahara -F - --edit < ../auto_initial_commit/.github/PULL_REQUEST_TEMPLATE.md -o;;
    *)
      echo "no push." ; exit ;;
  esac
else
  echo "Oh.. you can not make a branch :(";
fi
a042903be3616f9662f1d9654e501b5a0d65b35c
[ "Shell" ]
1
Shell
ayanakahara/auto_initial_commit
dd0d778afb48d82b3b842694fedc5d095de82fda
653416e39bfe752e19949a84e9a97a352429d5a6
refs/heads/main
<file_sep>import byteplot as bp import torch from torch.utils import data from torch.utils.data import Dataset, DataLoader import os import numpy as np import cv2 path_name_benign = 'Sample\\' path_name_malicious = 'Sample\\' class PDFDataset(Dataset): def __init__(self,plot_type): # call the __init__() of its parent class super().__init__() # We need to know if the incoming pdfs are converted to grayscale images using the byte plot or markov plot self.plot_type = plot_type self._load() # do something to initialize the pdf dataset object pass def __len__(self): # return the number of instances of the dataset # how to reference of this list like you did in your dataset? return len(self.labels) def __getitem__(self, idx): # return X, y, which is the array at index idx and the label (benign or malicious) at idx # X is the byte plot or markov plot # y is the corresponding label of X X = self.X[idx] y = self.labels[idx] X, y = self._to_tensor(X, y) return X, y def _to_tensor(self, X, y): return torch.tensor(X, dtype=torch.float32), torch.tensor(y, dtype=torch.long) # Get the actual dataset pdfs and load them into a dictionary(good and bad pdfs) def _load(self): # In a sample directory with only 10 pdfs for now benign_files = (path_name_benign) malicious_files = (path_name_malicious) # Dictionary to store each plot for good and bad pdfs data_by_type = {'benign':None,'malicious':None} path_list_benign = [] path_list_malicious = [] # only 2 key/value pairs in the dictionary, 'benign' and 'malicious'. Each value will be a list containing x and y path names to corresponding grayscale images if self.plot_type == 'byte_plot': # Convert all pdfs to images and save their paths in a list for file_name in os.listdir(benign_files): if file_name.endswith('pdf'): # converts each image and adds its respective integer array to the dictionary bp.convert(benign_files,file_name,256) img_file_name=file_name.replace("pdf","png") path_name = f"{benign_files}{img_file_name}" path_list_benign.append(cv2.imread(path_name,cv2.IMREAD_UNCHANGED)) # add this list to the dictionary as benign's value data_by_type['benign'] = path_list_benign # Do the same for the malicious files for file_name in os.listdir(malicious_files): if file_name.endswith('pdf'): bp.convert(malicious_files,file_name,256) img_file_name=file_name.replace("pdf","png") path_name = f"{malicious_files}{img_file_name}" path_list_malicious.append(cv2.imread(path_name,cv2.IMREAD_UNCHANGED)) data_by_type['malicious'] = path_list_malicious elif self.plot_type == 'markov_plot': data_by_type['benign'] = 1 # put the function call here to convert data_by_type['malicious'] = 1 # put the function call here to convert # Creates one large list by concatenating 2 smaller lists. Each smaller list has size = total number of values under the corresponding key in the data_by_type dictionary(benign or malicious). 
The large list (labels) has the same size as the entire dataset, and consists of x entries of "malicious" and y entries of "benign" labels = [] for k in data_by_type.keys(): n = len(data_by_type[k]) label = np.repeat(k,n) labels.extend(label) self.labels = labels # Implement list of inputs, X X = [] for k in data_by_type.keys(): X.extend(data_by_type[k]) self.X = X def testcase_test_pdfdataset(): dataset = PDFDataset('byte_plot') # setup dataloader # check pytorch document for the parameter list dl = DataLoader( dataset, batch_size=32, shuffle=True, num_workers=1, pin_memory=True, drop_last=True, ) # print(dataset.X) # loop through dataset for X, y in dl: # print out X and y print(X, y) # __name__ is an attribute of the file itself, essentially a 'main' function if __name__ == '__main__': testcase_test_pdfdataset()
2bb575ffff69cab13792805858b42abc032c8190
[ "Python" ]
1
Python
jackzemlanicky/NMTLab2
821573da7a34ca0120d3eaacc3559bcf0bb5f196
1655de54cc908433ed25f47bcc67b18801202287
refs/heads/master
<file_sep>import requests import datetime import time import configparser import logging from multiprocessing import Pool logger = logging.getLogger("booking seat") logger.setLevel(logging.DEBUG) ch = logging.StreamHandler() ch.setLevel(logging.DEBUG) formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s") ch.setFormatter(formatter) # 将相应的handler添加在logger对象中 logger.addHandler(ch) cf = configparser.ConfigParser() cf.read('booking.conf') START_TIME = cf.get("time", "START_TIME") END_TIME = cf.get("time", "END_TIME") url = cf.get("HTTP_INFO", "url") # 从配置中读取string类型的位置信息,转换成list类型并strip每个元素 str_goodSeatNo = cf.get("seatnos", "goodSeatNo") goodSeatNo = [] for j in str_goodSeatNo.split(','): goodSeatNo.append(j.strip()) alluserinfo = [] for section in cf.sections(): if "userinfo" in section: alluserinfo.append(dict(cf.items(section))) logger.info("START_TIME: " + START_TIME) logger.info("END_TIME: " + END_TIME) logger.info("url: " + url) logger.info("good SeatNo list: " + str(goodSeatNo)) logger.debug("all user info: " + str(alluserinfo)) # get 阅览室id :yls_id def get_ysl_id(): try: yls_id = '' response = requests.post(url + "/seat/yuelanshi_list?mode=local") if "排队成功" in response.json()["content"]: serial_no = response.json()["serialno"] else: logger.debug(str(response.json()["content"])) return yls_id response = requests.post(url + "/seat/get_task_status?serialno=" + serial_no) content = response.json()["content"] if ("没有可以预定的阅览室" in content) or ("排队中请等待" in content): logger.info(str(response.json()["content"])) return yls_id else: logger.debug("获取阅览室ID: " + str(response.json()["content"])) yls_id = response.json()["content"][0]["id"] logger.info("阅览室ID: " + str(yls_id)) return yls_id except Exception as e: print("Exception: ", e) return yls_id # 选定位置 def check_position(user_info, yls_id): try: # 获取阅览室可订位置列表,并获取排队序号 response = requests.post(url + '/seat/yuelanshi_seat?mode=local&yuelanshiId=' + yls_id) ser_no2 = response.json()["serialno"] logger.debug("排序号: " + str(ser_no2)) # 根据当前的排队序号,查看还有多少可选的位置 # logger.info("查看可选位置......") response = requests.post(url + '/seat/get_task_status?serialno=' + ser_no2) logger.debug(str(response.json()["content"])) content = response.json()["content"] if ("没有座位" in content) or ("排队中" in content): logger.info(str(response.json()["content"])) return False logger.info("remain seat num: " + str(len(response.json()["content"]))) logger.info("remain seat info: " + str(response.json()["content"])) # 空着的位置有哪些 remain_seats = [] for i in range(0, len(response.json()["content"])): remain_seats.append(response.json()["content"][i]["seatno"]) logger.debug("type([0]seat_no): " + str(type(response.json()["content"][0]["seatno"]))) logger.debug("all seat no: " + str(remain_seats)) # 最想要的几个位置还空着的话,就按先后顺序选最想要的位置 seat_no = '0' for g in goodSeatNo: if (g in remain_seats) or (int(g) in remain_seats): seat_no = g logger.debug("str(type(g)): " + str(type(g))) break # 没有选上中意的位置时,取剩余的中间位置 if '0' == seat_no: middle = int(len(response.json()["content"]) / 2) seat_no = response.json()["content"][middle]["seatno"] # 根据阅览室id,选取的位置seat_no,帐户信息进行预定 logger.info("select seatNo: " + str(seat_no)) my_data = {'yuelanshiId': yls_id, 'seatNo': seat_no} my_data.update(user_info) logger.debug(url + '/seat/orderMySeat?mode=local' + str(my_data)) response1 = requests.post(url + '/seat/orderMySeat?mode=local', my_data) # 排队成功后,验证结果,查看是否预定成功 logger.debug(str(response1.json()["content"])) if "排队成功!" 
in response1.json()["content"]: check_serialno = response1.json()["serialno"] headers = {"Referer": url + "/seat/XueShengloginByNo.jsp?yid=" + str( yls_id) + "&seatno=" + str(seat_no)} logger.debug("headers info : " + str(headers)) # 1秒后,根据返回序列号及之前提交的位置信息,判断本次是否预定上,或是之前已经预定上 time.sleep(1) response2 = requests.post( url + '/seat/get_task_status?serialno=' + str(check_serialno), headers=headers) logger.debug(str(response2.json()["content"])) booking_sd = response2.json()["content"] if ("之前到终端机刷卡" in booking_sd) or ("不可以连续预定." in booking_sd): return True else: return False except Exception as e: print("Exception: ", e) return False def book_position(): # 在指定时间内才开始预定 now_time = datetime.datetime.now().time().strftime("%H:%M:%S") while END_TIME < now_time < START_TIME: logger.info("booking time is not coming, waiting 30 s ...... ") time.sleep(30) now_time = datetime.datetime.now().time().strftime("%H:%M:%S") logger.info("start booking ......") # get ysl id while True: ysl_id = get_ysl_id() if len(ysl_id) > 0: break # for every user book seat for user_info in alluserinfo: while True: if check_position(user_info, ysl_id): logger.info("Book success !") break else: pass book_position() # # main function # p_cnt = 1 # if __name__ == '__main__': # p = Pool(p_cnt) # for i in range(p_cnt): # p.apply_async(book_position) # p.close() # p.join() <file_sep># ccelib_booking_seat booking_seat
268c82513fbe5b315bde0636f6834ff514843e20
[ "Markdown", "Python" ]
2
Markdown
zhangwj8517/ccelib_booking_seat
2be6a5adad10d7b6d1c517b39da6e21021566a9a
6a46a0c6184b62c83e63318c395679a165218eb3
refs/heads/master
<repo_name>jrbrady94/jrbrady94.GITHUB.IO<file_sep>/README.md
# jrbrady94.GITHUB.IO
Web Development Project
<file_sep>/index.html
<!DOCTYPE html>
<html lang="en">
<head>
  <meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <link href="CSS/css.css" rel="stylesheet" type="text/css">
  <title>index.html</title>
</head>
<body>
  <header>
    <figure>
      <img width="200" height="150" src="cit230/ponder-prove/images/Logo.jpg" alt="Paradise Logo" title="Find your Paradise!">
    </figure>
  </header>
  <br>
  <nav>
    <ol>
      <li><a href="index.html">Home</a></li>
      <li><a href="../jrbrady94.GITHUB.IO/indv-website/intro.html">Introduction</a></li>
      <li><a href="../jrbrady94.GITHUB.IO/indv-website/Packages%20Info.html">Packages Information</a></li>
      <li><a href="../jrbrady94.GITHUB.IO/indv-website/Hawaii-Trip.html">Hawaiian Trip Information</a></li>
      <li><a href="../jrbrady94.GITHUB.IO/indv-website/Bahammas-Trip.html">Bahammas Trip Information</a></li>
      <li><a href="../jrbrady94.GITHUB.IO/cit230-links.html"> CIT230-links (Link)</a></li>
    </ol>
  </nav>
  <div class="flex-row">
    <div class="img-ctr"><img src="../jrbrady94.GITHUB.IO/cit230/ponder-prove/images/cruise-home.jpg" alt="A View of the Ship" /></div>
    <div class="flex-item">
      <p>Welcome to Paradise Cruises! To find more information, please click on the <a href="indv-website/intro.html">Intro</a> link for our introduction. Please navigate through all of our links and click on our Home tab to bring you back here. We hope you book with us real soon! Please feel free at any time to call our toll-free line to book your cruise today!</p>
    </div>
  </div>
  <footer>
    Call 1-800-BOOKNOW!
  </footer>
</body>
</html>
d755d89f91f22a6285b839b024c2f578de2525f0
[ "Markdown", "HTML" ]
2
Markdown
jrbrady94/jrbrady94.GITHUB.IO
f90369a50e83aac5b0e8523d8e6794766b37e140
f26b4c031484497566a9744c53c01f8438ccccd3
refs/heads/master
<repo_name>cmwgh/brouwers<file_sep>/brouwers/src/main/resources/teksten.properties
goedeMorgen=Goede morgen
goedeMiddag=Goede middag
goedeAvond=Goede avond
goedeNacht=Goede nacht
c43aafbd118a8d605de3b417eec9a612cbf6a96d
[ "INI" ]
1
INI
cmwgh/brouwers
5ba3f0837a8fcb16a0681ca772ec290987714edb
f3f97540434c79620540770a70998bad5ade0529
refs/heads/master
<repo_name>sve009/csc207-skip-lists<file_sep>/src/README.md
Skip Lists Assignment
=====================

Some starter code for the skip list assignment.

### Analysis

First, three scatter plots have been created for the set, get, and remove methods.

Next, here is the table used for these plots. It was collected by the SkipListExpt class:

```text
       |100 |1000 |10000 |100000 |1000000 |10000000|
Set    |5   |9    |12    |18     |21      |24      |
Get    |4   |10   |11    |16     |18      |23      |
Remove |5   |10   |14    |19     |21      |23      |
```

Together these clearly show some kind of logarithmic function, as the slope of the function approaches zero as n grows larger and larger. I would highly recommend looking at the scatter plots.

Coolio. Sams out.

### Sources

http://www.alcula.com/ --- Used to generate scatterplots
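One quick way to make the logarithmic claim in the Analysis section concrete is to divide each measured value by log2(n): roughly constant ratios across input sizes are consistent with O(log n) growth. The snippet below is only an illustrative check using the numbers copied from the table above; it is not part of the SkipListExpt class.

```python
# Divide each measured value from the table above by log2(n);
# near-constant ratios across sizes suggest O(log n) behaviour.
import math

sizes = [100, 1_000, 10_000, 100_000, 1_000_000, 10_000_000]
measured = {
    "Set":    [5, 9, 12, 18, 21, 24],
    "Get":    [4, 10, 11, 16, 18, 23],
    "Remove": [5, 10, 14, 19, 21, 23],
}

for op, values in measured.items():
    ratios = [v / math.log2(n) for v, n in zip(values, sizes)]
    print(op, " ".join(f"{r:.2f}" for r in ratios))
```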
4f22ee7390983e7e9d8b55447eb1b7ad47ea3453
[ "Markdown" ]
1
Markdown
sve009/csc207-skip-lists
a46f40ce13518c87ecd2371a9d0c72cd79cc9690
61a6d66ffc103bfa2a6cda5fcfe01ed8d7347f2e
refs/heads/master
<repo_name>DATGGolo/website-manager<file_sep>/README.md # website-manager website manager <file_sep>/php/FindOrder.php <?php include 'DataBase.php'; function FindbyId($id) { $db = DB(); $sql = "SELECT A\n" . " .會員編號,\n" . " A.\"會員帳號\",\n" . " A.\"行程編號\",\n" . " a1.\"行程名稱\",\n" . " a1.\"行程日期\",\n" . "FROM\n" . " \"會員資料\" A,\n" . " \"行程資料\" a1 \n" . "WHERE\n" . " A.\"行程編號\" = '" . $id . "' \n" . " AND A.\"會員編號\" = a1.\"會員編號\" \n" . "ORDER BY\n" . " a1.\"會員編號\";"; $result = $db->query($sql); $out = false; while ($row = $result->fetch(PDO::FETCH_OBJ)) { //PDO::FETCH_OBJ 指定取出資料的型態 // echo '<tr>'; // echo '<td>' . $row->會員編號 . "</td><td>" . $row->會員帳號 . "</td>"; // echo '</tr>'; $schedule = $row->行程編號; echo ' <hr/> <p> 編號:' . $row->會員編號. '</p> <p> 帳號:' . $row->會員帳號 . '</p> <p> 行程編號:' . $row->行程編號 . '</p> <p> 行程名稱:' . $row->行程名稱 . '</p> <P> 行程日期:' . $row->行程日期 . '</p>'; $out = true; } if (!$out) { echo '<div class ="Err" style="color:red;"> 查不到資料! 請檢查輸入資料是否正確!</div>'; echo '<script> swal({ text: "查不到資料! 請檢查輸入資料是否正確!", icon: "error", button: false, timer: 3000, }); </script>'; } } function FindbyName($name) { $db = DB(); $sql = "SELECT A\n" . " .會員編號,\n" . " A.\"會員帳號\",\n" . " A.\"行程編號\",\n" . " a1.\"行程名稱\",\n" . " a1.\"行程日期\",\n" . "FROM\n" . " \"會員資料\" A,\n" . " \"行程資料\" a1 \n" . "WHERE\n" . " A.\"行程編號\" = '" . $id . "' \n" . " AND A.\"會員編號\" = a1.\"會員編號\" \n" . "ORDER BY\n" . " a1.\"會員編號\";"; $result = $db->query($sql); $out = false; while ($row = $result->fetch(PDO::FETCH_OBJ)) { //PDO::FETCH_OBJ 指定取出資料的型態 // echo '<tr>'; // echo '<td>' . $row->會員編號 . "</td><td>" . $row->會員帳號 . "</td>"; // echo '</tr>'; echo ' <hr/> <p> 編號:' . $row->會員編號. '</p> <p> 帳號:' . $row->會員帳號 . '</p> <p> 行程編號:' . $row->行程編號 . '</p> <p> 行程名稱:' . $row->行程名稱 . '</p> <P> 行程日期:' . $row->行程日期 . '</p>'; $out = true; } if (!$out) { echo '<div class ="Err" style="color:red;"> 查不到資料! 請檢查輸入資料是否正確!</div>'; echo '<script> swal({ text: "查不到資料! 請檢查輸入資料是否正確!", icon: "error", button: false, timer: 3000, }); </script>'; } } function FindOrder($id, $name) { $db = DB(); $sql = "SELECT A\n" . " .會員編號,\n" . " A.\"會員帳號\",\n" . " A.\"行程編號\",\n" . " a1.\"行程名稱\",\n" . " a1.\"行程日期\",\n" . "FROM\n" . " \"會員資料\" A,\n" . " \"行程資料\" a1 \n" . "WHERE\n" . " A.\"行程編號\" = '" . $id . "' \n" . " AND A.\"會員編號\" = a1.\"會員編號\" \n" . "ORDER BY\n" . " a1.\"會員編號\";"; $result = $db->query($sql); $out = false; while ($row = $result->fetch(PDO::FETCH_OBJ)) { //PDO::FETCH_OBJ 指定取出資料的型態 // echo '<tr>'; // echo '<td>' . $row->顧客編號 . "</td><td>" . $row->顧客名稱 . "</td>"; // echo '</tr>'; $schedule = $row->房型編號; echo ' <hr/> <p> 編號:' . $row->會員編號. '</p> <p> 帳號:' . $row->會員帳號 . '</p> <p> 行程編號:' . $row->行程編號 . '</p> <p> 行程名稱:' . $row->行程名稱 . '</p> <P> 行程日期:' . $row->行程日期 . '</p>'; $out = true; } if (!$out) { echo '<div class ="Err" style="color:red;"> 查不到資料! 請檢查輸入資料是否正確!</div>'; echo '<script> swal({ text: "查不到資料! 請檢查輸入資料是否正確!", icon: "error", button: false, timer: 3000, }); </script>'; } $db = NULL; } function FindUser ($acc , $password){ $db = DB(); $sql = "SELECT * FROM \"Manager\" WHERE \"account\"='".$acc."' and \"password\"='".$<PASSWORD>."'"; $result = $db->query($sql); $row = $result->fetch(PDO::FETCH_ASSOC); if($row>1){ $_SESSION["acc"] = $acc; $_SESSION["password"] = $<PASSWORD>; header('Location: ../maneger/userIndex.php'); }else{ echo '<script> swal({ text: "查不到資料! 請檢查輸入資料是否正確!", icon: "error", button: false, timer: 3000, }); </script>'; } } function logInSure(){ if($_SESSION["account"] == ""){ // echo '<script> swal({ // text: "未登入或登入逾時! 
兩秒後跳轉至登入畫面!", // icon: "error", // button: false, // timer: 2000, // }); </script>'; header('Location: ../maneger.php'); $_SESSION["unLog"] = true; // echo '<meta http-equiv="refresh" content="2;url=../maneger.php" />'; } } <file_sep>/maneger/maneger.php <!doctype html> <?php session_start(); include '../php/FindOrder.php'; ?> <html> <head> <meta charset="utf-8"> <meta name="viewport" content="width=device-width, initial-scale=1" /> <title>管理者介面</title> <!-- 連結思源中文及css --> <link href="https://fonts.googleapis.com/css?family=Noto+Sans+TC" rel="stylesheet"> <link href="../images/user.jpg" rel="icon"> <link href="css/main.css" rel="stylesheet"> <link href="css/menu.css" rel="stylesheet"> <link href="assets/css/main.css" rel="stylesheet"> <script src="assets/js/sweetalert.min.js" type="text/javascript"></script> <meta charset="utf-8" /> <meta name="viewport" content="width=device-width, initial-scale=1" /> <!-------------------------> </head> <body> <?php if(isset($_SESSION["unLog"])){ if($_SESSION["unLog"]){ echo '<script> swal({ text: "未登入或登入逾時!", icon: "error", button: false, timer: 2000, }); </script>'; session_unset(); } } if (isset($_POST["next"])) { findUser($_POST["account"], $_POST["password"]); } ?> <!-- Header --> <header id="header" class="alt"> <div class="logo"><a href="../index/index.html">TaipeiMRT <span>RESORT</span></a></div> <a href="#menu">Menu</a> </header> <!-- Nav --> <nav id="menu"> <ul class="links"> <li><a href="../../news/news.html">最新消息</a></li> <li><a href="../../room/room.php">\服務</a></li> <li><a href="../../room/roomSpace.php">查詢行程</a></li> <li><a href="../../about/about.html">關於我們</a></li> <li><a href="../../information/information.php">聯絡資訊</a></li> <li style="margin-top: 200%"><a href="../maneger/maneger.php">管理者介面</a></li> <li style="margin-top: 0%"><a href="../maneger/php/logOut.php">登出</a></li> </ul> </nav> <section id="One" class="wrapper style3"> <div class="inner" style="z-index: 1"> <header class="align-center"> <h2>Maneger Page</h2> </header> </div> </section> <!--**************************--> <div class ="nav"> <ul id="navigation" style="z-index: 2; background:#F1EEC2;"> <li><a href="userIndex.php" style="color:#000; ">主頁</a></li> <li class="sub"> <a href="#" style="color:#000; ">會員</a> <ul style="z-index: 2; "> <li><a href="customer/all.php">會員總覽</a></li> <li><a href="customer/add.php">新增</a></li> <li><a href="customer/delete.php">刪除</a></li> <li><a href="customer/change.php">更新</a></li> </ul> </li> <li class="sub"> <a href="#" style="color:#000; ">管理員</a> <ul style="z-index: 2"> <li><a href="employee/all.php">管理員總覽</a></li> <li><a href="employee/add.php">新增</a></li> <li><a href="employee/delete.php">刪除</a></li> <li><a href="employee/change.php">更新</a></li> </ul> </li> <li class="sub"> <a href="#" style="color:#000; ">行程</a> <ul style="z-index: 2"> <li><a href="order/all.php">行程總覽</a></li> <li><a href="order/delete.php">刪除</a></li> <li><a href="order/change.php">更新</a></li> </ul> </li> </ul> </div> <div class="container"> <!--~~~~~~~~~~~~~~~~~--> <div class="content"> <h2>管理者登入</h2> <form method="post" action=""> <div class="6u 12u$(small)" style="margin-left: 20%"> <p>帳號:</p> <input type="text" name="account" id="account" value="" placeholder="" required> </div> <br/> <div class="6u$ 12u$(small)" style="margin-left: 20%"> <p>密碼:</p> <input type="<PASSWORD>" name="password" id="password" value="" placeholder="" required> </div> <div class="12u$"> <ul class="actions"> <div align="right" style="margin-right: 5%"> <li><input type="submit" name="next" 
value="ENTER"></li> </div> </ul> </div> </form> </div> <!-- Scripts --> <script src="assets/js/jquery.min.js"></script> <script src="assets/js/jquery.scrollex.min.js"></script> <script src="assets/js/skel.min.js"></script> <script src="assets/js/util.js"></script> <script src="assets/js/main.js"></script> <script src="https://unpkg.com/sweetalert/dist/sweetalert.min.js"></script> </div> <!--~~~~~~~~~~~~~~~~~--> <div class="footer"> &copy; NTUB GROUP </div> <!--**************************--> </body> </html> <file_sep>/maneger/php/logOut.php <?php session_start(); $_SESSION["acc"] = ""; header('Location: ../maneger.php');<file_sep>/php/roomData.php <?php session_start(); include 'DataBase.php'; $db = DB(); $sql = "INSERT INTO \"顧客資料\" ( \"顧客名稱\", \"生日\", \"身分證字號\", \"連絡電話\"," . " \"電子郵件\", \"性別\" )VALUES( '".$_SESSION["name"] ."', '".$_SESSION["bir"] ."', '".$_SESSION["id"] ."', " . "'".$_SESSION["phone"] ."', '".$_SESSION["email"] ."' , '".$_SESSION["gender"] ."' );"; echo $sql; $db->query($sql); $sql = "SELECT \"顧客編號\" FROM \"顧客資料\" WHERE 身分證字號 = '". $_SESSION["id"] ."'"; $result = $db->query($sql); $row = $result->fetch(PDO::FETCH_OBJ); $cusNo = $row->顧客編號; if(isset($_SESSION["a"])){ $sql = "INSERT INTO \"顧客訂房\" (\"顧客編號\" , \"房型編號\" , \"訂房日期\" ," . " \"訂購間數\" , \"加床\") VALUES ('".$cusNo."' , '".$_SESSION["a"] ."' , '". $_SESSION["OrderDate"] ."' , '".$_SESSION["a_house"]."' , '".$_SESSION["a_bed"]."' )"; echo $sql; $db->query($sql); } if(isset($_SESSION["b"])){ $sql = "INSERT INTO \"顧客訂房\" (\"顧客編號\" , \"房型編號\" , \"訂房日期\" ," . " \"訂購間數\" , \"加床\") VALUES ('".$cusNo."' , '".$_SESSION["b"] ."' , '". $_SESSION["OrderDate"]."' , '".$_SESSION["b_house"]."' , '".$_SESSION["b_bed"]."' )"; echo $sql; $db->query($sql); } if(isset($_SESSION["c"])){ $sql = "INSERT INTO \"顧客訂房\" (\"顧客編號\" , \"房型編號\" , \"訂房日期\" ," . " \"訂購間數\" , \"加床\") VALUES ('".$cusNo."' , '".$_SESSION["c"] ."' , '". $_SESSION["OrderDate"]."' , '".$_SESSION["c_house"]."' , '".$_SESSION["c_bed"]."' )"; echo $sql; $db->query($sql); } if(isset($_SESSION["d"])){ $sql = "INSERT INTO \"顧客訂房\" (\"顧客編號\" , \"房型編號\" , \"訂房日期\" ," . " \"訂購間數\" , \"加床\") VALUES ('".$cusNo."' , '".$_SESSION["d"] ."' , '". $_SESSION["OrderDate"]."' , '".$_SESSION["d_house"]."' , '".$_SESSION["d_bed"]."' )"; echo $sql; $db->query($sql); } if(isset($_SESSION["e"])){ $sql = "INSERT INTO \"顧客訂房\" (\"顧客編號\" , \"房型編號\" , \"訂房日期\" ," . " \"訂購間數\" , \"加床\") VALUES ('".$cusNo."' , '".$_SESSION["e"] ."' , '". $_SESSION["OrderDate"]."' , '".$_SESSION["e_house"]."' , '".$_SESSION["e_bed"]."' )"; echo $sql; $db->query($sql); } if(isset($_SESSION["f"])){ $sql = "INSERT INTO \"顧客訂房\" (\"顧客編號\" , \"房型編號\" , \"訂房日期\" ," . " \"訂購間數\" , \"加床\") VALUES ('".$cusNo."' , '".$_SESSION["f"] ."' , '". $_SESSION["OrderDate"]."' , '".$_SESSION["f_house"]."' , '".$_SESSION["f_bed"]."' )"; echo $sql; $db->query($sql); } if(isset($_SESSION["g"])){ $sql = "INSERT INTO \"顧客訂房\" (\"顧客編號\" , \"房型編號\" , \"訂房日期\" ," . " \"訂購間數\" , \"加床\") VALUES ('".$cusNo."' , '".$_SESSION["g"] ."' , '". $_SESSION["OrderDate"]."' , '".$_SESSION["g_house"]."' , '".$_SESSION["g_bed"]."' )"; echo $sql; $db->query($sql); } if(isset($_SESSION["h"])){ $sql = "INSERT INTO \"顧客訂房\" (\"顧客編號\" , \"房型編號\" , \"訂房日期\" ," . " \"訂購間數\" , \"加床\") VALUES ('".$cusNo."' , '".$_SESSION["h"] ."' , '". $_SESSION["OrderDate"]."' , '".$_SESSION["h_house"]."' , '".$_SESSION["h_bed"]."' )"; echo $sql; $db->query($sql); } if(isset($_SESSION["i"])){ $sql = "INSERT INTO \"顧客訂房\" (\"顧客編號\" , \"房型編號\" , \"訂房日期\" ," . 
" \"訂購間數\" , \"加床\") VALUES ('".$cusNo."' , '".$_SESSION["i"] ."' , '". $_SESSION["OrderDate"]."' , '".$_SESSION["i_house"]."' , '".$_SESSION["i_bed"]."' )"; echo $sql; $db->query($sql); } if(isset($_SESSION["j"])){ $sql = "INSERT INTO \"顧客訂房\" (\"顧客編號\" , \"房型編號\" , \"訂房日期\" ," . " \"訂購間數\" , \"加床\") VALUES ('".$cusNo."' , '".$_SESSION["j"] ."' , '". $_SESSION["OrderDate"]."' , '".$_SESSION["j_house"]."' , '".$_SESSION["j_bed"]."' )"; echo $sql; $db->query($sql); } $db = NULL; header("Location:../room-success/room-success.php"); //$result = $db->query($sql); //$sql = "select * from \"Cus\" "; //$result = $db->query($sql); //echo '<table border="1">'; //while ($row = $result->fetch(PDO::FETCH_OBJ)) { ////PDO::FETCH_OBJ 指定取出資料的型態 // echo '<tr>'; // echo '<td>' .$row->顧客編號 . "</td><td>" . $row->顧客名稱 . "</td>"; // echo '</tr>'; //} //echo '</table>'; <file_sep>/maneger/order/add.php <!doctype html> <?php session_start(); include '../../php/FindOrder.php'; @logInSure(); ?> <html> <head> <meta charset="utf-8"> <meta name="viewport" content="width=device-width, initial-scale=1" /> <title>新增訂單</title> <!-- 連結思源中文及css --> <link href="https://fonts.googleapis.com/css?family=Noto+Sans+TC" rel="stylesheet"> <link href="../../images/user.jpg" rel="icon"> <link href="css/main.css" rel="stylesheet"> <link href="css/menu.css" rel="stylesheet"> <link href="assets/css/main.css" rel="stylesheet"> <meta charset="utf-8" /> <meta name="viewport" content="width=device-width, initial-scale=1" /> <script src="assets/js/sweetalert.min.js" type="text/javascript"></script> <!-------------------------> </head> <body> <?php $nameErr = $emailErr = $genderErr = $idErr = $birErr = $phoneErr = $DateErr = ""; $name = $id = $bir = $num = $phone = $email = ""; $sure = true; if (isset($_POST["Reg"])) { $name = $_POST["name"]; $id = $_POST["id"]; $bir = $_POST["bir"]; $phone = $_POST["phone"]; $email = $_POST["email"]; if (empty($_POST["name"])) { $nameErr = "姓名是必填的!"; $sure = false; } if (empty($_POST["id"])) { $idErr = "身分證是必填的!"; $sure = false; } else { $idtest = test_input($_POST["id"]); if (!preg_match("/^[A-Z]{1}[0-9]{9}$/", $idtest)) { $idErr = "身分證不符合格式!"; $sure = false; } } if (empty($_POST["bir"])) { $birErr = "生日是必填的!"; $sure = false; } else { // $date = (strtotime($bir) - strtotime(date('Y-m-d'))) / (365*3+366); $age = round((time() - strtotime($bir)) / (24 * 60 * 60) / 365.25, 0); if ($age < 20) { $birErr = "低於20歲無法訂房!"; $sure = false; } } if (empty($_POST["phone"])) { $phoneErr = "手機是必填的!"; $sure = false; } else { $phonetest = test_input($_POST["phone"]); if (!preg_match("/^09[0-9]{8}$/", $phonetest)) { $phoneErr = "手機號碼不符合格式!"; $sure = false; } } if (empty($_POST["email"])) { $emailErr = "E-mail是必填的!"; $sure = false; } if ($sure) { $db = DB(); $sql = "INSERT INTO \"顧客資料\" ( \"顧客名稱\", \"生日\", \"身分證字號\", \"連絡電話\"," . " \"電子郵件\", \"性別\" )VALUES( '" . $_POST["name"] . "', '" . $_POST["bir"] . "', '" . $_POST["id"] . "', " . "'" . $_POST["phone"] . "', '" . $_POST["email"] . "' , '" . $_POST["gender"] . 
"' );"; $db->query($sql); // echo 'swal("新增成功!", "回到客戶總覽 或是 客戶新增?", "success").then(function (result) { // // window.location.href = "http://tw.yahoo.com"; // }); '; echo ' <script> swal({ title: "新增成功!", text: "回到客戶總覽 或是 客戶新增?", icon: "success", buttons: { 1: { text: "客戶總覽", value: "客戶總覽", }, 2: { text: "客戶新增", value: "客戶新增", }, }, }).then(function (value) { switch (value) { case"客戶總覽": window.location.href = "all.php"; break; case"客戶新增": window.location.href = "add.php"; break; } }) </script> '; // header("Location:all.php"); } else { $mes = $idErr . $birErr . $phoneErr . $DateErr; echo '<script> swal({ text: "' . $mes . '", icon: "error", button: false, timer: 3000, }); </script>'; } } function test_input($data) { $data = trim($data); $data = stripslashes($data); $data = htmlspecialchars($data); return $data; } ?> <!-- Header --> <header id="header" class="alt"> <div class="logo"><a href="../../index/index.html">渡假村 <span>RESORT</span></a></div> <a href="#menu">Menu</a> </header> <!-- Nav --> <nav id="menu"> <ul class="links"> <li><a href="../../news/news.html">最新消息</a></li> <li><a href="../../room/room.php">訂房服務</a></li> <li><a href="../../search/search.php">查詢訂房</a></li> <li><a href="../../about/about.html">關於我們</a></li> <li><a href="../../information/information.php">聯絡資訊</a></li> <li style="margin-top: 200%"><a href="../maneger/maneger.php">管理者介面</a></li> <li style="margin-top: 0%"><a href="../php/logOut.php">登出</a></li> </ul> </nav> <section id="One" class="wrapper style3"> <div class="inner" style="z-index: 1"> <header class="align-center"> <h2>Maneger Page</h2> </header> </div> </section> <!--**************************--> <div class ="nav"> <ul id="navigation" style="z-index: 2; background:#F1EEC2;"> <li><a href="../userIndex.php" style="color:#000; ">主頁</a></li> <li class="sub"> <a href="#" style="color:#000; ">客戶</a> <ul style="z-index: 2; "> <li><a href="../customer/all.php">客戶總覽</a></li> <li><a href="../customer/add.php">新增</a></li> <li><a href="../customer/delete.php">刪除</a></li> <li><a href="../customer/change.php">更新</a></li> </ul> </li> <li class="sub"> <a href="#" style="color:#000; ">員工</a> <ul style="z-index: 2"> <li><a href="../employee/all.php">員工總覽</a></li> <li><a href="../employee/add.php">新增</a></li> <li><a href="../employee/delete.php">刪除</a></li> <li><a href="../employee/change.php">更新</a></li> </ul> </li> <li class="sub"> <a href="#" style="color:#000; ">訂單</a> <ul style="z-index: 2"> <li><a href="../order/all.php">訂單總覽</a></li> <li><a href="../order/add.php">新增</a></li> <li><a href="../order/delete.php">刪除</a></li> <li><a href="../order/change.php">更新</a></li> </ul> </li> <li class="sub"> <a href="#" style="color:#000; ">報表</a> <ul style="z-index: 2"> <li><a href="/reports/import">進貨報表</a></li> <li><a href="/reports/export">銷貨報表</a></li> <li><a href="/reports/inventory">庫存報表</a></li> </ul> </li> </ul> </div> <div class="container"> <!--~~~~~~~~~~~~~~~~~--> <div class="content"> <h2>新增訂單</h2> <hr/> <form method="post" action="../../room2/room2.php"> <div class="6u 12u$(small)"> <p>姓名:</p> <input type="text" name="name" id="name" value="" placeholder="Name" required> </div> <br/> <div class="6u 12u$(small)"> <p>身分證字號:</p> <input type="text" name="id" id="id" value="" placeholder="ID" required> </div> <br/> <div class="6u$ 12u$(small)"> <p>生日:</p> <input type="date" name="bir" id="bir" value="" placeholder="yyyy-mm-dd" required> </div> <br/> <p>性別:</p> <div class="4u 12u$(small)"> <input type="radio" id="priority-low" name="priority" checked> <label 
for="priority-low">男</label> </div> <div class="4u$ 12u$(small)"> <input type="radio" id="priority-normal" name="priority"> <label for="priority-normal">女</label> </div> <br/> <div class="6u 12u$(xsmall)" ><p>手機:</p> <input type="text" name="phone" id="phone" value="" placeholder="Phone" required> </div> <br/> <div class="6u$ 12u$(xsmall)" ><p>E-mail:</p> <input type="email" name="email" id="email" value="" placeholder="email" required> </div> <div class="12u$"> <ul class="actions"> <div align="right" style="margin-right: 5%"> <li><input type="submit" name="next" value="ADD"></li> </div> </ul> </div> </form> </div> <!-- Scripts --> <script src="assets/js/jquery.min.js"></script> <script src="assets/js/jquery.scrollex.min.js"></script> <script src="assets/js/skel.min.js"></script> <script src="assets/js/util.js"></script> <script src="assets/js/main.js"></script> <script src="https://unpkg.com/sweetalert/dist/sweetalert.min.js"></script> </div> <!--~~~~~~~~~~~~~~~~~--> <div class="footer"> &copy; NTUB GROUP 10 </div> <!--**************************--> </body> </html> <file_sep>/maneger/employee/php/deleteFile.php <?php session_start(); include '../../../php/DataBase.php'; $db = DB(); $sql ="DELETE \n". "FROM\n". " \"員工\"\n". "WHERE\n". " 員工編號 ='". $_SESSION["dele_id"]."'"; $db->query($sql); $_SESSION["dele_sure"] = true; header('Location: ../delete.php');<file_sep>/maneger/userIndex.php <!doctype html> <?php session_start(); include '../php/FindOrder.php'; if ($_SESSION["acc"] == "") { header('Location: maneger.php'); $_SESSION["unLog"] = true; } ?> <html> <head> <meta charset="utf-8"> <meta name="viewport" content="width=device-width, initial-scale=1" /> <title>管理者介面</title> <!-- 連結思源中文及css --> <link href="https://fonts.googleapis.com/css?family=Noto+Sans+TC" rel="stylesheet"> <link href="../../images/user.jpg" rel="icon"> <link href="css/main.css" rel="stylesheet"> <link href="css/menu.css" rel="stylesheet"> <link href="assets/css/main.css" rel="stylesheet"> <meta charset="utf-8" /> <meta name="viewport" content="width=device-width, initial-scale=1" /> <!-------------------------> </head> <body> <!-- Header --> <header id="header" class="alt"> <div class="logo"><a href="../index/index.html">TaipeiMRT <span>RESORT</span></a></div> <a href="#menu">Menu</a> </header> <!-- Nav --> <nav id="menu"> <ul class="links"> <li><a href="../../news/news.html">最新消息</a></li> <li><a href="../../room/room.php">\服務</a></li> <li><a href="../../room/roomSpace.php">查詢行程</a></li> <li><a href="../../about/about.html">關於我們</a></li> <li><a href="../../information/information.php">聯絡資訊</a></li> <li style="margin-top: 200%"><a href="../maneger/maneger.php">管理者介面</a></li> <li style="margin-top: 0%"><a href="../maneger/php/logOut.php">登出</a></li> </ul> </nav> <section id="One" class="wrapper style3"> <div class="inner" style="z-index: 1"> <header class="align-center"> <h2>Maneger Page</h2> </header> </div> </section> <!--**************************--> <div class ="nav"> <ul id="navigation" style="z-index: 2; background:#F1EEC2;"> <li><a href="userIndex.php" style="color:#000; ">主頁</a></li> <li class="sub"> <a href="#" style="color:#000; ">會員</a> <ul style="z-index: 2; "> <li><a href="customer/all.php">會員總覽</a></li> <li><a href="customer/add.php">新增</a></li> <li><a href="customer/delete.php">刪除</a></li> <li><a href="customer/change.php">更新</a></li> </ul> </li> <li class="sub"> <a href="#" style="color:#000; ">管理員</a> <ul style="z-index: 2"> <li><a href="employee/all.php">管理員總覽</a></li> <li><a 
href="employee/add.php">新增</a></li> <li><a href="employee/delete.php">刪除</a></li> <li><a href="employee/change.php">更新</a></li> </ul> </li> <li class="sub"> <a href="#" style="color:#000; ">行程</a> <ul style="z-index: 2"> <li><a href="order/all.php">行程總覽</a></li> <li><a href="order/delete.php">刪除</a></li> <li><a href="order/change.php">更新</a></li> </ul> </li> </ul> </div> <div class="container"> <!--~~~~~~~~~~~~~~~~~--> <div class="content"> <h2>歡迎<?php echo $_SESSION["acc"]; ?></h2> <h3><?php echo date('Y-M-D'); ?></h3> </div> <!-- Scripts --> <script src="assets/js/jquery.min.js"></script> <script src="assets/js/jquery.scrollex.min.js"></script> <script src="assets/js/skel.min.js"></script> <script src="assets/js/util.js"></script> <script src="assets/js/main.js"></script> </div> <!--~~~~~~~~~~~~~~~~~--> <div class="footer"> &copy; NTUB GROUP </div> <!--**************************--> </body> </html> <file_sep>/php/DataBase.php <?php function DB(){ $hostname = 'ec2-34-225-82-212.compute-1.amazonaws.com'; $username = 'seuuuplrvwsfbi'; $password = '<PASSWORD>'; $db_name = "ddhbf9d4bi4ap"; try { $db = new PDO("pgsql:host=" . $hostname . ";dbname=" . $db_name, $username, $password, array(PDO::MYSQL_ATTR_INIT_COMMAND => "SET NAMES utf8")); //PDO::MYSQL_ATTR_INIT_COMMAND 設定編碼 //echo '連線成功'; $db->setAttribute(PDO::ATTR_ERRMODE, PDO::ERRMODE_EXCEPTION); //錯誤訊息提醒 return $db; // $db = null; //結束與資料庫連線 } catch (PDOException $e) { //error message echo $e->getMessage(); } } ?> <file_sep>/maneger/customer/delete.php <!doctype html> <?php session_start(); include '../../php/FindOrder.php'; @logInSure(); ?> <html> <head> <meta charset="utf-8"> <meta name="viewport" content="width=device-width, initial-scale=1" /> <title>刪除會員</title> <!-- 連結思源中文及css --> <link href="https://fonts.googleapis.com/css?family=Noto+Sans+TC" rel="stylesheet"> <link href="../images/user.jpg" rel="icon"> <link href="css/main.css" rel="stylesheet"> <link href="css/menu.css" rel="stylesheet"> <link href="assets/css/main.css" rel="stylesheet"> <meta charset="utf-8" /> <meta name="viewport" content="width=device-width, initial-scale=1" /> <script src="assets/js/sweetalert.min.js" type="text/javascript"></script> <script src="https://unpkg.com/sweetalert/dist/sweetalert.min.js"></script> <!-------------------------> </head> <body> <?php if (isset($_SESSION["dele_sure"])) { if ($_SESSION["dele_sure"]) { echo '<script> swal({ text: "刪除成功!", icon: "success", button: false, timer: 3000, }); </script>'; $_SESSION["dele_sure"] = false; } } $sure = true; if (isset($_POST["Reg"])) { if (empty($_POST["id"])) { $nameErr = "姓名是必填的!"; $sure = false; } if ($sure) { $_SESSION["dele_id"] = $_POST["id"]; echo ' <script> swal({ title: "確定刪除?", text: "你將無法恢復此資料!", icon: "warning", dangerMode: true, buttons: { 1: { text: "取消", value: "取消", }, 2: { text: "確定刪除!", value: "確定刪除", }, }, }).then(function (value) { switch (value) { case"取消": window.location.href = "delete.php"; break; case"確定刪除": window.location.href = "php/deleteFile.php"; break; } }) </script> '; } } ?> <!-- Header --> <header id="header" class="alt"> <div class="logo"><a href="../../index/index.html">TaipeiMRT <span>RESORT</span></a></div> <a href="#menu">Menu</a> </header> <!-- Nav --> <nav id="menu"> <ul class="links"> <li><a href="../../news/news.html">最新消息</a></li> <li><a href="../../room/room.php">行程服務</a></li> <li><a href="../../room/roomSpace.php">查詢行程</a></li> <li><a href="../../about/about.html">關於我們</a></li> <li><a href="../../information/information.php">聯絡資訊</a></li> <li 
style="margin-top: 200%"><a href="../maneger/maneger.php">管理者介面</a></li> <li style="margin-top: 0%"><a href="../php/logOut.php">登出</a></li> </ul> </nav> <section id="One" class="wrapper style3"> <div class="inner" style="z-index: 1"> <header class="align-center"> <h2>Maneger Page</h2> </header> </div> </section> <!--**************************--> <div class ="nav"> <ul id="navigation" style="z-index: 2; background:#F1EEC2;"> <li><a href="../userIndex.php" style="color:#000; ">主頁</a></li> <li class="sub"> <a href="#" style="color:#000; ">客戶</a> <ul style="z-index: 2; "> <li><a href="../customer/all.php">客戶總覽</a></li> <li><a href="../customer/add.php">新增</a></li> <li><a href="../customer/delete.php">刪除</a></li> <li><a href="../customer/change.php">更新</a></li> </ul> </li> <li class="sub"> <a href="#" style="color:#000; ">員工</a> <ul style="z-index: 2"> <li><a href="../employee/all.php">員工總覽</a></li> <li><a href="../employee/add.php">新增</a></li> <li><a href="../employee/delete.php">刪除</a></li> <li><a href="../employee/change.php">更新</a></li> </ul> </li> <li class="sub"> <a href="#" style="color:#000; ">訂單</a> <ul style="z-index: 2"> <li><a href="../order/all.php">訂單總覽</a></li> <li><a href="../order/delete.php">刪除</a></li> <li><a href="../order/change.php">更新</a></li> </ul> </li> </ul> </div> <div class="container"> <!--~~~~~~~~~~~~~~~~~--> <div class="content"> <h2>刪除會員</h2> <hr/> <form method="post" action=""> <div class="6u 12u$(small)"> <p>會員編號:</p> <input type="text" name="id" id="big" value="" placeholder="Number" required> <script> var url = location.href; //之後去分割字串把分割後的字串放進陣列中 var ary1 = url.split('?'); //此時ary1裡的內容為: //ary1[0] = 'index.aspx',ary2[1] = 'id=U001&name=GQSM' //下一步把後方傳遞的每組資料各自分割 var ary2 = ary1[1].split('&'); //此時ary2裡的內容為: //ary2[0] = 'id=U001',ary2[1] = 'name=GQSM' //最後如果我們要找id的資料就直接取ary[0]下手,name的話就是ary[1] var ary3 = ary2[0].split('='); //此時ary3裡的內容為: //ary3[0] = 'id',ary3[1] = 'U001' //取得id值 var id = ary3[1]; var aee = 10; document.getElementById("big").value = id; </script> </div> <div class="12u$"> <ul class="actions"> <div align="right" style="margin-right: 5%"> <li><input type="submit" name="Reg" value="刪除"></li> </div> </ul> </div> </form> </div> <!-- Scripts --> <script src="assets/js/jquery.min.js"></script> <script src="assets/js/jquery.scrollex.min.js"></script> <script src="assets/js/skel.min.js"></script> <script src="assets/js/util.js"></script> <script src="assets/js/main.js"></script> <script> function getQueryVariable(variable) { var query = window.location.search.substring(1); var vars = query.split("&"); for (var i = 0; i < vars.length; i++) { var pair = vars[i].split("="); if (pair[0] == variable) { return pair[1]; } } return(false); } </script> </div> <!--~~~~~~~~~~~~~~~~~--> <div class="footer"> &copy; NTUB GROUP </div> <!--**************************--> </body> </html> <file_sep>/maneger/order/change2.php <!doctype html> <?php session_start(); include '../../php/FindOrder.php'; include_once '../../php/DataBase.php'; @logInSure(); ?> <html> <head> <meta charset="utf-8"> <meta name="viewport" content="width=device-width, initial-scale=1" /> <title>更新訂單</title> <!-- 連結思源中文及css --> <link href="https://fonts.googleapis.com/css?family=Noto+Sans+TC" rel="stylesheet"> <link href="../../images/user.jpg" rel="icon"> <link href="css/main.css" rel="stylesheet"> <link href="css/menu.css" rel="stylesheet"> <link href="assets/css/main.css" rel="stylesheet"> <meta charset="utf-8" /> <meta name="viewport" content="width=device-width, initial-scale=1" /> <script 
src="assets/js/sweetalert.min.js" type="text/javascript"></script> <script src="https://unpkg.com/sweetalert/dist/sweetalert.min.js"></script> <!-------------------------> </head> <body> <?php $idNumErr = $cusidErr = $roomidErr = $resDateErr = $numErr = $bedErr = ""; $idNum = $cusid = $roomid = $num = $resDate = $bed = ""; $sure = true; if (isset($_POST["Reg"])) { $cusid = $_POST["cusid"]; $roomid = $_POST["roomid"]; $resDate = $_POST["resDate"]; $num = $_POST["num"]; $bed = $_POST["bed"]; if (empty($_POST["cusid"])) { $nameErr = "顧客編號是必填的!"; $sure = false; } if (empty($_POST["roomid"])) { $roomidErr = "房型編號是必填的!"; $sure = false; } if (empty($_POST["resDate"])) { $resDateErr = "訂房日期是必填的!"; $sure = false; } else { // $date = (strtotime($bir) - strtotime(date('Y-m-d'))) / (365*3+366); $age = round((time() - strtotime($resDate))); if ($age > 0) { $resDateErr = "訂房日期不能是過去!"; $sure = false; } } if (empty($_POST["num"])) { $numErr = "訂購間數是必填的!"; $sure = false; } if (empty($_POST["bed"])) { $bedErr = "加床是必填的!"; $sure = false; }else{ if($_POST["bed"]>2 || $_POST["bed"] <0){ $bedErr = "加床不可超過限制!"; $sure = false; } } if ($sure) { $db = DB(); $sql = "UPDATE \"顧客訂房\" \n" . "SET \"訂單編號\" = ".$_SESSION["idNum"].",\n" . "\"顧客編號\" = ".$_POST["cusid"].",\n" . "\"房型編號\" = '".$_POST["roomid"]."',\n" . "\"訂房日期\" = '".$_POST["resDate"]."',\n" . "\"訂購間數\" = ".$_POST["num"].",\n" . "\"加床\" = ".$_POST["bed"]."\n" . "WHERE\n" . " \"訂單編號\" =" . $_SESSION["idNum"]; $db->query($sql); // echo 'swal("新增成功!", "回到訂單總覽 或是 訂單新增?", "success").then(function (result) { // // window.location.href = "http://tw.yahoo.com"; // }); '; echo ' <script> swal({ title: "更改成功!", text: "回到訂單總覽 或是 更新訂單?", icon: "success", buttons: { 1: { text: "訂單總覽", value: "訂單總覽", }, 2: { text: "更新訂單", value: "更新訂單", }, }, }).then(function (value) { switch (value) { case"訂單總覽": window.location.href = "all.php"; break; case"更新訂單": window.location.href = "change.php"; break; } }) </script> '; // header("Location:all.php"); } else { $mes = $nameErr . $roomidErr . $resDateErr .$numErr . $bedErr; echo '<script> swal({ text: "' . $mes . 
'", icon: "error", button: false, timer: 3000, }); </script>'; } } function test_input($data) { $data = trim($data); $data = stripslashes($data); $data = htmlspecialchars($data); return $data; } ?> <!-- Header --> <header id="header" class="alt"> <div class="logo"><a href="../../index/index.html">渡假村 <span>RESORT</span></a></div> <a href="#menu">Menu</a> </header> <!-- Nav --> <nav id="menu"> <ul class="links"> <li><a href="../../news/news.html">最新消息</a></li> <li><a href="../../room/room.php">訂房服務</a></li> <li><a href="../../room/roomSpace.php">查詢空房</a></li> <li><a href="../../search/search.php">查詢訂房</a></li> <li><a href="../../about/about.html">關於我們</a></li> <li><a href="../../information/information.php">聯絡資訊</a></li> <li style="margin-top: 200%"><a href="../maneger/maneger.php">管理者介面</a></li> <li style="margin-top: 0%"><a href="../php/logOut.php">登出</a></li> </ul> </nav> <section id="One" class="wrapper style3"> <div class="inner" style="z-index: 1"> <header class="align-center"> <h2>Maneger Page</h2> </header> </div> </section> <!--**************************--> <div class ="nav"> <ul id="navigation" style="z-index: 2; background:#F1EEC2;"> <li><a href="../userIndex.php" style="color:#000; ">主頁</a></li> <li class="sub"> <a href="#" style="color:#000; ">客戶</a> <ul style="z-index: 2; "> <li><a href="../customer/all.php">客戶總覽</a></li> <li><a href="../customer/add.php">新增</a></li> <li><a href="../customer/delete.php">刪除</a></li> <li><a href="../customer/change.php">更新</a></li> </ul> </li> <li class="sub"> <a href="#" style="color:#000; ">員工</a> <ul style="z-index: 2"> <li><a href="../employee/all.php">員工總覽</a></li> <li><a href="../employee/add.php">新增</a></li> <li><a href="../employee/delete.php">刪除</a></li> <li><a href="../employee/change.php">更新</a></li> </ul> </li> <li class="sub"> <a href="#" style="color:#000; ">訂單</a> <ul style="z-index: 2"> <li><a href="../order/all.php">訂單總覽</a></li> <li><a href="../order/delete.php">刪除</a></li> <li><a href="../order/change.php">更新</a></li> </ul> </li> </ul> </div> <div class="container"> <!--~~~~~~~~~~~~~~~~~--> <div class="content"> <h2>更新訂單</h2> <hr/> <p>訂單編號:<?php echo $_SESSION["idNum"]; ?></p> <br> <br> <form method="post" action=""> <div class="6u 12u$(small)"> <p>顧客編號:</p> <input type="number" name="cusid" id="cusid" value="<?php echo $_SESSION["cusid"]; ?>" placeholder="Name" required> </div> <br/> <div class="6u 12u$(small)"> <p>房型編號:</p> <input type="text" name="roomid" id="roomid" value="<?php echo $_SESSION["roomid"]; ?>" placeholder="R001-R010" required> </div> <br/> <div class="6u$ 12u$(small)"> <p>訂房日期:</p> <input type="date" name="resDate" id="resDate" value="<?php echo $_SESSION["resDate"]; ?>" placeholder="yyyy-mm-dd" required> </div> <br/> <p>間數:</p> <div class="12u$"> <div class="select-wrapper"> <input type="number" name="num" id="num" value="<?php echo $_SESSION["num"]; ?>" placeholder="1-4" required> </div> </div> <br/> <p>加床(張數):</p> <div class="12u$"> <div class="select-wrapper"> <input type="number" name="bed" id="bed" value="<?php echo $_SESSION["bed"]; ?>" placeholder="0-2" required> </div> </div> </div> <div class ="Err" style="color:red;"> <?php echo "<p>" . $cusidErr . "</p>"; echo "<p>" . $roomidErr . "</p>"; echo "<p>" . $resDateErr . "</p>"; echo "<p>" . $numErr . "</p>"; echo "<p>" . $bedErr . 
"</p>"; ?> </div> <div class="12u$"> <ul class="actions"> <div align="right" style="margin-right: 5%"> <li><input type="submit" name="Reg" value="ADD"></li> </div> </ul> </div> </form> </div> <!-- Scripts --> <script src="assets/js/jquery.min.js"></script> <script src="assets/js/jquery.scrollex.min.js"></script> <script src="assets/js/skel.min.js"></script> <script src="assets/js/util.js"></script> <script src="assets/js/main.js"></script> </div> <!--~~~~~~~~~~~~~~~~~--> <div class="footer"> &copy; NTUB GROUP 10 </div> <!--**************************--> </body> </html><file_sep>/maneger/employee/search.php <!doctype html> <?php session_start(); include '../../php/FindOrder.php'; @logInSure(); ?> <html> <head> <meta charset="utf-8"> <meta name="viewport" content="width=device-width, initial-scale=1" /> <title>查詢員工</title> <!-- 連結思源中文及css --> <link href="https://fonts.googleapis.com/css?family=Noto+Sans+TC" rel="stylesheet"> <link href="../../images/user.jpg" rel="icon"> <link href="css/main.css" rel="stylesheet"> <link href="css/menu.css" rel="stylesheet"> <link href="assets/css/main.css" rel="stylesheet"> <meta charset="utf-8" /> <meta name="viewport" content="width=device-width, initial-scale=1" /> <!-------------------------> </head> <body> <?php LogInSure(); ?> <!-- Header --> <header id="header" class="alt"> <div class="logo"><a href="../index/index.html">渡假村 <span>RESORT</span></a></div> <a href="#menu">Menu</a> </header> <!-- Nav --> <nav id="menu"> <ul class="links"> <li><a href="../../news/news.html">最新消息</a></li> <li><a href="../../room/room.php">訂房服務</a></li> <li><a href="../room/roomSpace.php">查詢空房</a></li> <li><a href="../../search/search.php">查詢訂房</a></li> <li><a href="../../about/about.html">關於我們</a></li> <li><a href="../../information/information.php">聯絡資訊</a></li> <li style="margin-top: 200%"><a href="../maneger/maneger.php">管理者介面</a></li> <li style="margin-top: 0%"><a href="../php/logOut.php">登出</a></li> </ul> </nav> <section id="One" class="wrapper style3"> <div class="inner" style="z-index: 1"> <header class="align-center"> <h2>Maneger Page</h2> </header> </div> </section> <!--**************************--> <div class ="nav"> <ul id="navigation" style="z-index: 2; background:#F1EEC2;"> <li><a href="../userIndex.php" style="color:#000; ">主頁</a></li> <li class="sub"> <a href="#" style="color:#000; ">客戶</a> <ul style="z-index: 2; "> <li><a href="../customer/all.php">客戶總覽</a></li> <li><a href="../customer/add.php">新增</a></li> <li><a href="../customer/delete.php">刪除</a></li> <li><a href="../customer/change.php">更新</a></li> </ul> </li> <li class="sub"> <a href="#" style="color:#000; ">員工</a> <ul style="z-index: 2"> <li><a href="../employee/all.php">員工總覽</a></li> <li><a href="../employee/add.php">新增</a></li> <li><a href="../employee/delete.php">刪除</a></li> <li><a href="../employee/change.php">更新</a></li> </ul> </li> <li class="sub"> <a href="#" style="color:#000; ">訂單</a> <ul style="z-index: 2"> <li><a href="../order/all.php">訂單總覽</a></li> <li><a href="../order/delete.php">刪除</a></li> <li><a href="../order/change.php">更新</a></li> </ul> </li> </ul> </div> <div class="container"> <!--~~~~~~~~~~~~~~~~~--> <div class="content"> <h2>查詢員工</h2> <hr/> <form method="post" action="../room2/room2.html"> <div class="6u 12u$(small)"> <p>員工編號:</p> <input type="text" name="name" id="name" value="" placeholder="Number" required> </div> <div class="12u$"> <ul class="actions"> <div align="right" style="margin-right: 5%"> <li><input type="submit" name="next" value="查詢"></li> </div> </ul> </div> 
</form> </div> <!-- Scripts --> <script src="assets/js/jquery.min.js"></script> <script src="assets/js/jquery.scrollex.min.js"></script> <script src="assets/js/skel.min.js"></script> <script src="assets/js/util.js"></script> <script src="assets/js/main.js"></script> </div> <!--~~~~~~~~~~~~~~~~~--> <div class="footer"> &copy; NTUB GROUP 10 </div> <!--**************************--> </body> </html> <file_sep>/maneger/order/all.php <!doctype html> <?php session_start(); include '../../php/FindOrder.php'; @include '../../DataBase.php'; @logInSure(); ?> <html> <head> <meta charset="utf-8"> <meta name="viewport" content="width=device-width, initial-scale=1" /> <title>新增訂單</title> <!-- 連結思源中文及css --> <link href="https://fonts.googleapis.com/css?family=Noto+Sans+TC" rel="stylesheet"> <link href="../../images/user.jpg" rel="icon"> <link href="css/main.css" rel="stylesheet"> <link href="css/menu.css" rel="stylesheet"> <link href="assets/css/main.css" rel="stylesheet"> <meta charset="utf-8" /> <meta name="viewport" content="width=device-width, initial-scale=1" /> <!-------------------------> </head> <body> <!-- Header --> <header id="header" class="alt"> <div class="logo"><a href="../../index/index.html">渡假村 <span>RESORT</span></a></div> <a href="#menu">Menu</a> </header> <!-- Nav --> <nav id="menu"> <ul class="links"> <li><a href="../../news/news.html">最新消息</a></li> <li><a href="../../room/room.php">訂房服務</a></li> <li><a href="../../room/roomSpace.php">查詢空房</a></li> <li><a href="../../search/search.php">查詢訂房</a></li> <li><a href="../../about/about.html">關於我們</a></li> <li><a href="../../information/information.php">聯絡資訊</a></li> <li style="margin-top: 200%"><a href="../maneger/maneger.php">管理者介面</a></li> <li style="margin-top: 0%"><a href="../php/logOut.php">登出</a></li> </ul> </nav> <section id="One" class="wrapper style3"> <div class="inner" style="z-index: 1"> <header class="align-center"> <h2>Maneger Page</h2> </header> </div> </section> <!--**************************--> <div class ="nav"> <ul id="navigation" style="z-index: 2; background:#F1EEC2;"> <li><a href="../userIndex.php" style="color:#000; ">主頁</a></li> <li class="sub"> <a href="#" style="color:#000; ">客戶</a> <ul style="z-index: 2; "> <li><a href="../customer/all.php">客戶總覽</a></li> <li><a href="../customer/add.php">新增</a></li> <li><a href="../customer/delete.php">刪除</a></li> <li><a href="../customer/change.php">更新</a></li> </ul> </li> <li class="sub"> <a href="#" style="color:#000; ">員工</a> <ul style="z-index: 2"> <li><a href="../employee/all.php">員工總覽</a></li> <li><a href="../employee/add.php">新增</a></li> <li><a href="../employee/delete.php">刪除</a></li> <li><a href="../employee/change.php">更新</a></li> </ul> </li> <li class="sub"> <a href="#" style="color:#000; ">訂單</a> <ul style="z-index: 2"> <li><a href="../order/all.php">訂單總覽</a></li> <li><a href="../order/delete.php">刪除</a></li> <li><a href="../order/change.php">更新</a></li> </ul> </li> </ul> </div> <div class="container"> <!--~~~~~~~~~~~~~~~~~--> <div class="content"> <h2>訂單總覽</h2> <hr/> <?php $db = DB(); $sql = "SELECT * FROM \"顧客訂房\" ORDER BY \"訂單編號\""; $result = $db->query($sql); // echo '<table border="1">'; // while ($row = $result->fetch(PDO::FETCH_OBJ)) { ////PDO::FETCH_OBJ 指定取出資料的型態 // echo '<tr>'; // echo '<td>' . $row->顧客編號 . "</td><td>" . $row->顧客名稱 . 
"</td>"; // echo '</tr>'; // } // echo '</table>'; ?> <P> 搜尋訂單:</p><input type="search" class="light-table-filter" data-table="order-table" placeholder="請輸入關鍵字"> <table id="table-3" class="order-table" > <thead> <!--必填--> <tr> <th>訂單編號</th> <th >顧客編號</th> <th >房型編號</th> <th>訂房日期</th> <th>訂購間數</th> <th>加床</th> <th>更新</th> <th>刪除</th> </tr> </thead> <tbody> <?php while ($row = $result->fetch(PDO::FETCH_OBJ)) { //PDO::FETCH_OBJ 指定取出資料的型態 echo '<tr>'; echo '<td>' . $row->訂單編號 . "</td>" . "<td>" . $row->顧客編號 . "</td>" . "<td>" . $row->房型編號 . "</td>" . "<td>" . $row->訂房日期 . "</td>" . "<td>" . $row->訂購間數 . "</td>" . "<td>" . $row->加床 . "</td>" . "<td> <button type=\"button\" onclick='location.href=\"change.php?id=" . $row->訂單編號 . "\"'>更新</button></td>" . "<td> <button type=\"button\" onclick='location.href=\"delete.php?id=" . $row->訂單編號 . "\"'>刪除</button></td>"; echo '</tr>'; } ?> </tbody> </table> </div> <!-- Scripts --> <script src="assets/js/jquery.min.js"></script> <script src="assets/js/jquery.scrollex.min.js"></script> <script src="assets/js/skel.min.js"></script> <script src="assets/js/util.js"></script> <script src="assets/js/main.js"></script> <script language="javascript"> </script> </div> <!--~~~~~~~~~~~~~~~~~--> <div class="footer"> &copy; NTUB GROUP 10 </div> <!--**************************--> <script> (function (document) { 'use strict'; // 建立 LightTableFilter var LightTableFilter = (function (Arr) { var _input; // 資料輸入事件處理函數 function _onInputEvent(e) { _input = e.target; var tables = document.getElementsByClassName(_input.getAttribute('data-table')); Arr.forEach.call(tables, function (table) { Arr.forEach.call(table.tBodies, function (tbody) { Arr.forEach.call(tbody.rows, _filter); }); }); } // 資料篩選函數,顯示包含關鍵字的列,其餘隱藏 function _filter(row) { var text = row.textContent.toLowerCase(), val = _input.value.toLowerCase(); row.style.display = text.indexOf(val) === -1 ? 'none' : 'table-row'; } return { // 初始化函數 init: function () { var inputs = document.getElementsByClassName('light-table-filter'); Arr.forEach.call(inputs, function (input) { input.oninput = _onInputEvent; }); } }; })(Array.prototype); // 網頁載入完成後,啟動 LightTableFilter document.addEventListener('readystatechange', function () { if (document.readyState === 'complete') { LightTableFilter.init(); } }); })(document); </script> </body> </html> <file_sep>/php/findRoomSpace.php <?php session_start(); include 'DataBase.php'; function findRoomSpace($date) { $db = DB(); $sql = "SELECT\n" . " * \n" . "FROM\n" . " 房型資料 "; $result = $db->query($sql); while ($row = $result->fetch(PDO::FETCH_OBJ)) { //PDO::FETCH_OBJ 指定取出資料的型態` switch ($row->房型編號) { case "R001" : $_SESSION["R001"] = $row->總間數; break; case "R002" : $_SESSION["R002"] = $row->總間數; break; case "R003" : $_SESSION["R003"] = $row->總間數; break; case "R004" : $_SESSION["R004"] = $row->總間數; break; case "R005" : $_SESSION["R005"] = $row->總間數; break; case "R006" : $_SESSION["R006"] = $row->總間數; break; case "R007" : $_SESSION["R007"] = $row->總間數; break; case "R008" : $_SESSION["R008"] = $row->總間數; break; case "R009" : $_SESSION["R009"] = $row->總間數; break; case "R010" : $_SESSION["R010"] = $row->總間數; break; } } //---------------------------------------------------------------- $sql = "SELECT\n" . " * \n" . "FROM\n" . " 顧客訂房 \n" . "WHERE\n" . " 訂房日期 = '" . $date . 
"'"; $result = $db->query($sql); while ($row = $result->fetch(PDO::FETCH_OBJ)) { //PDO::FETCH_OBJ 指定取出資料的型態 switch ($row->房型編號) { case "R001" : $_SESSION["R001"] -= (int)$row->訂購間數; break; case "R002" : $_SESSION["R002"] -= (int)$row->訂購間數; break; case "R003" : $_SESSION["R003"] -= (int)$row->訂購間數; break; case "R004" : $_SESSION["R004"] -=(int)$row->訂購間數; break; case "R005" : $_SESSION["R005"] -= (int)$row->訂購間數; break; case "R006" : $_SESSION["R006"] -= (int)$row->訂購間數; break; case "R007" : $_SESSION["R007"] -= (int)$row->訂購間數; break; case "R008" : $_SESSION["R008"] -= (int)$row->訂購間數; break; case "R009" : $_SESSION["R009"] -= (int)$row->訂購間數; break; case "R010" : $_SESSION["R010"] -= (int)$row->訂購間數; break; } } }
aabcabf9c16056843a1a8802a7f5625a27967dd0
[ "Markdown", "PHP" ]
14
Markdown
DATGGolo/website-manager
67944bd1a9f6c50316b505974e30efea40a9121f
16607e0f1e2481687e4ea6d8efdc889ef644b141
refs/heads/master
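A note on the website-manager sources above: php/DataBase.php, php/findRoomSpace.php and the maneger/* pages all build their SQL by concatenating request values directly into the query string. A minimal sketch of the same date-filtered booking lookup done with a PDO prepared statement is below; it reuses the existing DB() helper and the table/column names from findRoomSpace.php, while the function name findBookingsByDate and the overall shape are illustrative assumptions, not part of the original code.

```php
<?php
// Sketch only: a parameterized version of the booking query in findRoomSpace.php.
// DB() comes from php/DataBase.php; findBookingsByDate is a made-up helper name.
include 'DataBase.php';

function findBookingsByDate($date) {
    $db = DB();
    // A named placeholder keeps $date out of the SQL text, so a crafted value
    // cannot change the structure of the query.
    $stmt = $db->prepare('SELECT 房型編號, 訂購間數 FROM 顧客訂房 WHERE 訂房日期 = :date');
    $stmt->execute([':date' => $date]);
    return $stmt->fetchAll(PDO::FETCH_OBJ);
}
```

The same pattern would apply to the INSERT/UPDATE/DELETE statements in the maneger pages.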
<repo_name>mdrustam21/mysite<file_sep>/README.md # mysite My site is very important
fcf290a8b7ae5463c03081e23b6e511c85aec368
[ "Markdown" ]
1
Markdown
mdrustam21/mysite
af1a346e3961beea4bae1e6794ddcbdc63d20ac1
da9447275bed406a1ea1b2fad9b18caf8011dfbb
refs/heads/master
<file_sep>/** * @format */ import 'react-native'; import React from 'react'; import {generateWallet} from '../app/ether/wallet'; it('Ether钱包创建失败', async () => { // try { // const result = await generateWallet(); // const {mnemonic, privateKey, publicKey} = result; // console.log('获取到的钱包信息:', mnemonic, privateKey, publicKey); // console.log(); // } catch (error) { // expect(error).toEqual({error: 'Ether钱包创建失败'}); // } }); <file_sep>import React, {useEffect, useState} from 'react'; import {SafeAreaView, View} from 'react-native'; import {queryAISBalance, queryEtherBalance} from '@works/ether/transaction'; import AssetsPage from './components/AssetsPage'; import {Button} from 'react-native-elements'; import EtherToken from '@model/token'; import {FakeTokens} from '@works/FakeTokens'; import FocusAwareStatusBar from '@libs/react-navigation/FocusAwareStatusBar'; import MyWallet from '@model/wallet'; import ScrollableTabView from 'react-native-scrollable-tab-view'; import {StackScreenProps} from '@react-navigation/stack'; import styles from './styles'; import {tokensTracking} from '@works/ether/balances'; import {useNavigation} from '@react-navigation/native'; interface Props { wallets: MyWallet[]; } const Assets = (props: Props) => { const {wallets} = props; const navigation = useNavigation(); useEffect(() => {}, [1]); return ( <View style={styles.container}> <FocusAwareStatusBar translucent={true} backgroundColor="transparent" barStyle="dark-content" /> <AssetsPage tokens={tokensTracking} /> {/* <ScrollableTabView>{wallets.map((token) => {})}</ScrollableTabView> */} </View> ); }; export default Assets; <file_sep>import React, {useState} from 'react'; import Ecoloy from '@screens/ecology'; import {createStackNavigator} from '@react-navigation/stack'; const EcologyStack = createStackNavigator(); const EcologyNavigator = () => ( <EcologyStack.Navigator> <EcologyStack.Screen name="ecology" component={Ecoloy} options={{title: '生态'}} /> </EcologyStack.Navigator> ); export default EcologyNavigator; <file_sep>import React, {PureComponent} from 'react'; import {NavigationContainer} from '@react-navigation/native'; import RootNavigator from '@navigations/root'; const App = () => ( <NavigationContainer> <RootNavigator></RootNavigator> </NavigationContainer> ); export default App; <file_sep>/** 隐藏顶部导航栏的分割线 */ export const hideNavigationBarDivider = { shadowOffset: {width: 0, height: 0}, elevation: 0, }; <file_sep>/** * Sample React Native App * https://github.com/facebook/react-native * * Generated with the TypeScript template * https://github.com/react-native-community/react-native-template-typescript * * @format */ import React from 'react' import App from '@navigations/app' export default App <file_sep>export const navigationTitleSize = 18; /** 设置页面 单元格样式 */ export const settingsTitleSize = 14; export const settingsHintSize = 12; <file_sep>module.exports = { presets: ['module:metro-react-native-babel-preset'], plugins: [ [ 'module-resolver', { root: ['./app'], extensions: [ '.ios.ts', '.android.ts', '.ts', '.ios.tsx', '.android.tsx', '.tsx', '.jsx', '.js', '.json', ], alias: { '@config': './app/config', '@components': './app/components', '@libs': './app/libs/', '@logic': './app/logic', '@model': './app/model', '@navigations': './app/navigations', '@public': './app/public', '@private': './app/__private/', '@resource': './app/resource', '@screens': './app/screens', '@works': './app/works', }, }, ], ], }; <file_sep>import {Avatar, Card, CardProps, Icon, ListItem} from 'react-native-elements'; import 
{FlatList, Text, View} from 'react-native'; import React, {PureComponent} from 'react'; import FocusAwareStatusBar from '@libs/react-navigation/FocusAwareStatusBar'; import {ScrollView} from 'react-native-gesture-handler'; import {screenContainerStyle} from '@resource/styles'; import styles from './styles'; const SettingCard = (props: CardProps | any) => { return ( <Card containerStyle={{ borderRadius: 10, overflow: 'hidden', padding: 0, }} {...props} /> ); }; const Me = () => ( <View style={screenContainerStyle}> <FocusAwareStatusBar translucent={true} backgroundColor="transparent" barStyle="dark-content" /> <ScrollView> <SettingCard> <View style={{flexDirection: 'row', alignItems: 'center'}}> <Avatar title="DK" rounded size="large" containerStyle={styles.avatarContainer} source={{ uri: 'https://upload.wikimedia.org/wikipedia/commons/thumb/6/6f/Ethereum-icon-purple.svg/1200px-Ethereum-icon-purple.svg.png', }} /> <Text style={{fontSize: 16}}>混世大魔王</Text> </View> </SettingCard> <SettingCard> <ListItem chevron={true} bottomDivider={true} title="挖矿奖励" leftIcon={<Icon name="gift" type="font-awesome" />} /> <ListItem chevron={true} bottomDivider={true} title="邀请好友" leftIcon={<Icon name="paper-plane-o" type="font-awesome" />} /> <ListItem chevron={true} bottomDivider={true} title="我的社区" leftIcon={<Icon name="heartbeat" type="font-awesome" />} /> </SettingCard> <SettingCard> <ListItem chevron={true} title="联系客服" leftIcon={<Icon name="headset-mic" type="material" />} /> <ListItem topDivider={true} chevron={true} title="系统公告" leftIcon={<Icon name="bell-o" type="font-awesome" />} /> </SettingCard> <SettingCard> <ListItem bottomDivider={true} title="设置" chevron={true} leftIcon={<Icon name="gear" type="font-awesome" />} /> <ListItem title="关于" chevron={true} leftIcon={<Icon name="info" type="font-awesome" />} /> </SettingCard> <View style={{height: 20}} /> </ScrollView> </View> ); export default Me; <file_sep>export const contractAddress = '0xC55292B9c883A8abAf91F3620944E8C7606fc715'; const daiAbi = [ // Some details about the token 'function name() view returns (string)', 'function symbol() view returns (string)', // Get the account balance 'function balanceOf(address) view returns (uint)', // Send some of your tokens to someone else 'function transfer(address to, uint amount)', // An event triggered whenever anyone transfers to someone else 'event Transfer(address indexed from, address indexed to, uint amount)', ]; export const balanceAbi = [ { constant: true, inputs: [{name: '_owner', type: 'address'}], name: 'balanceOf', outputs: [{name: '', type: 'uint256'}], payable: false, stateMutability: 'view', type: 'function', }, ]; <file_sep>import EtherToken from 'model/token'; export default { symbol: 'DPS', contactAddress: '0x449D737F0B42bDa8fc267bc8E84fE6a51Dcae293', } as EtherToken; <file_sep>import DPS from './DPS'; import FAIS from './FAIS'; export const FakeTokens = [FAIS, DPS]; <file_sep>import React, {PureComponent} from 'react'; import {Text, View} from 'react-native'; const Wallets = () => ( <View> <Text>钱包</Text> </View> ); export default Wallets; <file_sep>import {FlatList, Image, ListRenderItemInfo} from 'react-native'; import {Icon, ListItem} from 'react-native-elements'; import React, {Component} from 'react'; import EtherToken from '@model/token'; type Props = { tokens: EtherToken[]; }; const AssetsPage = (props: Props) => { const {tokens} = props; return ( <FlatList data={tokens} renderItem={renderItem} keyExtractor={(token) => token.symbol} /> ); }; const renderItem = (info: 
ListRenderItemInfo<EtherToken>) => { const {item: token, index} = info; const {symbol, icon} = token; const leftIcon = icon ? ( <Image source={icon} /> ) : ( <Icon name="dollar" type="font-awesome" /> ); return <ListItem leftIcon={leftIcon} title={symbol} bottomDivider />; }; export default AssetsPage; <file_sep>const randomColor = () => {} <file_sep>export const navigationBatTitleColor = '#FF0000'; export const screenContainerBackgroundColor = '#F5F5F5'; export const dividerColor = '#F3F3F3'; export const disableButtonColor = '#A5A5A5'; export const textColor = '#333'; <file_sep>import '@ethersproject/shims'; import * as _ from 'lodash'; import EtherToken from '@model/token'; import {FakeTokens} from '../FakeTokens'; import {ethers} from 'ethers'; const etherToken: EtherToken = {symbol: 'ETH'}; export const tokensTracking = [etherToken, ...FakeTokens]; const fetchBalance = async () => {}; const contains = ( token: EtherToken, _tokens: EtherToken[] = tokensTracking, ) => { return _.some(_tokens, {symbol: token.symbol}); }; /** 向数组中添加Token,添加成功返回true,否则返回false */ export const addToken = (token: EtherToken): boolean => { if (contains(token)) { return false; } tokensTracking.push(token); return true; }; /** 从数组中移除token */ export const removeToken = ( token: EtherToken, tokens: EtherToken[] = tokensTracking, ) => { _.remove(tokens, ({symbol}) => symbol === token.symbol); }; <file_sep>declare module 'react-native-crypto'; export function randomBytes( length: number, callback: (random: string, as: any) => void, ): number[]; <file_sep>import {ImageSourcePropType} from 'react-native'; export default interface EtherToken { symbol: string; icon?: ImageSourcePropType; contactAddress?: string; } <file_sep>import React, {useState} from 'react'; import Login from '@screens/login/index'; import Register from '@screens/register/index'; import {createStackNavigator} from '@react-navigation/stack'; const AuthStack = createStackNavigator(); const AuthNavigator = () => ( <AuthStack.Navigator mode={'modal'}> <AuthStack.Screen name="login" component={Login} options={{headerShown: false}}></AuthStack.Screen> <AuthStack.Screen name="register" component={Register}></AuthStack.Screen> </AuthStack.Navigator> ); export default AuthNavigator; <file_sep>import {StyleSheet} from 'react-native'; const styles = StyleSheet.create({ avatarContainer: { backgroundColor: '#8ace70', margin: 10, padding: 3, }, }); export default styles; <file_sep>export default interface MyWallet { name: string; encryptWallet: string; mnemonic?: string; password?: string; } <file_sep>import {PROJECT_ID, PROJECT_SECRET} from '@private/Infura'; import network from './network'; export const httpPoviderUrl = `https://${network}.infura.io/v3/${PROJECT_ID}`; export const websocketProviderUrl = `wss://${network}.infura.io/ws/v3/${PROJECT_ID}`; <file_sep>import React, {Component} from 'react'; import {StyleSheet, View} from 'react-native'; type Props = {}; const SettingsCell = (props: Props) => { return <View></View>; }; export default SettingsCell; const styles = StyleSheet.create({ cell: { backgroundColor: '#FFF', }, }); <file_sep>export const test = 'ropsten'; export const product = 'mainnet'; // export const network = __DEV__ ? 
test : product; export const network = test; export default network; <file_sep>import EtherToken from '@model/token'; export default { symbol: 'FAIS', contactAddress: '0xd6F045c13bE3043246375c51d0Cfda2eFeB5dD81', } as EtherToken; <file_sep>import '@ethersproject/shims'; import '../../shim'; import {ProgressCallback} from 'ethers/lib/utils'; import {Wallet} from 'ethers'; import {randomBytes} from 'react-native-crypto'; const secureEntropy = () => { return new Promise<Uint8Array>((resolve, reject) => { randomBytes(16, (error, entropy) => { if (error) { reject(error); } else { resolve(entropy); } }); }); }; export const generateWallet = async () => { const extraEntropy = await secureEntropy(); const wallet = Wallet.createRandom({extraEntropy}); if (__DEV__) { const {mnemonic, privateKey, publicKey} = wallet; console.log('获取到的钱包信息:', mnemonic, privateKey, publicKey); } return wallet; }; export const walletWithJSON = async (json: string, password: string = '') => { return Wallet.fromEncryptedJsonSync(json, password); }; export const walletWithPrivateKey = async ( privateKey: string, ): Promise<Wallet> => { return new Wallet(privateKey); }; export const walletWithMnemonic = async ( mnemoc: string, path: string | undefined = undefined, ) => { return Wallet.fromMnemonic(mnemoc, path); }; export const encyptWallet = async ( wallet: Wallet, password: string, progress: ProgressCallback, ) => { return wallet.encrypt(password, progress); }; export const saveWallet = async (walletName: string) => {}; <file_sep>import {Button, NativeModules, SafeAreaView, Text, View} from 'react-native'; import React, {useEffect, useState} from 'react'; import FocusAwareStatusBar from 'libs/react-navigation/FocusAwareStatusBar'; import {Header} from 'react-native-elements'; import {StackScreenProps} from '@react-navigation/stack'; import styles from './styles'; import {useNavigation} from '@react-navigation/native'; const {StatusBarManager} = NativeModules; const Home = (props: StackScreenProps<{}>) => { const navigation = useNavigation(); useEffect(() => {}, [1]); return ( <View style={styles.container}> <FocusAwareStatusBar translucent={true} backgroundColor="transparent" barStyle="dark-content" /> <SafeAreaView> <Button title="去登录" onPress={() => { navigation.navigate('auth'); }}></Button> </SafeAreaView> </View> ); }; export default Home; <file_sep>export default interface TabBarIconProps { focused: boolean; color: string; size: number; } <file_sep>import AuthNavigator from '@navigations/auth'; import MainNavigator from '@navigations/main'; import React from 'react'; import {createStackNavigator} from '@react-navigation/stack'; const RootStack = createStackNavigator(); const RootNavigator = () => { return ( <RootStack.Navigator mode="modal"> <RootStack.Screen name="main" component={MainNavigator} options={{headerShown: false}} /> <RootStack.Screen name="auth" component={AuthNavigator} /> </RootStack.Navigator> ); }; export default RootNavigator; <file_sep>import React, {useState} from 'react'; import Assets from '@screens/assets'; import {createStackNavigator} from '@react-navigation/stack'; const AssetsStack = createStackNavigator(); const AssetsNavigator = () => ( <AssetsStack.Navigator> <AssetsStack.Screen name="assets" component={Assets} options={{title: '资产'}} /> </AssetsStack.Navigator> ); export default AssetsNavigator; <file_sep>import React, {useState} from 'react'; import Home from '@screens/home'; import {createStackNavigator} from '@react-navigation/stack'; const HomeStack = createStackNavigator(); const 
HomeNavigator = () => ( <HomeStack.Navigator> <HomeStack.Screen name="home" component={Home} options={{title: '首页'}} /> </HomeStack.Navigator> ); export default HomeNavigator; <file_sep>import React, {useState} from 'react'; import Me from '@screens/me'; import {createStackNavigator} from '@react-navigation/stack'; const MeStack = createStackNavigator(); const MeNavigator = () => ( <MeStack.Navigator screenOptions={{ headerStyle: {shadowOffset: {width: 0, height: 0}, elevation: 0}, }}> <MeStack.Screen name="me" component={Me} options={{title: '我的'}} /> </MeStack.Navigator> ); export default MeNavigator; <file_sep># react-native-best-practice 总结个人在 React Native 项目中的经验,把自认为优秀的实践经验记录下来.以区块链项目为实例,做一次实实在在的总结。 ### 问题及解决方法 - Unable to resolve module `crypto` from `node_modules\sjcl\sjcl.js`: crypto could not be found within the project. ``` ./node_modules/.bin/rn-nodeify --hack --install ``` <file_sep>import React, { useState } from 'react' import { View, Text } from 'react-native' const Register = () => { return <View> <Text>注册页面</Text> </View> } export default Register<file_sep>import {ViewStyle} from 'react-native'; import {screenContainerBackgroundColor} from '@resource/colors'; export const screenContainerStyle: ViewStyle = { flex: 1, backgroundColor: screenContainerBackgroundColor, flexDirection: 'column', }; <file_sep>import { Button, NativeModules, SafeAreaView, StatusBar, Text, View, } from 'react-native'; import React, {useEffect, useState} from 'react'; import FocusAwareStatusBar from 'libs/react-navigation/FocusAwareStatusBar'; import {StackScreenProps} from '@react-navigation/stack'; import styles from './styles'; import {useNavigation} from '@react-navigation/native'; const {StatusBarManager} = NativeModules; const Home = (props: StackScreenProps<{}>) => { const navigation = useNavigation(); useEffect(() => {}, [1]); return ( <View style={styles.container}> <FocusAwareStatusBar translucent={true} backgroundColor="transparent" barStyle="dark-content" /> <SafeAreaView></SafeAreaView> </View> ); }; const useTimeout = (time: Number) => { const [isDone, setIsDone] = useState(false); useEffect(() => { const tim = setTimeout(() => { console.log('已经过了:', time); setIsDone(!isDone); }, time); return () => { clearTimeout(tim); }; }); return isDone; }; export default Home; <file_sep>import '@ethersproject/shims'; import {balanceAbi, contractAddress} from 'works/AIS'; import {ethers, providers} from 'ethers'; import {PROJECT_ID} from '@private/Infura'; import {address} from '@private/Wallet'; import {network} from './network'; export const infuraProvider = new providers.InfuraProvider(network, PROJECT_ID); export const queryEtherBalance = async () => { const balance = await infuraProvider.getBalance(address); console.log('账户余额', balance); }; export const queryAISBalance = async () => { const contract = new ethers.Contract( contractAddress, balanceAbi, infuraProvider, ); const balance = await contract.balanceOf(address); console.log('AIS余额', balance); }; <file_sep>import React, {PureComponent} from 'react'; import AssetsNavigator from './assets'; import EcologyNavigator from './ecology'; import HomeNavigator from './home'; import Icon from 'react-native-vector-icons/Ionicons'; import Icon2 from 'react-native-vector-icons/MaterialIcons'; import MeNavigator from './me'; import {createBottomTabNavigator} from '@react-navigation/bottom-tabs'; const Tab = createBottomTabNavigator(); const MainNavigator = () => { return ( <Tab.Navigator> <Tab.Screen name="homestack" component={HomeNavigator} options={{ 
title: '首页', tabBarIcon: (props) => <Icon name="home-outline" {...props} />, }} /> <Tab.Screen name="assetsstack" component={AssetsNavigator} options={{ title: '资产', tabBarIcon: (props) => <Icon2 name="attach-money" {...props} />, }} /> <Tab.Screen name="ecologystack" component={EcologyNavigator} options={{ title: '生态', tabBarIcon: (props) => ( <Icon name="ios-cloud-circle-outline" {...props} /> ), }} /> <Tab.Screen name="mestack" component={MeNavigator} options={{ title: '我的', tabBarIcon: (props) => <Icon name="ios-person-outline" {...props} />, }} /> </Tab.Navigator> ); }; export default MainNavigator;
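In app/works/ether/balances.ts above, fetchBalance is still an empty stub. One possible completion, reusing the infuraProvider exported from works/ether/transaction.ts and the balanceAbi from works/AIS.ts, is sketched below; the function signature, the fallback to getBalance for plain ETH, and the import paths are assumptions, not part of the original code.

```typescript
// Sketch only: one way the fetchBalance stub could be filled in.
import '@ethersproject/shims';
import {BigNumber, ethers} from 'ethers';
import EtherToken from '@model/token';
import {balanceAbi} from 'works/AIS';
import {infuraProvider} from './transaction';

export const fetchBalance = async (
  token: EtherToken,
  address: string,
): Promise<BigNumber> => {
  // Tokens without a contract address are treated as plain ETH.
  if (!token.contactAddress) {
    return infuraProvider.getBalance(address);
  }
  // ERC-20 style token: read balanceOf from the contract.
  const contract = new ethers.Contract(
    token.contactAddress,
    balanceAbi,
    infuraProvider,
  );
  return contract.balanceOf(address);
};
```

Mapping this over tokensTracking would give the balances the Assets screen needs.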
415ebdff48e66d56aaf5e49ff2b88135d85ca5bf
[ "Markdown", "TypeScript", "JavaScript", "TSX" ]
39
Markdown
rnpartner/react-native-best-practice
2c5fecd112b8aca32e8af3e9b705b06e6c362bd4
6573f827b5a706a9c7434a09872bdb372e3d21a0
refs/heads/main
<file_sep>## Install xl_base64 globally ``` npm install -g xl_base64 ``` ## base64 ``` xl_base64 file ``` Converts the file to base64
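The README above only documents the CLI usage; presumably the tool reads the given file and prints its Base64 encoding. A rough Node.js equivalent of that behaviour (illustrative only — the package's actual source is not included here):

```javascript
// Illustrative sketch, not the real xl_base64 implementation.
const fs = require('fs');

const file = process.argv[2];
const base64 = fs.readFileSync(file).toString('base64');
console.log(base64);
```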
7443a8cb4053d2d2c03df95f44983d93f8c0714c
[ "Markdown" ]
1
Markdown
longfei59418888/xl_base64
935c783e1af305c78eb17f9c9c86be4efec89bc1
4c395a3be6cf0684517b4fa01ef5948d32f60c25
refs/heads/master
<file_sep>(define (compose f g) (lambda (x) (f (g x)))) ;; ((compose square inc) 6) will return 49 ;; ((compose inc square) 6) will return 37 <file_sep>The following will happen if the interpreter is asked to evaluate (f f): ``` . . procedure application: expected procedure, given: 2; arguments were: 2 ``` ``` (f f) (f 2) (2 2) ``` By the substitution model above, in the first substitution the argument `f` is applied to the value 2, so a recursive call is made. In the second substitution, the argument 2 is applied to 2. Since 2 is not a procedure, an error is printed. <file_sep>package set import scala.annotation.tailrec object Set { val limit = 1000 type Set = Int => Boolean def exists(set: Set, element: Int): Boolean = set(element) def forall(predicate: (Int) => Boolean, set: Set): Boolean = { @tailrec def iterate(element: Int): Boolean = { if (exists(set, element) && !predicate(element)) false else if (element < -limit) true else iterate(element - 1) } iterate(limit) } } <file_sep>If the interpreter uses applicative-order evaluation, Ben's procedures will be evaluated as follows: ``` (test 0 (p)) (test 0 (p)) (test 0 (p)) ... ``` The operator test will evaluate to the body of its procedure, since the interpreter evaluates the arguments and then applies; the symbol 0 will evaluate to its value, and the operand (p) will evaluate to a recursive call to itself, which will never stop recursing. If the interpreter uses normal-order evaluation, Ben's procedures will be evaluated as follows: ``` (test 0 (p)) (if (= 0 0) 0 (p)) (if true 0 (p)) 0 ``` If the interpreter uses normal-order evaluation, the (p) operand will not be evaluated until its value is needed, as the expression is fully expanded and then reduced. Since the conditional statement in the body is structured such that the second argument never needs to be evaluated, the entire test procedure will evaluate to 0 under normal-order evaluation. <file_sep>(define (double f) (lambda (x) (f (f x)))) ;; (((double (double double)) inc) 5) will return 21. <file_sep>(define (inc x) (+ x 1)) (define (square x) (* x x)) (define (compose f g) (lambda (x) (f (g x)))) (define (repeated f n) (if (= n 1) f (compose f (repeated f (- n 1))))) ;; ((repeated inc 3) 5) will return 8 ;; ((repeated inc 10) 11) will return 21 ;; ((repeated square 2) 5) will return 625 <file_sep>(define (cont-frac n d k) (define (iter counter result) (if (< counter 1) result (iter (- counter 1) (/ (n counter) (+ (d counter) result))))) (iter k 0.0)) (define (d i) (if (not (= 0 (remainder (+ i 1) 3))) 1 (* 2 (/ (+ i 1) 3)))) (define e (+ 2 (cont-frac (lambda (i) 1.0) d 10))) <file_sep># Cake Solutions This repo was created for completing exercises from "The Structure and Interpretation of Computer Programs" (at http://mitpress.mit.edu/sicp/full-text/book/book.html) The completed exercises are located in the folder `exercises`. ### Running the bonus point Follow these steps to run the code: - Enter `git clone https://github.com/kasonchan/cakesolutions.git`. Or download the `zip` file from `https://github.com/kasonchan/cakesolutions` and unzip it. - `cd` into the `cakesolutions` directory. - Enter `sbt test` to test. - Enter `sbt clean` to clean up the compiled files. <file_sep>The new `sqrt-iter` procedure defined in terms of new-if never finishes executing. In the `if` special form, after the predicate is evaluated, only one of the `consequent` or `alternative` will be evaluated.
The `new-if` procedure does not have this property; therefore, the interpreter uses applicative-order evaluation when it evaluates a call to `new-if`. The else clause passed to `new-if` in the `sqrt-iter` procedure is always evaluated, and `sqrt-iter` never stops making recursive calls to itself. <file_sep>package set import org.scalatest.{FlatSpec, Matchers} import set.Set.forall class SetSpec extends FlatSpec with Matchers { "Set" should "pass" in { val set1 = (x: Int) => x == 1 val predicate1 = (x: Int) => x > 2 forall(predicate1, set1) shouldBe false val set2 = (x: Int) => x == 10 val predicate2 = (x: Int) => x < 11 forall(predicate2, set2) shouldBe true val set3 = (x: Int) => 1001 <= x && x <= 1002 val predicate3 = (x: Int) => x <= 1 forall(predicate3, set3) shouldBe true val set4 = (x: Int) => 1001 <= x && x <= 1002 val predicate4 = (x: Int) => x >= 1 forall(predicate4, set4) shouldBe true val set5 = (x: Int) => x >= 0 && x <= 10 val predicate5 = (x: Int) => x >= 0 forall(predicate5, set5) shouldBe true val set6 = (x: Int) => x >= 0 && x <= 10 val predicate6 = (x: Int) => x < 0 forall(predicate6, set6) shouldBe false } }
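Both write-ups above (Ben Bitdiddle's test and the new-if version of sqrt-iter) reduce to the same point: under applicative-order evaluation every operand is evaluated before the procedure is applied, while the if special form only evaluates the branch it needs. A tiny Scheme illustration, using the usual Exercise 1.6 definition of new-if (that definition is not shown in this repo snippet):

```scheme
(define (new-if predicate then-clause else-clause)
  (cond (predicate then-clause)
        (else else-clause)))

(define (loop) (loop))           ; a call that never returns

(if #t 0 (loop))                 ; => 0, the alternative is never evaluated
(new-if #t 0 (loop))             ; never returns: (loop) is evaluated before new-if is applied
```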
dc4354792f557af7eec3e86f64de450b9b62a696
[ "GCC Machine Description", "Markdown", "Scala" ]
10
GCC Machine Description
kasonchan/cakesolutions
79b22f3cbe4b77259b10e81e9fdab9e5ee6e4ee2
57d651b81d0874c4caa0a0345bb3b680b9a8b881
refs/heads/master
<file_sep>Python code for Cisco PRNE training program Mainly uploading this code in order to experiment with various network functionality using Python, specifically Telnet, SSH, sending CLI commands, receiving & parsing output, etc. <file_sep>#!/usr/bin/env python """ Example of a script that executes a CLI command on a remote device over established SSH connection. Administrator login options and CLI commands are device specific, thus this script needs to be adapted to a concrete device specifics. Current script assumes interaction with Cisco IOS device. NOTES: Requires installation of the 'paramiko' Python package: pip install paramiko The 'paramiko' package is documented at: http://docs.paramiko.org Complete set of SSH client operations is available at: http://docs.paramiko.org/en/1.15/api/client.html command_ssh.py """ # built-in modules import time import socket # third-party modules import paramiko def enable_privileged_commands(device_info, rsh): """Turn on privileged commands execution. :param dict device_info: dictionary containing information about target device. :param paramiko.channel.Channel rsh: channel connected to a remote shell. """ cmd = "enable\n" # Execute the command (wait for command to complete) rsh.send(cmd) time.sleep(1) output = rsh.recv(device_info['max_bytes']) if(device_info['password_prompt'] in output): password = <PASSWORD>" % device_info['password'] rsh.send(password) rsh.recv(device_info['max_bytes']) <file_sep>import pexpect ping = pexpect.spawn('ping -c 5 localhost') result = ping.expect([pexpect.EOF, pexpect.TIMEOUT]) print(ping.before) <file_sep># /usr/bin/python2.7 # Purpose: Simple program to test Telnet functionality to a Cisco router # Based on code in the PRNE training program, Section 6.3. # Currently, the password and IP have to be hard-coded into the program. # Can't seem to get user input to work correctly... # Author: <NAME> # Language: Python2.7 # telnet2.py # 9/30/2017 # v1 import pexpect # Get user input for IP and password ip_address = '192.168.229.130' password = '<PASSWORD>' # input("Please enter the IP address of the device you'd like to connect to: ") # Get the password # pw = input("Enter the device password: ") # Create pExpect telnet session session = pexpect.spawn('telnet ' + ip_address, timeout=20) print 'Attempting to telnet into the device...' # Prompt for password result = session.expect(['Password:', pexpect.TIMEOUT]) # Check & print error, if one exists if result != 0: print '--- FAILURE! creating sessions for: ', ip_address exit() # Session expecting password, enter it here session.sendline(password) result = session.expect(['>', pexpect.TIMEOUT]) # Check for error, if so then print error and exit if result != 0: print ' FAILURE! entering password: ', password exit() print '--- Success! connecting to: ', ip_address # print '--- Username: ', username print '--- Password: ', <PASSWORD> print '------------------------------------------------------\n'
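telnet2.py above hard-codes ip_address and the password, and its comments note that user input "can't seem to work correctly". On Python 2, which the script targets, input() passes the typed text through eval(), so a bare IP address like 192.168.229.130 raises an error; raw_input() returns the text as a plain string. A small sketch of how the prompts could look — the variable names mirror the script, and the use of getpass is an added suggestion, not part of the original code:

```python
# Sketch for Python 2: raw_input() returns the typed text as a string,
# and getpass.getpass() prompts without echoing the password.
import getpass

ip_address = raw_input("Please enter the IP address of the device you'd like to connect to: ")
password = getpass.getpass("Enter the device password: ")
```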
59c0b1b6cde3a7f1ce7f4e5d4b65946ee21cb41b
[ "Markdown", "Python" ]
4
Markdown
kmanwar89/PRNE
40ce71196549349ec52f606845f1db258ca2ba0d
edbb6bd1e736e2915ae8f2ef0ec252ee3878438b
refs/heads/main
<repo_name>Kempo/community-carecard<file_sep>/src/scenes/Home/components/FeaturedBusiness/index.js import React from 'react'; import { PropTypes } from 'prop-types'; import { useHistory } from 'react-router-dom'; import { Trans } from 'react-i18next'; import { EnvironmentOutlined } from '@ant-design/icons'; import { businessPagePath } from 'services/urlHelper'; import './style.scss'; const FeaturedBusiness = ({ business, icon }) => { const history = useHistory(); const handleClick = () => { history.push(businessPagePath(business.googlePlaceId)); }; return ( <div className="featured-business" onClick={handleClick}> <div className="image" style={{ backgroundImage: `url(${business.image})` }} /> <div className="business-info"> <div className="business-text"> <h5>{business.title}</h5> <div className="location"> <EnvironmentOutlined /> <p>{business.loc}</p> </div> <p>{business.subText}</p> </div> <div className="business-link"> <div className="business-link-text"> <Trans i18nKey={business.linkID} /> </div> <div className="icon"> {icon} </div> </div> </div> </div> ); }; FeaturedBusiness.propTypes = { business: PropTypes.shape().isRequired, icon: PropTypes.element.isRequired, }; export default FeaturedBusiness; <file_sep>/src/scenes/About/components/TeamMember/index.js import React from 'react'; import { Avatar, Row, Col } from 'antd'; import { PropTypes } from 'prop-types'; import './style.scss'; const TeamMember = ({ teamMember }) => { return ( <Row className="team-member" justify="center"> <Avatar className="member-image" src={teamMember.image} size={250} /> <Col className="member-info" xs={16} md={12}> <h5 className="member-name">{teamMember.name}</h5> <p className="location">{teamMember.location}</p> <p className="role">{teamMember.role}</p> <p className="blurb">{teamMember.blurb}</p> </Col> </Row> ); }; TeamMember.propTypes = { teamMember: PropTypes.shape().isRequired, }; export default TeamMember; <file_sep>/src/scenes/About/components/TeamMember/style.scss .team-member { padding: 30px 0; font-size: 14px; .member-image { margin-right: 40px; } .member-info { .location { margin-bottom: 0px; } .member-name { margin: 5px 0 0 0; } } } <file_sep>/src/scenes/Home/components/FeaturedSection/index.js import React, { useMemo } from 'react'; import { useTranslation } from 'react-i18next'; import { Col, Row } from 'antd'; import FeaturedBusiness from '../FeaturedBusiness'; import { mockBusinesses } from './mockBusinesses'; import { NUM_BUSINESSES_SHOWN } from './constants'; import featuredIconOrange from '../../../../images/home/featured-biz-arrow-orange.png'; import featuredIconPink from '../../../../images/home/featured-biz-arrow-pink.png'; import featuredIconYellow from '../../../../images/home/featured-biz-arrow-yellow.png'; import './style.scss'; const icons = [ featuredIconYellow, featuredIconPink, featuredIconOrange, ]; const FeaturedSection = () => { const { t } = useTranslation(); const businesses = useMemo(() => ( mockBusinesses(t) .sort(() => Math.random() - Math.random()) .slice(0, NUM_BUSINESSES_SHOWN) ), [t]); return ( <div className="featured-section container"> <h2 className="section-title">{t('HOME.FEATURED_BUSINESSES')}</h2> <Row className="business-list" justify="center" > {businesses.map((business, index) => ( <Col className="business" xs={20} md={8} lg={7} key={business.title} > <FeaturedBusiness business={business} icon={<img src={icons[index]} alt="" />} /> </Col> ))} </Row> </div> ); }; export default FeaturedSection; <file_sep>/src/components/Header/style.scss @import 
'styles/colors.scss'; @import 'styles/sizes.scss'; @import 'styles/screenSizes.scss'; @import 'styles/antdOverrides/button.scss'; .header, .header.ant-layout-header { position: fixed; width: 100%; background-color: white; padding: 0; box-shadow: 0px -2px 10px; z-index: 10; .header-content{ display: flex; align-items: center; &, .logo { height: $header-height; } .ccc-icon { height: 40px; padding: 0px 0px 5px 25px; } .header-right { display: flex; justify-content: flex-end; align-items: center; width: 100%; .hamburger-icon { color: $header-color; font-size: 24px; margin-right: 24px; } } } .business-button { float: right; margin-right: 16px; } } .header-menu.ant-menu { flex-basis: 100%; display: inline-block; border-bottom: white; border-right-color: white; .ant-menu-item { top: 0; border-bottom: none; &:hover { border-bottom: none; } &:after { border-right-color: white; } &.ant-menu-item-selected { background-color: white; a span { &:after { content: ''; display: block; position: absolute; bottom: -6px; height: 4px; width: 100%; border-radius: 1px; background-color: $teal-500; } } } a { font-weight: 600; color: $blue-700; &:hover { font-weight: 800; color: $blue-700; } span { position: relative; &:before { display: block; content: attr(title); font-weight: 800; height: 1px; color: transparent; overflow: hidden; visibility: hidden; } } } } } .mobile-drawer { .header-menu.ant-menu { margin-top: 24px; } } <file_sep>/src/scenes/Home/components/HowItWorks/index.js import React from 'react'; import { Row, Col } from 'antd'; import { useTranslation, Trans } from 'react-i18next'; import CImg from '../../../../images/c-icon-small.png'; import step1 from '../../../../images/howItWorks/c-step1.png'; import step2 from '../../../../images/howItWorks/c-step2.png'; import step3 from '../../../../images/howItWorks/c-step3.png'; import step4 from '../../../../images/howItWorks/c-step4.png'; import step5 from '../../../../images/howItWorks/c-step5.png'; import './style.scss'; const HowItWorks = () => { const { t } = useTranslation(); return ( <div className="how-it-works" id="howitworks"> <Row className="container" justify="center" align="top"> <Col xs={22} lg={24}> <img className="c-logo" src={CImg} alt="" /> <h2 className="title">{t('HOW_IT_WORKS.MAIN')}</h2> <div className="body-text"> <p className="bdy"><Trans i18nKey="HOW_IT_WORKS.WHAT" /></p> <p className="bdy"><Trans i18nKey="HOW_IT_WORKS.STATS" /></p> <p className="bdy"><Trans i18nKey="HOW_IT_WORKS.SELL" /></p> <h5 className="bdy"><Trans i18nKey="HOW_IT_WORKS.HERE" /></h5> </div> <div className="steps"> {[step1, step2, step3, step4, step5].map((step, index) => ( <div className="step"> <img src={step} alt={`Step ${index}`} /> <p> {t(`HOW_IT_WORKS.STEP_${index}`)} </p> </div> ))} </div> </Col> </Row> </div> ); }; export default HowItWorks; <file_sep>/src/scenes/Confirmation/index.js import React from 'react'; import { Row, Button } from 'antd'; import { useLocation, Link } from "react-router-dom"; import { Trans, useTranslation } from 'react-i18next'; import Payment from './components/Payment'; import './style.scss'; const Confirmation = () => { const location = useLocation(); const { t } = useTranslation(); return ( <div className="confirmation container"> { location.state ? 
( <div> <h2 className="title"> {t(location.state.title)} </h2> <h6> <Trans> {t(location.state.descriptionKey, { name: location.state.businessName, email: location.state.businessEmail })} </Trans> </h6> </div> ) : <Payment /> } <Row justify="center"> <Link to="/faq" className="links"> <Button size="large" type="primary"> {t('ONBOARD.CONFIRMATION.FAQ_LINK')} </Button> </Link> <Link to="/" className="links"> <Button size="large"> {t('ONBOARD.CONFIRMATION.HOME_LINK')} </Button> </Link> </Row> </div> ) }; export default Confirmation; <file_sep>/src/scenes/About/components/OurValuesSection/index.js import React from 'react'; import { useTranslation } from 'react-i18next'; import { Col, Row } from 'antd'; import './style.scss'; const OurValuesSection = () => { const { t } = useTranslation(); const hasBorder = false; return ( <div className="values"> {hasBorder && <div className="border"/>} <h2 className="title">{t('ABOUT.VALUES')}</h2> <Row className="values-row" gutter={32} justify="center"> <Col> <span className="optimistic">{t('ABOUT.OPTIMISTIC')}</span> </Col> <Col> <span className="local">{t('ABOUT.LOCAL')}</span> </Col> <Col> <span className="compassionate">{t('ABOUT.COMPASSIONATE')}</span> </Col> <Col> <span className="trustworthy">{t('ABOUT.TRUSTWORTHY')}</span> </Col> </Row> </div> ); }; export default OurValuesSection; <file_sep>/README.md # Community Carecard Virtual gift cards for small businesses. Built with a remote team of 10+ volunteers (San Francisco, Seattle, Brazil, New Mexico, and Chicago) Received $1000 grant from the 1517 Fund. **Status:** Temporarily shutdown as of December 2020. [http://ccarecard.com](http://ccarecard.com) # Stack - React.js - Stripe Checkout + Connect - Ant Design UI <file_sep>/src/routes.js import React from 'react'; import { Route, Switch } from 'react-router-dom'; import { About, Business, FAQ, Home, Contact, Onboard, Confirmation, Resources, PrivacyPolicy, ForBusinesses } from './scenes'; const Routes = () => ( <Switch> <Route path="/about"> <About /> </Route> <Route path="/faq"> <FAQ /> </Route> <Route path="/contact"> <Contact /> </Route> <Route path="/onboard/:id?" 
component={Onboard} /> <Route path="/confirmation"> <Confirmation /> </Route> <Route path="/payment/confirm"> <Confirmation /> </Route> <Route path="/business-resources"> <Resources /> </Route> <Route path="/for-businesses"> <ForBusinesses /> </Route> <Route path="/privacy-policy" component={PrivacyPolicy} /> <Route path="/businesses/:id" component={Business} /> <Route path="/"> <Home /> </Route> </Switch> ); export default Routes; <file_sep>/src/styles/index.scss @import '~antd/dist/antd.css'; @import './colors.scss'; @import './antdOverrides/button.scss'; @import './antdOverrides/checkbox.scss'; body { margin: 0; box-sizing: border-box; color: $gray-700; font-family: 'Montserrat', sans-serif; font-weight: 400; font-size: 12px; } h1 { font-weight: 700; font-size: 32px; &.white { color: white; } } h2 { color: $blue-700; font-weight: 700; font-size: 36px; } h3 { color: $blue-700; font-weight: 600; font-size: 24px; &.primary { color: $orange-700; } } h5 { color: $blue-700; font-weight: 700; font-size: 16px; } h6 { font-weight: 400; font-size: 16px; color: $gray-700; } a { color: $blue-700; } <file_sep>/src/services/deviceWidthHelper.js export const isPhone = width => width < 768; <file_sep>/src/scenes/Home/components/Testimonial/index.js import React from 'react'; import PropTypes from 'prop-types'; import { Avatar, Row, Col } from 'antd'; import './style.scss'; const Testimonial = (props) => { const { business, image, owner, quote, } = props; return ( <div className="testimonial"> <Row className="container" justify="center"> <Col xs={20} md={16} > <div className="quote"> “ {quote} “ </div> <div className="user-info"> <Avatar size={80} src={image} /> <div className="user-names"> <div className="owner-name">{owner}</div> <div className="business-name">{business}</div> </div> </div> </Col> </Row> </div> ); }; Testimonial.propTypes = { business: PropTypes.string.isRequired, image: PropTypes.string.isRequired, owner: PropTypes.string.isRequired, quote: PropTypes.string.isRequired, }; export default Testimonial; <file_sep>/src/scenes/Home/components/SubscribeSection/style.scss @import 'styles/colors.scss'; @import 'styles/screenSizes.scss'; .subscribe-section { display: flex; flex-direction: column; background-image: url('../../../../images/textures/subscribe-texture-small.png'); background-repeat: no-repeat; background-position: center; background-size: cover; text-align: center; font-size: 16px; @include large-desktop { background-image: url('../../../../images/textures/subscribe-texture-large.png'); } h2 { margin: 28px 0 42px 0; } .email-section { display: flex; justify-content: center; margin-bottom: 72px; form { width: 100%; } .email-input, .submit-button { margin-top: 18px; } .email-input { flex-grow: 2; margin-right: 16px; input { flex-grow: 2; } } } .social-buttons { margin: 20px auto 40px auto;; p { font-style: italic; margin-top: 20px; } .social-share-buttons { .social-share-button { margin: 0 10px; } } } } <file_sep>/src/components/SocialShareButtons/index.js import React, { useMemo } from 'react'; import PropTypes from 'prop-types'; import { notification } from 'antd'; import { EmailIcon, FacebookIcon, FacebookShareButton, TwitterIcon, TwitterShareButton, WhatsappIcon, WhatsappShareButton, } from 'react-share'; import { CopyToClipboard } from 'react-copy-to-clipboard'; import { FACEBOOK, TWITTER, WHATS_APP, EMAIL, COPY_LINK, } from 'constants.js'; import { CopyLinkIcon } from 'components/customIcons'; import './style.scss'; const hashTag = 'ccarecard'; const atTag = 'cmtycarecard'; 
const SocialShareButtons = ({ message, platforms, url }) => { const buttonProps = { className: 'social-share-button', url, }; const iconProps = { round: true, size: 30, }; const { showFacebook, showTwitter, showWhatsApp, showEmail, showCopyLink, } = useMemo(() => ({ showFacebook: platforms.includes(FACEBOOK), showTwitter: platforms.includes(TWITTER), showWhatsApp: platforms.includes(WHATS_APP), showEmail: platforms.includes(EMAIL), showCopyLink: platforms.includes(COPY_LINK), }), [platforms]); const handleCopy = () => { notification.open({ description: 'Link copied to clipboard', placement: 'bottomRight', }); }; return ( <div className="social-share-buttons"> {showFacebook && ( <div title={FACEBOOK}> <FacebookShareButton {...buttonProps} quote={`${message} - ${url}`} hashtag={`#${hashTag}`} > <FacebookIcon {...iconProps} /> </FacebookShareButton> </div> )} {showTwitter && ( <div title={TWITTER}> <TwitterShareButton {...buttonProps} title={`${message} @${atTag}`} hashtags={[hashTag]} > <TwitterIcon {...iconProps} /> </TwitterShareButton> </div> )} {showWhatsApp && ( <div title={WHATS_APP}> <WhatsappShareButton {...buttonProps} title={`${message}`} separator=" - " > <WhatsappIcon {...iconProps} /> </WhatsappShareButton> </div> )} {showEmail && ( <div title={EMAIL} className="social-share-button"> <a href={`mailto:?body=${message}%0A${url}`} rel="noopener noreferrer" target="_blank" > <EmailIcon {...iconProps} /> </a> </div> )} {showCopyLink && ( <div title={COPY_LINK} className="social-share-button"> <CopyToClipboard text={url} onCopy={handleCopy} > <CopyLinkIcon className="custom-icon" /> </CopyToClipboard> </div> )} </div> ); }; SocialShareButtons.propTypes = { message: PropTypes.string.isRequired, platforms: PropTypes.arrayOf(PropTypes.string), url: PropTypes.string.isRequired, }; SocialShareButtons.defaultProps = { platforms: [FACEBOOK, TWITTER, WHATS_APP, EMAIL, COPY_LINK], }; export default SocialShareButtons; <file_sep>/src/scenes/ForBusinesses/components/Description/style.scss .for-biz-desc { text-align: left; .text { font-size: 16px; } strong { color: #006699; } margin-bottom: 48px; }<file_sep>/src/scenes/Resources/index.js import React from 'react'; import { useTranslation } from 'react-i18next'; import Section from './components/Section'; import { General, CaresAct, Federal, Commercial, FinancialAid, Health } from './Links'; import { Row, Col } from 'antd'; import './style.scss'; const Resources = () => { const { t } = useTranslation(); const sections = [ { header: 'General', links: General(t) }, { header: 'CARES Act Loans', links: CaresAct(t) }, { header: 'Federal Government', links: Federal(t) }, { header: 'Commercial Programs', links: Commercial(t) }, { header: 'Financial Aid', links: FinancialAid(t) }, { header: 'Mental Health & Wellness', links: Health(t) }] return ( <div className="resources container"> <Row className="top" justify="center"> <Col span={16}> <h2 className="title">{t("BIZ_RESOURCE.HEADER")}</h2> <h6> {t("BIZ_RESOURCE.DESC")} </h6> </Col> </Row> { sections.map((section) => ( <Section key={section.header} name={section.header} links={section.links} /> )) } <Row className="bottom" justify="center"> <Col span={16}> <footer> {t("BIZ_RESOURCE.FOOTER")} </footer> </Col> </Row> </div> ) } export default Resources; <file_sep>/src/components/TermsOfUse/index.js import React from 'react'; import PropTypes from 'prop-types'; import { useTranslation } from 'react-i18next'; import { Modal } from 'antd'; import Terms from './components/Terms'; import './style.scss'; const 
TermsOfUseModal = (props) => { const { visible, onCancel } = props; const { t } = useTranslation(); return ( <Modal className="terms-of-use-modal" visible={visible} onCancel={onCancel} footer={null} title={t('SHARED.TOS')} width="95%" > <Terms /> </Modal> ); }; TermsOfUseModal.propTypes = { onCancel: PropTypes.func, visible: PropTypes.bool, }; TermsOfUseModal.defaultProps = { onCancel: () => {}, visible: true, }; export default TermsOfUseModal; <file_sep>/src/components/IncrementSelector/style.scss @import 'styles/colors.scss'; .increment-selector { display: flex; align-items: center; font-size: 24px; .number-input { width: 44px; .ant-input-number-input { text-align: center; } .ant-input-number-handler-wrap { display: none; } } .decrement-button { color: $blue-gray-500; margin-right: 8px; } .increment-button { color: $orange-700; margin-left: 8px; } } <file_sep>/src/scenes/Business/components/SupportForm/components/GiftCardSelection/index.js import React, { useEffect, useState } from 'react'; import PropTypes from 'prop-types'; import { useTranslation } from 'react-i18next'; import { Radio } from 'antd'; import IncrementSelector from 'components/IncrementSelector'; import './style.scss'; const GiftCardSelection = (props) => { const { t } = useTranslation(); const { onSelect } = props; const [amount, setAmount] = useState(null); const [quantity, setQuantity] = useState(1); useEffect(() => { if (onSelect) onSelect(amount, quantity); }, [onSelect, amount, quantity]); const handleChangeAmount = e => setAmount(Math.trunc(e.target.value)); const handleChangeQuantity = q => setQuantity(q); return ( <div className="gift-card-selection"> <div className="amount-description"> {t('PAYMENT.GIFT_CARD_AMOUNT_DESCRIPTION')} </div> <Radio.Group className="amount-selection" onChange={handleChangeAmount}> <Radio.Button value={5}>$5</Radio.Button> <Radio.Button value={10}>$10</Radio.Button> <Radio.Button value={25}>$25</Radio.Button> <Radio.Button value={50}>$50</Radio.Button> </Radio.Group> <div className="quantity-selection"> <IncrementSelector value={quantity} onChange={handleChangeQuantity} /> </div> </div> ); }; GiftCardSelection.propTypes = { onSelect: PropTypes.func.isRequired, }; export default GiftCardSelection; <file_sep>/src/scenes/Confirmation/components/Payment/index.js import React, { useState, useEffect } from 'react'; import axios from 'axios'; import { Row, Col, List, Spin } from 'antd'; import { LoadingOutlined } from '@ant-design/icons' import { useTranslation, Trans } from 'react-i18next'; import { transactionDetailsUrl } from '../../../../services/urlHelper'; import { dollarFormat } from '../../../../services/Numbers'; import { useQueryParams } from '../../../../hooks/useQueryParams'; import './style.scss'; import lovingImg from '../../../../images/payment/loving.png'; const Payment = () => { const [details, setDetails] = useState({}); const { total, contribution, processingFee, orderLine, customerEmail } = details; const { t } = useTranslation(); const { orderId } = useQueryParams('orderId') useEffect(() => { axios.get(transactionDetailsUrl(orderId)) .then(res => setDetails(res.data.orderDetail)) .catch(err => setDetails({ error: true })); }, [orderId]); const combinedTips = orderLine ? 
orderLine.reduce((a, b) => (a + b.tip), 0) : 0; const totalAfterAll = dollarFormat(total + processingFee); const loadingIcon = <LoadingOutlined style={{ fontSize: 48 }} spin />; const filteredList = () => { const list = [] orderLine.map(order => ( list.push({ left: `Gift Card(s) - ${order.businessName}`, right: `$ ${dollarFormat(total - contribution - order.tip)}` }) )); if(combinedTips > 0) { list.push({ left: t('CONFIRMATION_MODAL.ITEMIZED.DONATION'), right: `$ ${dollarFormat(combinedTips)}` }); } if(contribution > 0) { list.push({ left: t('CONFIRMATION_MODAL.ITEMIZED.COFFEE'), right: `$ ${dollarFormat(contribution)}` }); } list.push({ left: t('CONFIRMATION_MODAL.PROCESSING_FEE'), right: `$ ${dollarFormat(processingFee)}` }); return list; } const renderReceipt = () => ( <React.Fragment> <Row justify="center"> <Col xs={20} md={16}> <h6> <Trans> {t('PAYMENT_SUCCESS.DESC', { email: customerEmail })} </Trans> </h6> </Col> </Row> <Row className="order-summary" justify="center"> <Col xs={20} md={16}> <List header={<strong><Trans>{t('PAYMENT_SUCCESS.ORDER_ID')}</Trans></strong>} itemLayout="horizontal" footer={<strong>{`${t('CONFIRMATION_MODAL.TOTAL')}: $ ${totalAfterAll}`}</strong>} bordered={true} dataSource={filteredList()} style={{textAlign: 'left'}} renderItem={item => ( <List.Item extra={item.right}> <List.Item.Meta title={item.left} /> </List.Item> )} /> </Col> </Row> </React.Fragment> ); const renderLoading = () => ( <Row justify="center"> <Col span={12}> <Spin indicator={loadingIcon} tip="Loading..." /> </Col> </Row> ); return ( <Row justify="center" className="payment-body"> <Col xs={22} md={20}> <img src={lovingImg} alt="Thank you!" /> <h2>{t('PAYMENT_SUCCESS.TITLE')}</h2> <h6> <Trans>{t('PAYMENT_SUCCESS.ID', { id: orderId })}</Trans> </h6> { total ? renderReceipt() : (!details.error ? 
renderLoading() : <h6><Trans i18nKey="PAYMENT_SUCCESS.ERROR" /></h6>) } </Col> </Row> ) }; export default Payment; <file_sep>/src/constants.js // Development (master) export const SERVER_URL = 'https://api.dev.coronacarecard.com'; export const STRIPE_KEY = 'removed'; // Beta (stage) // export const SERVER_URL = 'https://api.beta.coronacarecard.com'; // export const STRIPE_KEY = 'removed'; export const APP_URL = 'www.ccarecard.com'; export const GOOGLE_API_KEY = 'removed'; export const CONTACT_EMAIL = '<EMAIL>'; export const STATUS_CLAIMED = 'Claimed'; export const STATUS_PENDING = 'Pending'; export const STATUS_ACTIVE = 'Active'; export const ALERT_BUSINESS_ACTIVE = 'business-active'; export const ALERT_BUSINESS_CLAIMED = 'business-claimed'; export const ALERT_BUSINESS_PENDING = 'business-pending'; export const FACEBOOK = 'Facebook'; export const TWITTER = 'Twitter'; export const WHATS_APP = 'WhatsApp'; export const EMAIL = 'Email'; export const COPY_LINK = 'Copy Link'; <file_sep>/src/scenes/Home/components/LogoSection/index.js import React from 'react'; import { useTranslation } from 'react-i18next'; import { Row, Col } from 'antd'; import LogoImgOne from '../../../../images/home/Stripe_logo.png'; import LogoImgTwo from '../../../../images/home/covid_accelerator_logo.png'; import LogoImgThree from '../../../../images/home/Bloomberg_logo.png'; import './style.scss'; const LogoSection = () => { const { t } = useTranslation(); return ( <div className="logo-section"> <div className="top-bar"> <p>{t('HOME.SPONSORED.POW')}</p> <a href="//stripe.com" target="_blank" rel="noopener noreferrer"> <img src={LogoImgOne} alt={t('HOME.SPONSORED.POW_ALT')} /> </a> </div> <Row className="bot-bar" justify="center"> <Col className="logo-col" xs={20} md={12} xl={8}> <p>{t('HOME.SPONSORED.PART')}</p> <a href="//covidaccelerator.com" target="_blank" rel="noopener noreferrer"> <img src={LogoImgTwo} alt={t('HOME.SPONSORED.PART_ALT')} /> </a> </Col> <Col className="logo-col" xs={20} md={12} xl={8}> <p>{t('HOME.SPONSORED.MEP')}</p> <a href="//bloomberg.com/news/articles/2020-04-16/coronavirus-forces-silicon-valley-to-reset-its-moral-compass" target="_blank" rel="noopener noreferrer"> <img src={LogoImgThree} alt={t('HOME.SPONSORED.MEP_ALT')} /> </a> </Col> </Row> </div> ); }; export default LogoSection; <file_sep>/src/scenes/Resources/components/Section/index.js import React from 'react'; import PropTypes from 'prop-types'; import { Row, Col } from 'antd'; import './style.scss'; const Section = ({ name, links }) => { return ( <Row className="resource-section" justify="center"> <Col span={18}> <h3>{name}</h3> {links.map(res => ( <Row className="resource" key={res.link} justify="left"> <Col> <a href={res.link} target="_blank" rel="noopener noreferrer">{res.name}</a> { res.desc && ( <p>{res.desc}</p> ) } </Col> </Row> ))} </Col> </Row> ); }; Section.propTypes = { name: PropTypes.string.isRequired, links: PropTypes.array.isRequired, }; export default Section; <file_sep>/src/components/customIcons/CopyLinkIcon.js import React from 'react'; import Icon from '@ant-design/icons'; const copyLinkSvg = () => ( <svg xmlns="http://www.w3.org/2000/svg" data-name="Layer 1" viewBox="0 0 32 40" x="0px" y="0px"> <path d="M24,29H12a3,3,0,0,1-3-3V10a3,3,0,0,1,3-3H24a3,3,0,0,1,3,3V26A3,3,0,0,1,24,29ZM12,9a1,1,0,0,0-1,1V26a1,1,0,0,0,1,1H24a1,1,0,0,0,1-1V10a1,1,0,0,0-1-1Z" fill="#006699" /> <path d="M10,25H8a3,3,0,0,1-3-3V6A3,3,0,0,1,8,3H20a3,3,0,0,1,3,3V8a1,1,0,0,1-2,0V6a1,1,0,0,0-1-1H8A1,1,0,0,0,7,6V22a1,1,0,0,0,1,1h2a1,1,0,0,1,0,2Z" 
fill="#006699" /> </svg> ); const CopyLinkIcon = props => <Icon {...props} component={copyLinkSvg} />; export default CopyLinkIcon; <file_sep>/src/scenes/Home/components/FeaturedSection/mockBusinesses.js import yourAvGuyImg from '../../../../images/your-AV-guy.jpg'; import ckNailSpaImg from '../../../../images/kathy-huynh-CK.jpg'; import lastMinuteGearImg from '../../../../images/last-minute-gear-james-dong.jpg'; import stBernardImg from '../../../../images/joseph-nguyen.jpg'; export const mockBusinesses = t => [ { title: t('FEATURED.0.TITLE'), image: lastMinuteGearImg, subText: t('FEATURED.0.DESC'), linkID: 'FEATURED.0.LINK', loc: t('FEATURED.0.LOCATION'), googlePlaceId: 'Ch<KEY>', }, { title: t('FEATURED.1.TITLE'), image: ckNailSpaImg, subText: t('FEATURED.1.DESC'), linkID: 'FEATURED.1.LINK', loc: t('FEATURED.1.LOCATION'), googlePlaceId: 'Ch<KEY>', }, { title: t('FEATURED.2.TITLE'), image: yourAvGuyImg, subText: t('FEATURED.2.DESC'), linkID: 'FEATURED.2.LINK', loc: t('FEATURED.2.LOCATION'), googlePlaceId: 'Ch<KEY>', }, { title: t('FEATURED.3.TITLE'), image: stBernardImg, subText: t('FEATURED.3.DESC'), linkID: 'FEATURED.3.LINK', loc: t('FEATURED.3.LOCATION'), googlePlaceId: 'Ch<KEY>', }, ]; <file_sep>/src/scenes/About/components/MeetTeamSection/style.scss @import 'styles/colors.scss'; .meet-team-section { .section-title { margin: 46px 0; } .team-members { .team-member:nth-child(odd) { background-color: $tertiary-200; } } } <file_sep>/src/scenes/Contact/index.js import React from 'react'; import { Button, Row, Col } from 'antd'; import { Link } from 'react-router-dom'; import { useTranslation, Trans } from 'react-i18next'; import { CONTACT_EMAIL } from 'constants.js'; import './style.scss'; const Contact = () => { const { t } = useTranslation(); return ( <div className="contact container"> <h2>{t('CONTACT.HEADER')}</h2> <Row justify="center"> <Col xs={22} sm={20} md={12}> <p className="description"> <Trans i18nKey="CONTACT.DESC"> If you have a question, idea or comment about Community Carecard, email us at <a href={`mailto:${CONTACT_EMAIL}`} target='_blank' rel='noopener noreferrer'>{{ email: CONTACT_EMAIL }}</a> You can also visit our Frequently Asked Questions. </Trans> </p> </Col> </Row> <Row justify="center"> <Link to="/faq" className="links"> <Button size="large" type="primary"> {t('CONTACT.FAQ_LINK')} </Button> </Link> </Row> </div> ); }; export default Contact; <file_sep>/src/scenes/Business/components/SupportForm/components/ConfirmationModal/index.js import React, { useState } from 'react'; import PropTypes from 'prop-types'; import { useTranslation } from 'react-i18next'; import { Modal, Button, Row, Col, Checkbox, Input, Form, } from 'antd'; import './style.scss'; import ItemizedSection from './components/ItemizedSection'; import TotalSection from './components/TotalSection'; import { dollarFormat, fees } from '../../../../../../services/Numbers'; const ConfirmationModal = (props) => { const { businessName, giftCardAmount, giftCardQuantity, donationAmount, total, onOk, onCancel, onClickTerms, visible, submitting } = props; const { t } = useTranslation(); const [hasTeamTip, setHasTeamTip] = useState(false); const totalBeforeFee = (total + (hasTeamTip ? 
1 : 0)); const totalAfterFee = dollarFormat((totalBeforeFee + fees.USD.fixed) / (1 - fees.USD.percent)); const processingFee = dollarFormat(totalAfterFee - totalBeforeFee); const handleChangeCheckbox = e => setHasTeamTip(e.target.checked); const handleCancel = () => onCancel(); const handleOk = (event) => { onOk({ businessName, giftCardAmount, giftCardQuantity, hasTeamTip, donationAmount, totalBeforeFee, processingFee, customerEmail: event.email, }); }; return ( <React.Fragment> <Modal className="confirmation-modal" closable={false} visible={visible} title={null} bodyStyle={{ paddingTop: 40, paddingBottom: 40, paddingLeft: 45, paddingRight: 45, }} footer={null} > <Form onFinish={handleOk} layout="vertical" size="large" > <h2 className="title">{t('CONFIRMATION_MODAL.HEADER')}</h2> <p className="please-check">{t('CONFIRMATION_MODAL.PLEASE_CHECK')}</p> <h6 className="confirmation-modal-business"> {businessName} </h6> <ItemizedSection giftCardAmount={dollarFormat(giftCardAmount)} giftCardQuantity={giftCardQuantity} donationAmount={dollarFormat(donationAmount)} teamTip={hasTeamTip} /> <Row justify="end"> <Col xs={20} sm={14} md={10}> <TotalSection total={totalAfterFee} subtotal={dollarFormat(totalBeforeFee)} fee={processingFee} /> </Col> </Row> <Row className="email-row"> <Col xs={24}> <Form.Item name="email" label={`${t('CONFIRMATION_MODAL.EMAIL')}:`} rules={[{ required: true, type: 'email', message: t('CONFIRMATION_MODAL.EMAIL_ERROR') }]} validateTrigger={['onBlur']} > <Input /> </Form.Item> </Col> </Row> <Row> <Col xs={24}> <Checkbox onChange={handleChangeCheckbox} /> <span className="coffee-on-me"> {t('CONFIRMATION_MODAL.CCC_TIP_DESC')} </span> </Col> <Col xs={24}> <div className="support-care-team">{t('CONFIRMATION_MODAL.CCC_TIP_SUB_DESC')}</div> </Col> <Col xs={24}> <div className="terms-of-service"> <span>{t('SHARED.ACCEPT_TOS')}</span> <span className="tos-link" onClick={onClickTerms}> {t('SHARED.TOS')} </span> </div> </Col> </Row> <Row> <Col xs={10} md={8}> <Button type="default" onClick={handleCancel} className="cancel-button" > {t('CONFIRMATION_MODAL.GO_BACK_CAPS')} </Button> </Col> <Col md={{ offset: 8, span: 8 }} xs={{ offset: 4, span: 10 }}> <Button type="primary" htmlType="submit" className="continue-button" loading={submitting} > {t('CONFIRMATION_MODAL.CONTINUE_CAPS')} </Button> </Col> </Row> </Form> </Modal> </React.Fragment> ); }; ConfirmationModal.propTypes = { businessName: PropTypes.string.isRequired, donationAmount: PropTypes.number, giftCardAmount: PropTypes.number, giftCardQuantity: PropTypes.number, onOk: PropTypes.func.isRequired, onCancel: PropTypes.func.isRequired, onClickTerms: PropTypes.func.isRequired, total: PropTypes.number.isRequired, visible: PropTypes.bool, submitting: PropTypes.bool.isRequired, }; ConfirmationModal.defaultProps = { donationAmount: 0, giftCardAmount: 0, giftCardQuantity: 0, visible: true, }; export default ConfirmationModal; <file_sep>/src/scenes/About/components/AboutSection/index.js import React from 'react'; import { useTranslation, Trans } from 'react-i18next'; import { Col, Row } from 'antd'; import './style.scss'; const AboutSection = () => { const { t } = useTranslation(); return ( <div className="about-section"> <h2 className="title"> {t('ABOUT.HEADER')}</h2> <Row justify="center" > <Col span={20}> <p className="about-text"> <Trans i18nKey="ABOUT.PARAGRAPH_1" /> </p> <p className="about-text"> <Trans i18nKey="ABOUT.PARAGRAPH_2" /> </p> <p className="about-text"> <Trans i18nKey="ABOUT.PARAGRAPH_3" /> </p> </Col> </Row> </div> ); }; 
export default AboutSection; <file_sep>/src/services/Numbers.js export const dollarFormat = (calc) => Number(calc).toFixed(2); export const fees = { USD: { percent: 0.029, fixed: 0.30 } }<file_sep>/src/scenes/ForBusinesses/style.scss .for-businesses { text-align: center; padding: 20px 0 40px 0; h2 { padding-top: 32px; padding-bottom: 16px; } p { text-align: left; font-size: 16px; } .action-button { margin-top: 4px; margin-bottom: 16px; } }<file_sep>/src/scenes/Home/components/Testimonial/style.scss @import 'styles/colors.scss'; @import 'styles/screenSizes.scss'; .testimonial { background-image: url('../../../../images/textures/testimonial-texture-small.png'); background-size: auto 100%; background-color: $tertiary-300; background-position: center; background-repeat: no-repeat; text-align: center; padding: 84px 0 60px 0; @include large-desktop { background-image: url('../../../../images/textures/testimonial-texture-large.png'); } .quote { font-size: 16px; color: $gray-700; font-weight: 300; b { color: $blue-700; font-weight: 500; } } .user-info { display: flex; align-items: center; justify-content: center; margin-top: 10px; margin-left: 50px; .user-names { font-size: 14px; text-align: left; margin-left: 20px; .owner-name { font-weight: bold; } } } } <file_sep>/src/i18n.js import i18n from "i18next"; import { initReactI18next } from "react-i18next"; import en from './translations/en'; const resources = { en }; i18n.use(initReactI18next).init({ resources, lng: "en", keySeparator: ".", interpolation: { escapeValue: false }, returnObjects: true, react: { transSupportBasicHtmlNodes: true, transKeepBasicHtmlNodesFor: ['br', 'strong', 'b', 'i', 'p', 'a'] } }); export default i18n;<file_sep>/src/components/PlaceSearch/index.js import React, { useEffect } from 'react'; import PropTypes from 'prop-types'; import { Input } from 'antd'; import { useScript } from 'hooks/useScript'; import { googlePlacesApiUrl } from 'services/urlHelper'; const PlaceSearch = ({ placeholder, onSelect }) => { const [googleApiLoaded] = useScript(googlePlacesApiUrl()); useEffect(() => { if (!googleApiLoaded) return; const autocomplete = new window.google.maps.places.Autocomplete(document.getElementById('place-search')); autocomplete.setTypes(['establishment']); const listener = autocomplete.addListener('place_changed', () => { const placeId = autocomplete.getPlace().place_id; onSelect(placeId); }); return () => { window.google.maps.event.removeListener(listener); }; }, [onSelect, googleApiLoaded]); return ( <Input id="place-search" placeholder={placeholder} size="large" /> ); }; PlaceSearch.propTypes = { placeholder: PropTypes.string, onSelect: PropTypes.func.isRequired, }; PlaceSearch.defaultProps = { placeholder: '', }; export default PlaceSearch; <file_sep>/src/scenes/Business/components/SupportForm/components/ConfirmationModal/components/ItemizedSection/index.js import React from 'react'; import PropTypes from 'prop-types'; import { useTranslation } from 'react-i18next'; import { Row, Col, Divider } from 'antd'; import './style.scss'; const ItemizedSection = (props) => { const { t } = useTranslation(); const displayGiftCardRow = props.giftCardAmount && props.giftCardQuantity > 0; const displayDonationRow = props.donationAmount > 0; return ( <div className="itemized-section-container"> <Row className="itemized-section-header"> <Col span={8}> <span>{t('CONFIRMATION_MODAL.ITEMIZED.ITEM')}</span> </Col> <Col span={8}> <span>{t('CONFIRMATION_MODAL.ITEMIZED.VALUE')}</span> </Col> <Col span={8}> 
<span>{t('CONFIRMATION_MODAL.ITEMIZED.QUANTITY')}</span> </Col> </Row> {displayGiftCardRow && ( <React.Fragment> <Divider className="itemized-section-divider"/> <Row className="itemized-section-row"> <Col span={8}> <span>{t('CONFIRMATION_MODAL.ITEMIZED.GIFT_CARD')}</span> </Col> <Col span={8}> <span>${props.giftCardAmount}</span> </Col> <Col span={8}> <span>{props.giftCardQuantity}</span> </Col> </Row> </React.Fragment> )} {displayDonationRow && ( <React.Fragment> <Divider className="itemized-section-divider"/> <Row className="itemized-section-row"> <Col span={8}> <span>{t('CONFIRMATION_MODAL.ITEMIZED.DONATION')}</span> </Col> <Col span={8}> <span>${props.donationAmount}</span> </Col> <Col span={8}> <span>1</span> </Col> </Row> </React.Fragment> )} {props.teamTip && ( <React.Fragment> <Divider className="itemized-section-divider"/> <Row className="itemized-section-row"> <Col span={8}> <span>{t('CONFIRMATION_MODAL.ITEMIZED.COFFEE')}</span> </Col> <Col span={8}> <span>$1.00</span> </Col> <Col span={8}> <span>1</span> </Col> </Row> </React.Fragment> )} <Divider className="itemized-section-divider"/> </div> ); }; ItemizedSection.propTypes = { giftCardAmount: PropTypes.string, giftCardQuantity: PropTypes.number, donationAmount: PropTypes.string, teamTip: PropTypes.bool } export default ItemizedSection; <file_sep>/src/components/Footer/index.js import React, { useState } from 'react'; import { useTranslation } from 'react-i18next'; import { Layout } from 'antd'; import TermsOfUseModal from 'components/TermsOfUse'; import { privacyPolicyPath, contactPath } from 'services/routesHelper'; import { CCC_INSTAGRAM__URL, CCC_FACEBOOK_URL, CCC_TWITTER_URL, INSTAGRAM_LINK_TEXT, TWITTER_LINK_TEXT, FACEBOOK_LINK_TEXT, } from './constants'; import './style.scss'; const Footer = () => { const { t } = useTranslation(); const [termsVisibility, setTermsVisibility] = useState(false); return ( <Layout.Footer className="footer"> <div className="footer-content"> <div className="footer-section"> <a className="internal-link" href={contactPath()}> {t('SHARED.CONTACT')} </a> <div className="internal-link" onClick={() => setTermsVisibility(true)}> {t('SHARED.TOS')} </div> <a className="internal-link" href={privacyPolicyPath()}> {t('SHARED.PRIVACY_POLICY')} </a> </div> <div className="footer-section"> <a className="external-link" href={CCC_INSTAGRAM__URL} target="_blank" rel="noopener noreferrer"> {INSTAGRAM_LINK_TEXT} </a> <a className="external-link" href={CCC_TWITTER_URL} target="_blank" rel="noopener noreferrer"> {TWITTER_LINK_TEXT} </a> <a className="external-link" href={CCC_FACEBOOK_URL} target="_blank" rel="noopener noreferrer"> {FACEBOOK_LINK_TEXT} </a> </div> </div> <TermsOfUseModal onCancel={() => setTermsVisibility(false)} visible={termsVisibility} /> </Layout.Footer> ); }; export default Footer; <file_sep>/src/scenes/Business/components/SupportForm/components/ConfirmationModal/components/TotalSection/index.js import React from 'react'; import PropTypes from 'prop-types'; import { useTranslation } from 'react-i18next'; import { Divider } from 'antd'; import './style.scss'; const TotalSection = (props) => { const { t } = useTranslation(); return ( <div className="confirm-total-section"> <div> <span className="left-col">{t('CONFIRMATION_MODAL.SUBTOTAL')}:</span> <span className="right-col">${props.subtotal}</span> </div> <div> <span className="left-col">{t('CONFIRMATION_MODAL.PROCESSING_FEE')}:</span> <span className="right-col">${props.fee}</span> </div> <Divider className="confirm-total-divider"/> <div> <span 
className="left-bottom">{t('CONFIRMATION_MODAL.TOTAL')}:</span> <span className="right-bottom">${props.total}</span> </div> </div> ); }; TotalSection.propTypes = { total: PropTypes.string.isRequired, subtotal: PropTypes.string.isRequired, fee: PropTypes.string.isRequired }; export default TotalSection; <file_sep>/src/scenes/Home/components/FeaturedSection/constants.js export const NUM_BUSINESSES_SHOWN = 3; <file_sep>/src/scenes/About/index.js import React from 'react'; import AboutSection from './components/AboutSection'; import MeetTeamSection from './components/MeetTeamSection'; import OurValuesSection from './components/OurValuesSection'; import './style.scss'; const About = () => ( <div className="about container"> <AboutSection /> <MeetTeamSection /> <OurValuesSection /> </div> ); export default About; <file_sep>/src/scenes/PrivacyPolicy/index.js import React from 'react'; import { useTranslation } from 'react-i18next'; import { Policy } from './components/Policy'; import './style.scss'; const PrivacyPolicy = () => { const { t } = useTranslation(); return ( <div className="privacy-policy container"> <h2>{t('SHARED.PRIVACY_POLICY')}</h2> <Policy /> </div> ); }; export default PrivacyPolicy; <file_sep>/src/scenes/FAQ/components/Section/index.js import React from 'react'; import { PropTypes } from 'prop-types'; import { useTranslation, Trans } from 'react-i18next'; import { Row, Collapse } from 'antd'; import './style.scss'; const Section = ({ type }) => { const { t } = useTranslation(); const QuestionList = `FAQ.${type}`; return ( <section className="faq-category"> <Row justify="center"> <div className="container"> <h3>{type}</h3> <Collapse className="custom-collapse" bordered={false}> {t(QuestionList).map((_, index) => ( <Collapse.Panel className="custom-panel" header={t(`${QuestionList}.${index}.Q`)}> <Trans i18nKey={`${QuestionList}.${index}.A`} /> </Collapse.Panel> ))} </Collapse> </div> </Row> </section> ); }; Section.propTypes = { type: PropTypes.string.isRequired, }; export default Section; <file_sep>/src/components/Header/index.js import React, { useState } from 'react'; import { Link, useLocation, useHistory } from 'react-router-dom'; import { Button, Drawer, Layout, Menu, } from 'antd'; import { useTranslation } from 'react-i18next'; import { MenuOutlined } from '@ant-design/icons'; import { useWindowWidth } from '@react-hook/window-size'; import { emitClickEvent } from 'services/Analytics'; import { isPhone } from 'services/deviceWidthHelper'; import logo from 'images/logo.png'; import icon from 'images/icon.png'; import './style.scss'; const Header = () => { const width = useWindowWidth(); const [drawerOpen, setDrawerOpen] = useState(false); const location = useLocation(); const { pathname } = location; const { t } = useTranslation(); const history = useHistory(); const handleOnboard = () => { emitClickEvent('Button', 'I have a business'); history.push('/onboard'); }; const toggleDrawerOpen = () => setDrawerOpen(!drawerOpen); const renderButton = () => ( <Button className="btn-outlined-secondary business-button" onClick={handleOnboard}> {t('HEADER.I_HAVE_BUSINESS')} </Button> ); const renderMenu = () => ( <Menu className="header-menu" selectedKeys={[pathname]} mode={isPhone(width) ? 
'inline' : 'horizontal'} onSelect={() => setDrawerOpen(false)} > <Menu.Item key="/"> <Link to="/"> <span title={t('HEADER.NAV.HOME')}> {t('HEADER.NAV.HOME')} </span> </Link> </Menu.Item> <Menu.Item key="/about"> <Link to="/about"> <span title={t('HEADER.NAV.ABOUT_US')}> {t('HEADER.NAV.ABOUT_US')} </span> </Link> </Menu.Item> <Menu.Item key="/faq"> <Link to="/faq"> <span title={t('HEADER.NAV.FAQ')}> {t('HEADER.NAV.FAQ')} </span> </Link> </Menu.Item> {isPhone(width) && ( <Menu.Item key="/onboard"> <Link to="/onboard"> <span title={t('HEADER.I_HAVE_BUSINESS')}> {t('HEADER.I_HAVE_BUSINESS')} </span> </Link> </Menu.Item> )} </Menu> ); return ( <Layout.Header className="header"> <div className="header-content"> <Link to="/"> { isPhone(width) ? ( <img src={icon} alt="" className="ccc-icon" /> ) : ( <img src={logo} alt="" className="logo" /> ) } </Link> <div className="header-right"> {isPhone(width) ? ( <React.Fragment> <MenuOutlined className="hamburger-icon" onClick={toggleDrawerOpen} /> <Drawer className="mobile-drawer" onClose={() => setDrawerOpen(false)} placement="right" visible={drawerOpen} > {renderMenu()} </Drawer> </React.Fragment> ) : ( <React.Fragment> {renderMenu()} {renderButton()} </React.Fragment> )} </div> </div> </Layout.Header> ); }; export default Header; <file_sep>/src/scenes/Business/components/SupportForm/components/ConfirmationModal/style.scss @import 'styles/colors.scss'; .confirmation-modal { .title { text-align: center; font-size: 24px; } .please-check { text-align: center; font-size: 12px; } .confirmation-modal-business { text-align: center; } .support-care-team { font-size: 12px; margin-bottom: 15px; } .coffee-on-me { font-size: 16px; font-weight: 500; margin-left: 10px; } .terms-of-service { text-align: center; font-size: 12px; margin-bottom: 26px; .tos-link { font-weight: bold; margin-left: 4px; color: $gray-700; text-decoration: underline; cursor: pointer; } } .cancel-button { width: 100%; height: 44px; color: $blue-700; border-color: $blue-700; font-size: 12px; font-weight: bold; } .continue-button { width: 100%; height: 44px; font-size: 12px; font-weight: bold; } } <file_sep>/src/services/routesHelper.js export const onboardPath = (id, name) => { let path = '/onboard'; if (id) path += `/${id}`; if (name) path += `?businessName=${name}`; return path; }; export const privacyPolicyPath = () => '/privacy-policy'; export const contactPath = () => '/contact'; <file_sep>/src/scenes/Business/services/index.js import { ALERT_BUSINESS_ACTIVE, ALERT_BUSINESS_CLAIMED, ALERT_BUSINESS_PENDING, } from '../../../constants'; import { ALERT_GENERIC_ERROR, ALERT_ONBOARDING_SUCCESSFUL } from '../constants'; export const alertPropsForType = (type, t) => { let props = null; if (type === ALERT_BUSINESS_ACTIVE) { props = { type: 'success', description: t('BUSINESS.ALERT.ACTIVE') }; } else if (type === ALERT_BUSINESS_CLAIMED) { props = { type: 'info', description: t('BUSINESS.ALERT.CLAIMED') }; } else if (type === ALERT_BUSINESS_PENDING) { props = { type: 'info', description: t('BUSINESS.ALERT.PENDING') }; } else if (type === ALERT_ONBOARDING_SUCCESSFUL) { props = { type: 'success', description: t('BUSINESS.ONBOARD_SUCCESSFUL') }; } else if (type === ALERT_GENERIC_ERROR) { props = { type: 'error', description: t('SHARED.GENERIC_ERROR') }; } return props; }; <file_sep>/src/scenes/ForBusinesses/index.js import React from 'react'; import { Link } from 'react-router-dom'; import { Button, Row, Col } from 'antd'; import { useTranslation, Trans } from 'react-i18next'; import Description from 
'./components/Description'; import OnboardFlow from './components/OnboardFlow'; import './style.scss'; const ForBusinesses = () => { const { t } = useTranslation(); return ( <div className="for-businesses container"> <h2>{t('FOR_BIZ.HEADER')}</h2> <Description /> <OnboardFlow /> <Row justify="center"> <Col span={20}> <p>{t('FOR_BIZ.FOOTER_1')}</p> <Link to="/onboard"> <Button className="action-button" size="large">{t('FOR_BIZ.REGISTER_BIZ')}</Button> </Link> </Col> </Row> <Row justify="center"> <Col span={20}> <p> <Trans i18nKey="FOR_BIZ.FOOTER_2"> Check out our <a href='/business-resources' target='_blank' rel='noopener noreferrer'>Resources page</a> also for additional resources on navigating the coronavirus pandemic. </Trans> </p> </Col> </Row> </div> ) } export default ForBusinesses;<file_sep>/src/scenes/index.js export { default as About } from './About'; export { default as Business } from './Business'; export { default as FAQ } from './FAQ'; export { default as Contact } from './Contact'; export { default as Home } from './Home'; export { default as Onboard } from './Onboard'; export { default as Confirmation } from './Confirmation'; export { default as Resources } from './Resources'; export { default as PrivacyPolicy } from './PrivacyPolicy'; export { default as ForBusinesses } from './ForBusinesses'; <file_sep>/src/scenes/Business/components/UnclaimedBusiness/style.scss @import 'styles/colors.scss'; @import 'styles/antdOverrides/button.scss'; @import 'styles/screenSizes.scss'; .unclaimed-business { .owner-section, .customer-section { display: flex; align-items: flex-end; } .subhead { color: $blue-700; font-size: 20px; font-weight: bold; margin-top: 24px; } .description { color: $blue-700; font-size: 16px; line-height: 20px; margin-top: 12px; margin-right: 24px; } .social-buttons { padding-bottom: 30px; @include mobile { margin-top: 24px; } } button[type="button"] { min-width: 100%; margin-top: 24px; } } <file_sep>/src/components/Show404/index.js import React from 'react'; import { Link } from 'react-router-dom'; import { Row, Button } from 'antd'; import { useTranslation } from 'react-i18next'; import img404 from '../../images/404.png'; import './style.scss'; const Show404 = () => { const { t } = useTranslation(); return ( <div className="not-found"> <h2>{t('NOT_FOUND.HEADER')}</h2> <Row justify="center"> <img src={img404} alt="Page not found" /> </Row> <p className="subheader">{t('NOT_FOUND.SUBHEADER')}</p> <Link to="/home"> <Button size="large"> {t('NOT_FOUND.ACTION')} </Button> </Link> </div> ) }; export default Show404; <file_sep>/src/scenes/Contact/style.scss .contact { text-align: center; padding-top: 64px; .links { padding: 10px 20px 60px 20px; } .description { font-size: 16px; margin-bottom: 40px; } } <file_sep>/src/scenes/Resources/style.scss .resources { padding: 64px 0; .top { margin-bottom: 32px; .title { text-align: center; } } .bottom { margin-top: 32px; } }<file_sep>/src/scenes/Resources/Links.js export const General = (t) => [ { name: t('RESOURCES.GENERAL.BIZ_CONT'), link: 'https://www.honeybook.com/risingtide/business-continuity-plan-for-small-business' }, { name: t('RESOURCES.GENERAL.NRA'), link: 'https://restaurant.org/Covid19' } ] export const CaresAct = (t) => [ { name: t('RESOURCES.CARES.BREX_1'), link: 'https://brex.com/blog/cares-act-advice/' }, { name: t('RESOURCES.CARES.BREX_2'), link: 'https://brex.com/webinars/how-startups-can-take-advantage-sba-loans/' }, { name: t('RESOURCES.CARES.TREASURY'), link: 
'https://home.treasury.gov/policy-issues/top-priorities/cares-act/assistance-for-small-businesses' }, { name: t('RESOURCES.CARES.PEEK'), link: 'https://www.womply.com/peek/?utm_source=peek&utm_medium=marketing_email&utm_campaign=ppp&utm_content=leadgen' }, { name: t('RESOURCES.CARES.FAIRE'), link: 'https://blog.faire.com/thestudio/covid19-federal-relief-is-available/?utm_source=blog_newsletter&utm_medium=email&utm_campaign=20200405&utm_content=ppp&mc_cid=4916545963&mc_eid=b6779e5358' }, { name: t('RESOURCES.CARES.PPP'), link: 'https://home.treasury.gov/system/files/136/PPP%20Borrower%20Information%20Fact%20Sheet.pdf' }, { name: t('RESOURCES.CARES.PREGUNTAS'), link: 'https://www.rubio.senate.gov/public/_cache/files/3aba21e8-3fb3-4844-a217-1e8fb3334e93/C8D670B7E3FF7FEFAE1B749369D55054.paycheck-protection-program-faqs-for-small-businesses-in-spanish-final.pdf' }, { name: t('RESOURCES.CARES.FAIRE_2'), link: 'https://www.faire.com/cares-act' }, { name: t('RESOURCES.CARES.SBA'), link: 'https://www.inc.com/brit-morse/sba-loans-coronavirus-stimulus-package-cares-act.html' }, { name: t('RESOURCES.CARES.STIM'), link: 'https://www.gravysolutions.io/post/how-your-business-can-take-advantage-of-the-2t-stimulus-package' }, { name: t('RESOURCES.CARES.GUIDE'), link: 'https://www.sbc.senate.gov/public/_cache/files/9/7/97ac840c-28b7-4e49-b872-d30a995d8dae/F2CF1DD78E6D6C8C8C3BF58C6D1DDB2B.small-business-owner-s-guide-to-the-cares-act-final-.pdf' } ]; export const Federal = (t) => [ { name: t('RESOURCES.FEDERAL.IRS.NAME'), desc: t('RESOURCES.FEDERAL.IRS.DESC'), link: 'https://www.irs.gov/coronavirus' }, { name: t('RESOURCES.FEDERAL.SBA.NAME'), desc: t('RESOURCES.FEDERAL.SBA.DESC'), link: 'https://www.sba.gov/disaster-assistance/coronavirus-covid-19' }, { name: t('RESOURCES.FEDERAL.SBA_2.NAME'), desc: t('RESOURCES.FEDERAL.SBA_2.DESC'), link: 'https://disasterloan.sba.gov/ela/' }, { name: t('RESOURCES.FEDERAL.SBA_ONE_PAGER'), link: 'https://cameonetwork.org/wp-content/uploads/2020/03/SBA-Disaster-One-Pager.pdf' }, { name: t('RESOURCES.FEDERAL.SBA_FIND'), link: 'https://www.sba.gov/local-assistance/find/?type=SBA%20District%20Office&pageNumber=1' }, { name: t('RESOURCES.FEDERAL.SBDC.NAME'), desc: t('RESOURCES.FEDERAL.SBDC.DESC'), link: 'https://americassbdc.org/small-business-consulting-and-training/find-your-sbdc' }, { name: t('RESOURCES.FEDERAL.ECON'), link: 'https://edcollaborative.com/wp-content/uploads/2020/03/EDC-COVID19-BUSINESS-RESOURCE-GUIDE-1.pdf' }, { name: t('RESOURCES.FEDERAL.PERKINS'), link: 'https://www.perkinscoie.com/en/practices/litigation/insurance-law/coronavirus.html' }, { name: t('RESOURCES.FEDERAL.LEAVE'), link: 'https://advocacy.calchamber.com/2020/03/21/covid-19-federal-paid-leaves-explained/?utm_campaign=Daily-Headlines%20032320%20(2)&utm_source=Silverpop&utm_medium=Email&spMailingID=64368589&spUserID=MjM0ODIwMzYzNjQ1S0&spJobID=1842830252&spReportId=MTg0MjgzMDI1MgS2' } ] export const Commercial = (t) => [ { name: t('RESOURCES.COMMERCIAL.AMZN'), link: 'https://amazonsmallbusinessrelief.force.com/SelfRegisterPage' }, { name: t('RESOURCES.COMMERCIAL.FB'), link: 'https://www.facebook.com/business/boost/grants' }, { name: t('RESOURCES.COMMERCIAL.DIGITAL'), link: 'https://digital.com/blog/small-business-grants/' }, { name: t('RESOURCES.COMMERCIAL.UBER.NAME'), desc: t('RESOURCES.COMMERCIAL.UBER.DESC'), link: 
'https://www.ubereats.com/?utm_source=AdWords_Brand&utm_campaign=search-google-brand_1_5_us-newyorkcity_e_txt_acq_cpc_en-us_%2Bubereat_aud-295840112290:kwd-128433093649_373015834279_33934106059_b_c&campaign_id=620791397&adg_id=33934106059&fi_id=&match=b&net=g&dev=c&dev_m=&cre=373015834279&kwid=aud-295840112290:kwd-128433093649&kw=%2Bubereat&placement=&tar=&&utm_source=AdWords_Brand&utm_campaign=search-google-brand_1_5_us-newyorkcity_e_txt_acq_cpc_en-us_%2Bubereat_aud-295840112290:kwd-128433093649_373015834279_33934106059_b_c&campaign_id=620791397&adg_id=33934106059&fi_id=&match=b&net=g&dev=c&dev_m=&cre=373015834279&kwid=aud-295840112290:kwd-128433093649&kw=%2Bubereat&placement=&tar=&gclsrc=aw.ds&gclid=CjwKCAjwvOHzBRBoEiwA48i6Am5FrWRTkue1gh67Dm_v8sdrJ4Lsn0VRIQ3wf8_9Q2cUPkPh6fNlLxoCy7gQAvD_BwE&gclsrc=aw.ds' }, { name: t('RESOURCES.COMMERCIAL.GOOGLE.NAME'), desc: t('RESOURCES.COMMERCIAL.GOOGLE.DESC'), link: 'https://cloud.google.com/blog/products/g-suite/helping-businesses-and-schools-stay-connected-in-response-to-coronavirus' } ] export const FinancialAid = (t) => [ { name: t('RESOURCES.FIN_AID.GUSTO'), link: 'https://gusto.com/blog/business-finance/coronavirus-relief-resources' }, { name: t('RESOURCES.FIN_AID.KIVA.NAME'), desc: t('RESOURCES.FIN_AID.KIVA.DESC'), link: 'https://www.kiva.org/borrow' }, { name: t('RESOURCES.FIN_AID.RELIEF'), link: 'https://www.eater.com/2020/3/17/21182293/coronavirus-relief-funds-restaurants-food-service-workers' }, { name: t('RESOURCES.FIN_AID.DUA'), link: 'https://oui.doleta.gov/unemploy/disaster.asp' }, { name: t('RESOURCES.FIN_AID.FILING_CA'), link: 'https://www.edd.ca.gov/Unemployment/Filing_a_Claim.htm' }, ] export const Health = (t) => [ { name: t('RESOURCES.HEALTH.HEADSPACE.NAME'), desc: t('RESOURCES.HEALTH.HEADSPACE.DESC'), link: 'https://www.headspace.com/covid-19' }, { name: t('RESOURCES.HEALTH.CRISIS.NAME'), desc: t('RESOURCES.HEALTH.CRISIS.DESC'), link: 'https://www.crisistextline.org/' }, { name: t('RESOURCES.HEALTH.EMPOWER.NAME'), desc: t('RESOURCES.HEALTH.EMPOWER.DESC'), link: 'https://www.empowerwork.org/' } ]<file_sep>/src/components/customIcons/index.js export { default as CopyLinkIcon } from './CopyLinkIcon'; <file_sep>/src/scenes/Onboard/index.js import React, { useState } from 'react'; import PropTypes from 'prop-types'; import { useHistory } from 'react-router-dom'; import { useTranslation, Trans } from 'react-i18next'; import { Row } from 'antd'; import Alert from 'components/Alert'; import { businessPagePath } from 'services/urlHelper'; import { useQueryParams } from 'hooks/useQueryParams'; import { STATUS_PENDING, STATUS_ACTIVE } from 'constants.js'; import OnboardForm from './components/Form'; import { ALERT_BUSINESS_ACTIVE, ALERT_BUSINESS_CLAIMED, ALERT_BUSINESS_PENDING, } from '../../constants'; import './style.scss'; const Onboard = (props) => { const { match: { params: { id } } } = props; const [alertProps, setAlertProps] = useState(null); const { businessName } = useQueryParams('businessName'); const history = useHistory(); const { t } = useTranslation(); const handleSuccess = (business) => { if (business.status === STATUS_PENDING) { history.push(businessPagePath(business.externalRefId, ALERT_BUSINESS_PENDING)); } else if (business.status === STATUS_ACTIVE) { history.push(businessPagePath(business.externalRefId, ALERT_BUSINESS_ACTIVE)); } else { history.push({ pathname: '/confirmation', state: { title: 'ONBOARD.CONFIRMATION.TITLE', descriptionKey: 'ONBOARD.CONFIRMATION.DESC', businessEmail: business.owner && business.owner.email, 
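          // (These location.state fields are read back by the Confirmation scene.)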
businessName: business.name, }, }); } }; const handleError = (businessId, err) => { if (err.response && err.response.status === 409) { history.push(businessPagePath(businessId, ALERT_BUSINESS_CLAIMED)); } else { setAlertProps({ description: t('SHARED.GENERIC_ERROR'), type: 'error' }); } }; return ( <div className="onboard container"> {alertProps && <Alert {...alertProps} />} <div className="head"> <h2>{t('ONBOARD.TITLE')}</h2> <Row justify="center"> <h6><Trans i18nKey="ONBOARD.DESC" /></h6> </Row> </div> <OnboardForm bizId={id} name={businessName} onError={handleError} onSuccess={handleSuccess} /> </div> ); }; Onboard.propTypes = { match: PropTypes.shape().isRequired, }; export default Onboard; <file_sep>/src/styles/antdOverrides/button.scss @import '../colors.scss'; .ant-btn { &.ant-btn-primary { background-color: $secondary-color; border-color: $secondary-color; &:hover, &:focus, &:active { color: white; background-color: $blue-900; } } &:hover, &:focus, &:active { color: $secondary-color; border-color: $secondary-color; background-color: $tertiary-200; } } .btn-outlined-action { border-color: $primary-color; color: $primary-color; background-color: white; &:hover, &:focus, &:active { background-color: $primary-color; color: white; border-color: $primary-color; } } .btn-outlined-secondary { border-color: $secondary-color; color: $secondary-color; background-color: white; &:hover, &:focus, &:active { background-color: $secondary-color; color: white; border-color: $secondary-color; } } .btn-filled-action { border-color: $primary-color; background-color: $primary-color; color: white; &:hover, &:focus, &:active { background-color: $orange-900; color: white; border-color: $primary-color; } } <file_sep>/src/scenes/Business/constants.js export const ALERT_GENERIC_ERROR = 'generic-error'; export const ALERT_ONBOARDING_SUCCESSFUL = 'onboarding-successful'; <file_sep>/src/App.scss @import 'styles/sizes.scss'; .layout.ant-layout { min-height: 100vh; } .container, .container.ant-menu { max-width: 1024px; margin-left: auto; margin-right: auto; } .content { background-color: white; margin-top: $header-height; min-height: calc(100vh - #{$header-height} - #{$footer-height}); // window height - header height - footer height :/ } <file_sep>/src/scenes/ForBusinesses/components/Description/index.js import React from 'react'; import { Row, Col } from 'antd'; import { useTranslation, Trans } from 'react-i18next'; import './style.scss'; const Description = () => { const { t } = useTranslation(); return ( <div className="for-biz-desc"> <Row justify="center"> <Col span={20}> <p><Trans i18nKey="FOR_BIZ.FIRST" /></p> <p><Trans i18nKey="FOR_BIZ.SECOND" /></p> <p><Trans i18nKey="FOR_BIZ.THIRD" /></p> <p>{t('FOR_BIZ.GET_STARTED')}</p> </Col> </Row> </div> ) } export default Description;<file_sep>/src/scenes/Resources/components/Section/style.scss .resource-section { margin-bottom: 16px; a { font-size: 16px; } p { font-size: 14px; } .resource { padding: 5px 0px 5px 0px; } }
b0e2065465e53d9fe388d67ca16f8c884863f85e
[ "Markdown", "SCSS", "JavaScript" ]
60
Markdown
Kempo/community-carecard
9b95c68dba3f397af37a49296117d0f54b03cc5d
ce6165415716c3eb9b72ebdf54baa63d76dbb2bb
refs/heads/master
<file_sep>implementation-class=com.seiginonakama.res.ResPkgRemakerPlugin
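# This appears to be a Gradle plugin descriptor (normally kept under
# META-INF/gradle-plugins/<plugin-id>.properties; the file path is not shown here).
# It maps the plugin id used in build scripts to the implementation class named above.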
3f8c4a015c95addc5ee561631bc4b5b2d95155f3
[ "INI" ]
1
INI
kenkieo/ResPkgRemaker
419000148ae2dceabc0be25f600ad3d3e0edf739
a50c631fe623167660ea340b12434f8ee71c4da7
refs/heads/branch_marco_viernes
<file_sep>package uytubeLogic.logica; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import uytubeLogic.logica.SystemHandler.Privacidad; public class UsuarioHandler { private Map<String, Usuario> usuarios; private static UsuarioHandler instancia = null; private UsuarioHandler() { usuarios = new HashMap<String, Usuario>(); } public void aniadirUsuario(Usuario usuarioParticular) { usuarios.put(usuarioParticular.getNickname(), usuarioParticular); } public void removerUsuario(Usuario usuarioParticular) { usuarios.remove(usuarioParticular.getNickname()); } public Usuario find(String nickname) { return usuarios.get(nickname); } public boolean memberUsuario(Usuario usuarioParticular) { return usuarios.containsValue(usuarioParticular); } public boolean memberNickname(String nickUsuario) { return usuarios.containsKey(nickUsuario); } public boolean memberEmail(String emailUsuario) { Boolean existe = false; System.out.println("******Email logica:********"+ emailUsuario); for (Map.Entry<String, Usuario> entry : usuarios.entrySet()) { System.out.println("Email en el sistema"); System.out.println(entry.getValue().getEmail()); if (emailUsuario.equals(entry.getValue().getEmail())) existe = true; } return existe; } public String[] listarNicknamesUsuarios() { String[] nickUsuarios = new String[usuarios.size()]; Integer contador = 0; for (Map.Entry<String, Usuario> entry : usuarios.entrySet()) { String nickU = entry.getKey(); nickUsuarios[contador] = nickU; contador++; } return nickUsuarios; } public DtCanal[] listarCanalesPublicosPorNombre(String nombre) { List<DtCanal> listaUsuarios = new ArrayList<DtCanal>(); for (Map.Entry<String, Usuario> entry : usuarios.entrySet()) { if (entry.getValue().mostrarInfoCanal().getPrivado() == Privacidad.PUBLICO && (entry.getValue().mostrarInfoCanal().getNombre().toLowerCase().contains(nombre.toLowerCase()) ||entry.getValue().mostrarInfoCanal().getDescripcion().toLowerCase().contains(nombre.toLowerCase()))) listaUsuarios.add(entry.getValue().mostrarInfoCanal()); } DtCanal[] resultadosBusqueda = listaUsuarios.toArray(new DtCanal[0]); return resultadosBusqueda; } public DtListaReproduccion[] listarLDRPublicasPorNombre(String nombre) { List<DtListaReproduccion> listaLDR = new ArrayList<DtListaReproduccion>(); for (Map.Entry<String, Usuario> entry : usuarios.entrySet()) { DtListaReproduccion[] listasEnVideo = entry.getValue().getListas(); for (DtListaReproduccion lista : listasEnVideo) { if (lista.getPrivado() == Privacidad.PUBLICO && lista.getNombre().toLowerCase().contains(nombre.toLowerCase()) && !listaLDR.contains(lista)) { listaLDR.add(lista); } } } DtListaReproduccion[] resultadosBusqueda = listaLDR.toArray(new DtListaReproduccion[0]); return resultadosBusqueda; } public static UsuarioHandler getInstance() { if (instancia == null) instancia = new UsuarioHandler(); return instancia; } } <file_sep>/** * */ window.onload = function() { activarTab(document.getElementById("tabck-0")); }; //al cargar salga abierta la primera pestaña function activarTab(unTab) { try { //Los elementos div de todas las pestañas están todos juntos en una //única celda de la segunda fila de la tabla de estructura de pestañas. //Hemos de buscar la seleccionada, ponerle display block y al resto //ponerle display none. 
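		// Every tab panel <div> lives in a single cell of the tab table: show the panel
		// matching the clicked tab (display "block") and hide the rest (display "none"),
		// pairing ids "tabck-N" (tab headers) with "tabdiv-N" (panels).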
var id = unTab.id; if (id){ var tr = unTab.parentNode || unTab.parentElement; var tbody = tr.parentNode || tr.parentElement; var table = tbody.parentNode || tbody.parentElement; //Pestañas en varias filas if (table.getAttribute("data-filas")!=null){ var filas = tbody.getElementsByTagName("tr"); var filaDiv = filas[filas.length-1]; tbody.insertBefore(tr, filaDiv); } //Para compatibilizar con la versión anterior, si la tabla no tiene los //atributos data-min y data-max le ponemos los valores que tenían antes del //cambio de versión. var desde = table.getAttribute("data-min"); if (desde==null) desde = 0; var hasta = table.getAttribute("data-max"); if (hasta==null) hasta = MAXTABS; var idTab = id.split("tabck-"); var numTab = parseInt(idTab[1]); //Las "tabdiv" son los bloques interiores mientras que los "tabck" //son las pestañas. var esteTabDiv = document.getElementById("tabdiv-" + numTab); for (var i=desde; i<=hasta; i++) { var tabdiv = document.getElementById("tabdiv-" + i); if (tabdiv) { var tabck = document.getElementById("tabck-" + i); if (tabdiv.id == esteTabDiv.id) { tabdiv.style.display = "block"; tabck.style.color = "slategrey"; tabck.style.borderBottomColor = "rgb(235, 235, 225)"; } else { tabdiv.style.display = "none"; tabck.style.color = "white"; tabck.style.borderBottomColor = "gray"; } } } } } catch (e) { alert("Error al activar una pestaña. " + e.message); } }<file_sep>package uytubeLogic.logica; import java.util.ArrayList; import java.util.HashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; import uytubeLogic.logica.SystemHandler.Privacidad; public class Categoria { private String nombre; private Map<Integer, Video> videos; private List<ListaReproduccion> lDReproduccion; public Categoria(String name) { nombre = name; videos = new HashMap<Integer, Video>(); lDReproduccion = new LinkedList<ListaReproduccion>(); } public void aniadirLDR(ListaReproduccion listaReproduccion) { if (!lDReproduccion.contains(listaReproduccion)) { lDReproduccion.add(listaReproduccion); } } public void removerLDR(ListaReproduccion listaReproduccion) { lDReproduccion.remove(listaReproduccion); } public String getNombre() { return nombre; } public void addVideo(Video video) { videos.put(video.getIDVideo(), video); } public void removerVideo(Video video) { videos.remove(video.getIDVideo()); } /* * public Video find(String s) { return videos.get(s); } */ public DtCategoria getInfoCategoria() { DtCategoria dataTipo = new DtCategoria(this); return dataTipo; } public DtVideo[] listarVideos(Privacidad priv, String nomU) { DtVideo[] resultado; Integer contador = 0; if (priv == null && nomU == null) { resultado = new DtVideo[videos.size()]; for (Map.Entry<Integer, Video> entry : videos.entrySet()) { resultado[contador] = new DtVideo(entry.getValue()); contador++; } } else if(priv!=null && nomU != null){ List<DtVideo> videosPriv = new ArrayList<DtVideo>(); for (Map.Entry<Integer, Video> entry : videos.entrySet()) { if (entry.getValue().getPrivacidad().equals(priv) || entry.getValue().getPropietario().equals(nomU)) { videosPriv.add(new DtVideo(entry.getValue())); } } resultado = videosPriv.toArray(new DtVideo[0]); }else{ resultado = new DtVideo[0]; } return resultado; } public DtListaReproduccion[] listarLDR(Privacidad priv, String nomU) { DtListaReproduccion[] resultado; Integer contador = 0; if (priv == null && nomU == null) { resultado = new DtListaReproduccion[lDReproduccion.size()]; for (ListaReproduccion lDReproduccion : lDReproduccion) { resultado[contador] = 
lDReproduccion.verDetallesListareproduccion(); contador++; } } else if(nomU!=null && priv!=null){ List<DtListaReproduccion> listasPriv = new ArrayList<DtListaReproduccion>(); for (ListaReproduccion lista : lDReproduccion) { DtListaReproduccion entry = lista.toDt(); if (entry.getPrivado().equals(priv) || entry.getPropietario().contentEquals(nomU)) listasPriv.add(entry); } resultado = listasPriv.toArray(new DtListaReproduccion[0]); }else{ resultado=new DtListaReproduccion[0]; } return resultado; } } <file_sep>package uytube.admin.videos; import java.util.ArrayList; import java.awt.Container; import java.awt.event.KeyEvent; import javax.swing.JMenu; import javax.swing.JMenuItem; import uytube.admin.videos.alta.AltaVideoMenuItem; import uytube.admin.videos.consultar.ConsultarVideoMenuItem; import uytube.admin.videos.modificar.ModificarVideoMenuItem;; public final class VideosMenu { private final JMenu menu = new JMenu("Videos"); private final ArrayList<JMenuItem> menuItems = new ArrayList<JMenuItem>(); private final Container container; public VideosMenu(final Container container) { this.container = container; initializeMenu(); } public JMenu getMenu() { return menu; } private void initializeMenu() { initializeMenuItems(); addMenuItemsToMenu(); menu.setMnemonic(KeyEvent.VK_V); } private void initializeMenuItems() { AltaVideoMenuItem altaVideoMenuItem = new AltaVideoMenuItem(container); ConsultarVideoMenuItem consultarVideoMenuItem = new ConsultarVideoMenuItem(container); ModificarVideoMenuItem modificarVideoMenuItem = new ModificarVideoMenuItem(container); menuItems.add(altaVideoMenuItem.getMenuItem()); menuItems.add(consultarVideoMenuItem.getMenuItem()); menuItems.add(modificarVideoMenuItem.getMenuItem()); } private void addMenuItemsToMenu() { for (JMenuItem menuItem : menuItems) { menu.add(menuItem); } } } <file_sep>package uytubeLogic.JUnitTests; import static org.junit.Assert.assertEquals; import java.util.Date; import org.junit.Test; import uytubeLogic.logica.Comentario; import uytubeLogic.logica.DtComentario; import uytubeLogic.logica.DtFecha; import uytubeLogic.logica.UsuarioCtrl; import uytubeLogic.logica.SystemHandler.Privacidad; public class ComentarioTest { @Test public void comentarioFunciones() { String nombreU = "nombreComentario"; UsuarioCtrl UCU = UsuarioCtrl.getInstance(); DtFecha fecha = new DtFecha(new Date(2)); UCU.nuevoUsuario(nombreU, "1234", "pedrito", "gimenez", "email.com", fecha, null, "nombrecito", "descripcion", Privacidad.PUBLICO, null); Comentario coment = new Comentario(1, "texto", fecha, true, nombreU); assertEquals(nombreU, coment.getUsuario().getNickname()); assertEquals(true, coment.getEsPadre()); assertEquals(fecha, coment.getFecha()); assertEquals((Integer) 1, coment.getIDComentario()); assertEquals(fecha, coment.getFecha()); Comentario respuesta = new Comentario(2, "textoR", fecha, false, nombreU); coment.addComentario(respuesta); DtComentario dtRespuesta = new DtComentario(respuesta); DtComentario dtActual = coment.getDtRespuestas()[0]; assertEquals(true, dtRespuesta.equals(dtActual)); } } <file_sep><%@ page language="java" contentType="text/html; charset=UTF-8" pageEncoding="UTF-8"%> <%@ page import="uytubeLogica.publicar.Privacidad"%> <%@ page import="uytubeLogica.publicar.DtCanal"%> <%@ page import="uytubeLogica.publicar.DtCategoria"%> <%@ page import="uytubeLogica.publicar.DtVideo"%> <%@ page import="uytubeLogica.publicar.DtListaReproduccion"%> <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> 
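<%-- Playlist detail page: shows the playlist name and privacy, its categories, and a
     clickable thumbnail grid of its videos, using the request attributes "infoLista"
     (DtListaReproduccion) and "videosLista" (DtVideo[]). --%>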
<html> <head> <meta http-equiv="Content-Type" content="text/html; charset=UTF-8"> <title>Detalles de Lista</title> <%@include file="../buscadorBootstrap.jsp"%> <%@include file="../cosasComunesDelHead.jsp"%> </head> <body> <script type="text/javascript"> function clickimg(index){ $('#verVideo'+index).click() } </script> <% DtListaReproduccion infoLista = (DtListaReproduccion) request.getAttribute("infoLista"); %> <div class="container"> <div class="container" id="nombreLista"> <div class="row"> <div class="col-xs-12" style="font-size: 10vw"> <h3> <%=infoLista.getNombre()%> <small> -<%=infoLista.getPrivado().toString()%>-</small></h3> </div> </div> </div> <div class="container-fluid" id="categorias"> <div class="row"> <div class="col-xs-8"> <h4 style="float: left">Categorias:</h4> </div> </div> <div class="row"> <% for (DtCategoria cat : infoLista.getCategoriasLDR()) { %> <div class="col-xs-4" style="border: 1px solid black; font-size: 3.8vw"> <div> <%=cat.getNombre()%> </div> </div> <% } %> </div> </div> <div class="container-fluid"> <div style="height: 10px"> <div class="row" style="height:100%"> <div class="col-xs-8"> <h4 style="float: left">Videos:</h4> </div> <% DtVideo[] videos = (DtVideo[]) request.getAttribute("videosLista"); int index = 0; for (DtVideo entry : videos) { %> <div class="h-25 d-inline-block"> <div class="col-xs-6"> <form action="watch" method="get"> <input type="hidden" name="opcion" value="ver"> <input type="hidden" name="ID" value="<%=entry.getIDVideo()%>"> <input id="verVideo<%=index%>" class="verAhora" type="submit" value="Ver Ahora" style="display: none;"> <img id="imagenVideo<%=index%>" src="<%=entry.getUrl()%>" class="img-thumbnail" alt="..." onclick="clickimg(<%=index%>)"> </form> <div class="text-truncate" style="white-space: nowrap; overflow: hidden; text-overflow: ellipsis;"> <%=entry.getNombre()%> </div> <div class="text-truncate" style="white-space: nowrap; overflow: hidden; text-overflow: ellipsis;"> <small>By <%=entry.getPropietario()%></small> </div> </div> </div> <% index++; } %> </div> </div> </div> </div> </body> </html><file_sep>package uytubeLogic.JUnitTests; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import java.util.Date; import uytubeLogic.logica.Categoria; import uytubeLogic.logica.CategoriaHandler; import uytubeLogic.logica.DtCategoria; import uytubeLogic.logica.DtComentario; import uytubeLogic.logica.DtFecha; import uytubeLogic.logica.DtInfoVideo; import uytubeLogic.logica.DtListaReproduccion; import uytubeLogic.logica.DtPuntuacion; import uytubeLogic.logica.DtUsuario; import uytubeLogic.logica.DtVideo; import uytubeLogic.logica.Particular; import uytubeLogic.logica.PorDefecto; import uytubeLogic.logica.UsuarioCtrl; import uytubeLogic.logica.Video; import uytubeLogic.logica.VideoCtrl; import uytubeLogic.logica.VideoHandler; import uytubeLogic.logica.SystemHandler.Privacidad; import org.junit.Test; //SECO public class VideoCtrlTest { @Test public void testGetInstance() { VideoCtrl VCU1, VCU2; VCU1 = VideoCtrl.getInstance(); VCU2 = VideoCtrl.getInstance(); assertEquals(true, VCU1 == VCU2); } @Test public void testListarLDRPorCategoriaNoExiste() { VideoCtrl VCU = VideoCtrl.getInstance(); DtListaReproduccion[] listaActual = VCU.listarLDRPorCategoria("noExisteLDRNE", null, null); assertEquals(null, listaActual); } @Test public void testListarLDRPorCategoriaExiste() { VideoCtrl VCU = VideoCtrl.getInstance(); CategoriaHandler CHU = CategoriaHandler.getInstance(); Categoria categoriaE = new 
Categoria("nombreCategoriaListarLDRE"); CHU.addCategoria(categoriaE); PorDefecto lr1 = new PorDefecto("nombListaListarLDRE", "duenioListarLDRE"); Particular lr2 = new Particular("nombLista2ListarLDRE", "duenio2ListarLDRE", Privacidad.PUBLICO); categoriaE.aniadirLDR(lr1); categoriaE.aniadirLDR(lr2); DtListaReproduccion[] listasActual = VCU.listarLDRPorCategoria("nombreCategoriaListarLDRE", null, null); DtListaReproduccion listaActual1 = listasActual[0]; DtListaReproduccion listaActual2 = listasActual[1]; DtListaReproduccion dtLista1 = new DtListaReproduccion(lr1); DtListaReproduccion dtLista2 = new DtListaReproduccion(lr2); assertEquals(true, listaActual2.equals(dtLista2)); assertEquals(true, listaActual1.equals(dtLista1)); } @Test public void testListarVideosPorCategoriaNoExiste() { VideoCtrl VCU = VideoCtrl.getInstance(); DtVideo[] listaActual = VCU.listarVideosPorCategoria("noExisteLVNE", null, null); assertEquals(null, listaActual); } @Test public void testListarVideosPorCategoriaExiste() { VideoCtrl VCU = VideoCtrl.getInstance(); CategoriaHandler CHU = CategoriaHandler.getInstance(); Categoria categoriaE = new Categoria("nombreCategoriaListarVE"); CHU.addCategoria(categoriaE); DtFecha fecha = new DtFecha(new Date(0)); DtCategoria[] cates = VCU.listarCategorias(); DtCategoria cate = cates[0]; Video video = new Video("nombreLPCE", "duenioLPCE", "descr", 20, fecha, "hola.com", cate, Privacidad.PRIVADO); Video video2 = new Video("nombre2LPCE", "duenio2LPCE", "descr2", 20, fecha, "hola2.com", cate, Privacidad.PRIVADO); categoriaE.addVideo(video); categoriaE.addVideo(video2); DtVideo[] videosActual = VCU.listarVideosPorCategoria("nombreCategoriaListarVE", null, null); DtVideo videoActual1 = videosActual[0]; DtVideo videoActual2 = videosActual[1]; DtVideo video1Dt = new DtVideo(video); DtVideo video2Dt = new DtVideo(video2); assertEquals(true, video1Dt.equals(videoActual1)); assertEquals(true, video2Dt.equals(videoActual2)); } @Test public void testComentarios() { VideoCtrl VCU = VideoCtrl.getInstance(); UsuarioCtrl UCU = UsuarioCtrl.getInstance(); DtFecha fecha = new DtFecha(new Date(0)); String duenioVideo = "duenioLC"; String comentadorVideo1 = "comentador1LC"; UCU.nuevoUsuario(duenioVideo, "Jose", "1234", "Perez", "email", fecha, null, "duenioCanal", "descripcion", Privacidad.PRIVADO, "nombreCategoria"); UCU.nuevoUsuario(comentadorVideo1, "1234", "Roberto", "Rodriguez", "email2", fecha, null, "duenioCanal2", "descripcion2", Privacidad.PRIVADO, "nombreCategoria"); String nombreVideo = "nombreListarComentarios"; UCU.aniadirVideo(duenioVideo, nombreVideo, "descr", 20, fecha, "hola.com", null, Privacidad.PRIVADO); DtVideo dtVideo = UCU.obtenerInfoAdicVideo(duenioVideo, nombreVideo); VCU.nuevoComentario(dtVideo.getiDVideo(), comentadorVideo1, fecha, "contenido"); Integer IDCOM = VCU.verDetallesVideoExt(dtVideo.getiDVideo()).getComentarios()[0].getIdComentario(); VCU.responderComentario(dtVideo.getiDVideo(), IDCOM, comentadorVideo1, fecha, "respuesta1"); DtComentario[] ComentActuales = VCU.listarComentarios(dtVideo.getiDVideo()); assertEquals(true, ComentActuales[0].isEsPadre() == true); assertEquals(true, ComentActuales[0].getNickUsuario() == comentadorVideo1); assertEquals(true, ComentActuales[0].getIdComentario() == IDCOM); assertEquals(true, ComentActuales[0].getRespuestas().length == 1); assertEquals(true, ComentActuales[0].getRespuestas()[0].getNickUsuario() == comentadorVideo1); assertEquals(true, ComentActuales[0].getRespuestas()[0].isEsPadre() == false); assertEquals(true, 
ComentActuales[0].getRespuestas()[0].getRespuestas().length == 0); } @Test public void testValorarVideo() { VideoCtrl VCU = VideoCtrl.getInstance(); UsuarioCtrl UCU = UsuarioCtrl.getInstance(); VideoHandler VHU = VideoHandler.getInstance(); DtFecha fecha = new DtFecha(new Date(0)); String duenioVideo = "duenioVV"; String gustaVideo1 = "gustaVV"; String noGustaVideo1 = "nogustaVV"; UCU.nuevoUsuario(duenioVideo, "1234", "Jose", "Perez", "email", fecha, null, "duenioCanal", "descripcion", Privacidad.PRIVADO, "nombreCategoria"); UCU.nuevoUsuario(gustaVideo1, "1234", "Juan", "Gimenez", "email3", fecha, null, "duenioCanal3", "descripcion3", Privacidad.PRIVADO, "nombreCategoria"); UCU.nuevoUsuario(noGustaVideo1, "1234", "Pablo", "Pereira", "email4", fecha, null, "duenioCanal4", "descripcion4", Privacidad.PRIVADO, "nombreCategoria"); String nombreVideo = "nombreValorarVideo"; UCU.aniadirVideo(duenioVideo, nombreVideo, "descr", 20, fecha, "hola.com", null, Privacidad.PRIVADO); DtVideo dtVideo = UCU.obtenerInfoAdicVideo(duenioVideo, nombreVideo); VCU.valorarVideo(dtVideo.getiDVideo(), gustaVideo1, true); VCU.valorarVideo(dtVideo.getiDVideo(), noGustaVideo1, false); Video videoSubido = VHU.find(dtVideo.getiDVideo()); DtPuntuacion[] puntuaciones = videoSubido.getPuntuaciones(); boolean u1true = false; boolean u2false = false; boolean u1false = false; for (DtPuntuacion entry : puntuaciones) { if (entry.getNickname().equals(gustaVideo1) && entry.getValoracion() == true) u1true = true; if (entry.getNickname().equals(noGustaVideo1) && entry.getValoracion() == false) u2false = true; } assertTrue(u1true); assertTrue(u2false); VCU.valorarVideo(dtVideo.getiDVideo(), gustaVideo1, false); puntuaciones = videoSubido.getPuntuaciones(); u2false = false; for (DtPuntuacion entry : puntuaciones) { if (entry.getNickname().equals(gustaVideo1) && entry.getValoracion() == false) u1false = true; if (entry.getNickname().equals(noGustaVideo1) && entry.getValoracion() == false) u2false = true; } assertTrue(u1false); assertTrue(u2false); } @Test public void testVerDetallesVideoExt() { VideoCtrl VCU = VideoCtrl.getInstance(); UsuarioCtrl UCU = UsuarioCtrl.getInstance(); VCU.crearCategoria("nombreCategoriaVDE"); DtCategoria[] cates = VCU.listarCategorias(); DtCategoria cate = cates[0]; DtFecha fecha = new DtFecha(new Date(0)); String nombreVideo = "nombreDVE"; String duenioVideo = "duenioDVE"; String comentadorVideo1 = "comentador1DVE"; String gustaVideo1 = "gustaDVE"; String noGustaVideo1 = "nogustaDVE"; UCU.nuevoUsuario(duenioVideo, "1234", "Jose", "Perez", "email", fecha, null, "duenioCanal", "descripcion", Privacidad.PRIVADO, "nombreCategoria"); UCU.nuevoUsuario(comentadorVideo1, "1234", "Roberto", "Rodriguez", "email2", fecha, null, "duenioCanal2", "descripcion2", Privacidad.PRIVADO, "nombreCategoria"); UCU.nuevoUsuario(gustaVideo1, "1234", "Juan", "Gimenez", "email3", fecha, null, "duenioCanal3", "descripcion3", Privacidad.PRIVADO, "nombreCategoria"); UCU.nuevoUsuario(noGustaVideo1, "1234", "Pablo", "Pereira", "email4", fecha, null, "duenioCanal4", "descripcion4", Privacidad.PRIVADO, "nombreCategoria"); DtUsuario gustaVideo = UCU.listarDatosUsuario(gustaVideo1); DtUsuario noGustaVideo = UCU.listarDatosUsuario(noGustaVideo1); UCU.aniadirVideo(duenioVideo, nombreVideo, "descr", 20, fecha, "hola.com", cate, Privacidad.PRIVADO); DtVideo dtVideo = UCU.obtenerInfoAdicVideo(duenioVideo, nombreVideo); VCU.nuevoComentario(dtVideo.getiDVideo(), comentadorVideo1, fecha, "hola que ase"); VCU.valorarVideo(dtVideo.getiDVideo(), 
gustaVideo1, true); VCU.valorarVideo(dtVideo.getiDVideo(), noGustaVideo1, false); DtInfoVideo infoActual = VCU.verDetallesVideoExt(dtVideo.getiDVideo()); DtComentario[] comentariosActual = infoActual.getComentarios(); DtUsuario[] gustaActual = infoActual.getUsuariosGusta(); DtUsuario[] noGustaActual = infoActual.getUsuariosNoGusta(); DtVideo dtVideoActual = infoActual.getInfoVideo(); // assert campo por campo assertEquals(true, comentariosActual.length == 1); assertEquals(true, dtVideo.equals(dtVideoActual)); assertEquals(true, gustaVideo.equals(gustaActual[0])); assertEquals(true, noGustaVideo.equals(noGustaActual[0])); // fail("Not yet implemented"); } @Test public void testInfoAddVideo() { VideoCtrl VCU = VideoCtrl.getInstance(); UsuarioCtrl UCU = UsuarioCtrl.getInstance(); VCU.crearCategoria("nombreCategoria"); DtCategoria[] cates = VCU.listarCategorias(); DtCategoria cate = cates[0]; DtFecha fecha = new DtFecha(new Date(0)); String nombreVideo = "nombreIAD"; String duenioVideo = "duenioIAD"; UCU.nuevoUsuario(duenioVideo, "1234", "Jose", "Perez", "email", fecha, null, "duenioCanal", "descripcion", Privacidad.PRIVADO, "nombreCategoria"); UCU.aniadirVideo(duenioVideo, nombreVideo, "descr", 20, fecha, "hola.com", cate, Privacidad.PRIVADO); DtVideo infoActual = VCU.infoAddVideo(UCU.obtenerInfoAdicVideo(duenioVideo, nombreVideo).getiDVideo()); assertEquals(true, infoActual.getNombre() == nombreVideo); assertEquals(true, infoActual.getCategoria().equals(cate)); assertEquals(true, infoActual.getDescripcion() == "descr"); assertEquals(true, infoActual.getPrivacidad() == Privacidad.PRIVADO); } @Test public void testListarVideosExisten() { VideoCtrl VCU = VideoCtrl.getInstance(); CategoriaHandler CHU = CategoriaHandler.getInstance(); Categoria categoriaE = new Categoria("nombreCategoria"); CHU.addCategoria(categoriaE); DtFecha fecha = new DtFecha(new Date(0)); DtCategoria cate1 = null; DtCategoria cate2 = new DtCategoria(categoriaE); Video video = new Video("nombre", "duenio", "descr", 20, fecha, "hola.com", cate1, Privacidad.PRIVADO); Video video2 = new Video("nombre2", "duenio2", "descr2", 20, fecha, "hola2.com", cate2, Privacidad.PRIVADO); categoriaE.addVideo(video2); DtVideo[] videosListados = VCU.listarVideos(); DtVideo videoActual1 = VCU.infoAddVideo(video.getIDVideo()); DtVideo videoActual2 = VCU.infoAddVideo(video2.getIDVideo()); DtVideo video1Dt = new DtVideo(video); DtVideo video2Dt = new DtVideo(video2); assertEquals(true, video2Dt.equals(videoActual2)); assertEquals(true, video1Dt.equals(videoActual1)); } @Test public void testListarCategorias() { VideoCtrl VCU = VideoCtrl.getInstance(); VCU.crearCategoria("cat1ListarCategorias"); VCU.crearCategoria("cat2ListarCategorias"); DtCategoria[] categorias = VCU.listarCategorias(); boolean existe = false; boolean existe2 = false; for (DtCategoria cat : categorias) { if (cat.getNombre() == "cat1ListarCategorias") { existe = true; } if (cat.getNombre() == "cat2ListarCategorias") { existe2 = true; } } assertEquals(true, existe); assertEquals(true, existe2); } @Test public void testCrearCategoria() { VideoCtrl VCU = VideoCtrl.getInstance(); CategoriaHandler CHU = CategoriaHandler.getInstance(); VCU.crearCategoria("categoria"); assertEquals(true, CHU.isMember("categoria")); assertEquals(true, VCU.existeCategoria("categoria")); } @Test public void testExisteCategoriaSi() { VideoCtrl VCU = VideoCtrl.getInstance(); VCU.crearCategoria("existe"); assertEquals(true, VCU.existeCategoria("existe")); } @Test public void testExisteCategoriaNo() { VideoCtrl 
VCU = VideoCtrl.getInstance(); assertEquals(false, VCU.existeCategoria("noExiste")); } @Test public void testListarVideosPublicosPorNombreEInfoVideoPorPrivacidad() { UsuarioCtrl UCU = UsuarioCtrl.getInstance(); VideoCtrl VCU = VideoCtrl.getInstance(); DtFecha fecha = new DtFecha(new Date(0)); String nombreU = "nombreLVPPN1"; String nombreU2 = "nombreLVPN2"; String nombreV1Publico = "nombreVLVPPN1"; String nombreV2Privado = "nombreVLVPPN2"; String nombreV3Publico = "nombreVLVPPN3"; UCU.nuevoUsuario(nombreU, "1234", "Jose", "Ramirez", "www.cosoarroba3", fecha, null, "canal", "descripcion", Privacidad.PUBLICO, null); UCU.nuevoUsuario(nombreU2, "1234", "Gimena", "Rodriguez", "www.cosoarroba4", fecha, null, "canal2", "descripcion2", Privacidad.PUBLICO, null); UCU.aniadirVideo(nombreU, nombreV1Publico, "descrito1", 40, fecha, "url1", null, Privacidad.PUBLICO); UCU.aniadirVideo(nombreU, nombreV2Privado, "descrito2", 30, fecha, "url2", null, Privacidad.PRIVADO); UCU.aniadirVideo(nombreU2, nombreV3Publico, "descrito3", 40, fecha, "url3", null, Privacidad.PUBLICO); DtVideo[] videosResultados = VCU.listarVideosPublicosPorNombre("nombreVLVPPN"); assertEquals(2, videosResultados.length); assertEquals(nombreV1Publico, videosResultados[0].getNombre()); assertEquals(nombreV3Publico, videosResultados[1].getNombre()); } } <file_sep> package uytubeLogica.publicar; import javax.xml.bind.annotation.XmlEnum; import javax.xml.bind.annotation.XmlType; /** * <p>Clase Java para privacidad. * * <p>El siguiente fragmento de esquema especifica el contenido que se espera que haya en esta clase. * <p> * <pre> * &lt;simpleType name="privacidad"&gt; * &lt;restriction base="{http://www.w3.org/2001/XMLSchema}string"&gt; * &lt;enumeration value="PRIVADO"/&gt; * &lt;enumeration value="PUBLICO"/&gt; * &lt;/restriction&gt; * &lt;/simpleType&gt; * </pre> * */ @XmlType(name = "privacidad") @XmlEnum public enum Privacidad { PRIVADO, PUBLICO; public String value() { return name(); } public static Privacidad fromValue(String v) { return valueOf(v); } } <file_sep><%@page import="java.util.Locale"%> <%@page import="java.util.List"%> <%@page import="java.util.Iterator"%> <%@page import="java.text.SimpleDateFormat"%> <%@page import="java.text.DateFormat"%> <%@page import="uytubeLogica.publicar.DtListaReproduccion"%> <%@page import="uytubeLogica.publicar.DtCategoria"%> <%@ page language="java" contentType="text/html; charset=UTF-8" pageEncoding="UTF-8"%> <%@page errorPage="../error/error404.jsp" %> <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> <html> <head> <meta http-equiv="Content-Type" content="text/html; charset=UTF-8"> <%@include file="../cosasComunesDelHead.jsp" %> <title>Listas Reproducción</title> <style type="text/css"> .verInfoButton{ background-color: #eee; border: none; color: #777; text-align: center; text-decoration: none; display: inline-block; font-size: 18px; } .verInfoButton:hover{ background-color: #eee; color: black; } </style> </head> <body> <%@include file="../buscadorBootstrap.jsp" %> <% if(request.getAttribute("listarListasReproduccion") != null){ DtListaReproduccion[] listas = (DtListaReproduccion[]) request.getAttribute("listarListasReproduccion"); } %> <div class="container-fluid" style="width : 100%; padding : 5px 5px 5px 5px"> <div class="row"> <div class="col-xs-12"><h2>Listas reproducción</h2></div> </div> <ul class="list-group list-group-flush"> <div style="padding-left : 5%; padding-right : 5%; width : 100%;" class="container-fluid">
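<%-- Public playlists come in the "listarListasReproduccion" request attribute; the logged-in user's private playlists ("listasPrivadasSesion"), if present, are rendered below with the same markup. --%>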
<%if(request.getAttribute("listarListasReproduccion") != null){ DtListaReproduccion[] listas=(DtListaReproduccion[]) request.getAttribute("listarListasReproduccion"); for(DtListaReproduccion entry: listas){ String nombreLista = entry.getNombre(); String[] nombresCategorias = new String[entry.getCategoriasLDR().size()]; %> <li class="list-group-item"> <div class="row"> <div class="col-xs-8"><%=nombreLista%></div> <div class="col-xs-4"> <form action="playlist" method="get"> <input type="hidden" name="action" value="details"> <input type="hidden" name="nameList" value="<%=nombreLista%>"> <input type="hidden" name="ownerList" value="<%=entry.getPropietario() %>"> <input class="verAhora verInfoButton" type="submit" value="Ver Info"> </form> </div> </div> </li> <% }//for para recorrer la lista }else{%> <%}//carga las listas if((DtListaReproduccion[]) request.getAttribute("listasPrivadasSesion")!=null){ DtListaReproduccion[] listasPrivadas=(DtListaReproduccion[]) request.getAttribute("listasPrivadasSesion"); for(DtListaReproduccion entry: listasPrivadas){ String nombreLista = entry.getNombre(); String[] nombresCategorias = new String[entry.getCategoriasLDR().size()]; %> <li class="list-group-item"> <div class="row"> <div class="col-xs-8"><%=nombreLista%></div> <div class="col-xs-4"> <form action="playlist" method="get"> <input type="hidden" name="action" value="details"> <input type="hidden" name="nameList" value="<%=nombreLista%>"> <input type="hidden" name="ownerList" value="<%=entry.getPropietario() %>"> <input class="verAhora verInfoButton" type="submit" value="Ver Info"> </form> </div> </div> </li> <% }//for para recorrer la lista }%> </div> </ul> </div> </body> </html><file_sep>package uytubeWeb.servlets; import java.io.IOException; import javax.servlet.ServletException; import javax.servlet.annotation.WebServlet; import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import javax.servlet.http.HttpSession; import uytubeLogica.publicar.DtCategoria; import uytubeLogica.publicar.DtListaReproduccion; import uytubeLogica.publicar.DtVideo; import uytubeLogica.publicar.Privacidad; /** * Servlet implementation class CategoriaServlet */ @WebServlet(name="Categorias",urlPatterns={"/consult","/list"}) public class CategoriaServlet extends HttpServlet { private static final long serialVersionUID = 4L; /** * @see HttpServlet#HttpServlet() */ public CategoriaServlet() { super(); // TODO Auto-generated constructor stub } /** * @see HttpServlet#doGet(HttpServletRequest request, HttpServletResponse response) */ protected void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { uytubeLogica.publicar.WebServicesService service = new uytubeLogica.publicar.WebServicesService(); uytubeLogica.publicar.WebServices port = service.getWebServicesPort(); String action = request.getParameter("action"); System.out.println("estoy en categoria servlet GET"); switch(action) { case "consult":{ String categoria = request.getParameter("type"); System.out.println(categoria); HttpSession session=request.getSession(false); if(session!=null) { String login=(String)session.getAttribute("nombre_usuario"); if(login!=null) { DtVideo[] videos=port.listarVideosPorCategoria(categoria, Privacidad.PUBLICO, login).getItem().toArray(new DtVideo[0]); DtListaReproduccion[] listaReproduccion=port.listarLDRPorCategoria(categoria, Privacidad.PUBLICO, login).getItem().toArray(new DtListaReproduccion[0]); 
request.setAttribute("videos", videos); request.setAttribute("listas", listaReproduccion); }else{ DtVideo[] videos=port.listarVideosPorCategoria(categoria, Privacidad.PUBLICO, "").getItem().toArray(new DtVideo[0]); DtListaReproduccion[] listaReproduccion=port.listarLDRPorCategoria(categoria, Privacidad.PUBLICO, "").getItem().toArray(new DtListaReproduccion[0]); request.setAttribute("videos", videos); request.setAttribute("listas", listaReproduccion); } } request.setAttribute("titulo", "Consulta de Categoria"); request.getRequestDispatcher("WEB-INF/Busqueda.jsp").forward(request, response); };break; case "listarEmbed":{ System.out.println("estoy aqui aqui para quereerte"); //DtCategoria[] categorias = interfazVideos.listarCategorias(); DtCategoria[] categorias= port.listarCategorias().getItem().toArray(new DtCategoria[0]); response.getWriter().append("<ul>"); for(DtCategoria entry:categorias) { response.getWriter().append("<li><a href='consult?action=consult&type="+entry.getNombre()+"'>"+entry.getNombre()+"</a> </li>"); } response.getWriter().append("</ul>"); } } //request.getRequestDispatcher("/WEB-INF/Categoria/consultaCategoria.jsp").forward(request, response); // TODO Auto-generated method stub //response.getWriter().append("Served at: ").append(request.getContextPath()); } /** * @see HttpServlet#doPost(HttpServletRequest request, HttpServletResponse response) */ protected void doPost(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { // TODO Auto-generated method stub doGet(request, response); } } <file_sep>package uytubeLogic.logica; import java.io.FileReader; import java.io.IOException; import java.util.Properties; public class PropertiesCtrl { private static PropertiesCtrl instance = null; public static PropertiesCtrl getInstance() { if (instance == null) instance = new PropertiesCtrl(); return instance; } public String getProperty(String property) throws IOException{ Properties prop = new Properties(); FileReader file = new FileReader(System.getProperty("user.home")+"/Desktop/"+"uytube.properties"); prop.load(file); return prop.getProperty(property); } } <file_sep>package uytubeLogic.logica; public class Puntuacion { private boolean valoracion; private Usuario usuarioPuntuador; public boolean getValoracion() { return valoracion; } public void setValoracion(boolean valoracionVideo) { valoracion = valoracionVideo; } public Usuario getUsuario() { return usuarioPuntuador; } public Puntuacion(String nickU, boolean gusta) { valoracion = gusta; UsuarioHandler usuHandler = UsuarioHandler.getInstance(); Usuario usuarioValorador = usuHandler.find(nickU); usuarioPuntuador = usuarioValorador; } public String getNickPuntuador() { return usuarioPuntuador.getNickname(); } } <file_sep>package uytubeLogic.JUnitTests; import static org.junit.Assert.fail; import org.junit.Test; public class UsuarioTest { @Test public void testUsuario() { fail("Not yet implemented"); } @Test public void testCreateCanal() { fail("Not yet implemented"); } @Test public void testEditarDatosUsuario() { fail("Not yet implemented"); } @Test public void testMostrarInfoCanal() { fail("Not yet implemented"); } @Test public void testListarDatosUsuario() { fail("Not yet implemented"); } @Test public void testAniadirUsuarioASeguir() { fail("Not yet implemented"); } @Test public void testRemoverUsuarioASeguir() { fail("Not yet implemented"); } @Test public void testListarVideosPorLDR() { fail("Not yet implemented"); } @Test public void testAgregarVideoLDR() { fail("Not yet 
implemented"); } } <file_sep> package uytubeLogica.publicar; import javax.xml.bind.annotation.XmlEnum; import javax.xml.bind.annotation.XmlType; /** * <p>Clase Java para tipoLista. * * <p>El siguiente fragmento de esquema especifica el contenido que se espera que haya en esta clase. * <p> * <pre> * &lt;simpleType name="tipoLista"&gt; * &lt;restriction base="{http://www.w3.org/2001/XMLSchema}string"&gt; * &lt;enumeration value="PORDEFECTO"/&gt; * &lt;enumeration value="PARTICULAR"/&gt; * &lt;/restriction&gt; * &lt;/simpleType&gt; * </pre> * */ @XmlType(name = "tipoLista") @XmlEnum public enum TipoLista { PORDEFECTO, PARTICULAR; public String value() { return name(); } public static TipoLista fromValue(String v) { return valueOf(v); } } <file_sep>package uytubeLogic.logica; import java.util.Date; import java.util.HashMap; import java.util.Iterator; import java.util.Map; import java.util.Map.Entry; public abstract class ListaReproduccion { private String nombre; private String propietario; private Map<Integer, Video> videos; private Map<String, Categoria> categorias; public ListaReproduccion(String nombLDR, String pro) { nombre = nombLDR; propietario = pro; videos = new HashMap<Integer, Video>(); categorias = new HashMap<String, Categoria>(); } public Video getVideo(Integer idVideo) { return videos.get(idVideo); } public enum TipoLista { PORDEFECTO, PARTICULAR; } public String getNombre() { return nombre; } public void addVideoToMap(Video videoAAgregar) { videos.put(videoAAgregar.getIDVideo(), videoAAgregar); } public void removeVideoFromMap(Integer idVideo) { videos.remove(idVideo); } public abstract DtListaReproduccion verDetallesListareproduccion(); public String[] listarVideos() { String[] nombreVideos = new String[videos.size()]; Integer contador = 0; for (Map.Entry<Integer, Video> entry : videos.entrySet()) { nombreVideos[contador] = entry.getValue().getNombre(); contador++; } return nombreVideos; } public void refrescarCategorias() { for (Map.Entry<Integer, Video> entry : videos.entrySet()) { Categoria categoria = entry.getValue().getObjetoCategoria(); System.out.println("cate "+categoria.getNombre()); System.out.println("igual? 
"+categoria.getNombre().equals(SystemHandler.getInstance().getSinCat().getNombre())); if(!categoria.getNombre().equals(SystemHandler.getInstance().getSinCat().getNombre())) { categoria.aniadirLDR(this); categorias.put(categoria.getNombre(), entry.getValue() .getObjetoCategoria()); } } } public abstract void removerVideo(Integer idVideo); public abstract void agregarVideo(Video videoAAgregar); public DtCategoria[] getInfoCategorias() { DtCategoria[] res = new DtCategoria[categorias.size()]; int i = 0; for (Map.Entry<String, Categoria> entry : categorias.entrySet()) { res[i] = new DtCategoria(entry.getValue()); i++; } return res; } public String getPropietario() { return propietario; } public void setPropietario(String propietario) { this.propietario = propietario; } public DtVideo[] obtenerDtsVideosListaReproduccionUsuario(String nombreLista) { DtVideo[] dtvideos = new DtVideo[videos.size()]; int contador = 0; for (Map.Entry<Integer, Video> entry : videos.entrySet()) { DtVideo dt = new DtVideo(entry.getValue()); dtvideos[contador] = dt; contador++; } return dtvideos; } public boolean existeVideo(Video video) { return videos.containsKey(video.getIDVideo()); } public void removerCategoria(Categoria cat) { // TODO Auto-generated method stub categorias.remove(cat.getNombre()); } public Date getFechaUltimoVideo() { Date LastFecha = new Date(0); boolean found = false; for (Map.Entry<Integer, Video> entry : videos.entrySet()) { if (!found) { LastFecha = entry.getValue().getFechaPublicacion().getFecha(); found = true; } else if (entry.getValue().getFechaPublicacion().getFecha().before(LastFecha)) { LastFecha = entry.getValue().getFechaPublicacion().getFecha(); } } return LastFecha; } public abstract DtListaReproduccion toDt(); public abstract uyTubePersistencia.ListaReproduccion persistir(); public Map<Integer, Video> getVideos() { return videos; } public void setVideos(Map<Integer, Video> videos) { this.videos = videos; } public void eliminarVideos() { Iterator<Integer> it = videos.keySet().iterator(); while (it.hasNext()) { it.next(); it.remove(); // videos.remove(key); } } } <file_sep><%@ page language="java" contentType="text/html; charset=UTF-8" pageEncoding="UTF-8"%> <!DOCTYPE html> <html> <head> <link rel="stylesheet" href="media/styles/error404.css"> <meta charset="UTF-8"> <title>Error 404</title> </head> <body> <table height="100%" width="100%"> <tr><t id="titulo">ERROR 404</t><br></tr> <tr height="50%" width="50%"><img height="50%" width="50%" src="https://i.imgflip.com/26k52v.jpg"></img><br></tr> <tr> <t id="texto_aviso"> Intentaste entrar a una página que no existe. Por suerte esto los servidores no mueren por esto. Puedes <a href="casa">volver</a> al inicio. 
</t> </tr> </table> </body> </html><file_sep>package uytubeLogic.logica; import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; import java.util.HashMap; import java.util.Map; import uytubeLogic.logica.SystemHandler.Privacidad; public class Usuario { private String nickname; private String pass; private String nombre; private String apellido; private String email; private DtFecha fechaNacimiento; private byte[] foto; private Canal canalPropio; private Map<String, Usuario> usuariosQueSigue; private Map<String, Usuario> usuariosQueLeSiguen; public static byte[] imagenToByte(File archivo) { // imagen a byte[] try { byte[] imgFoto = new byte[(int) archivo.length()]; InputStream inte = new FileInputStream(archivo); inte.read(imgFoto); return imgFoto; } catch (Exception e) { System.out.println(e.getMessage()); } return null; } public Usuario(String nickU, String passU, String nombreU, String apellidoU, String emailU, DtFecha fechaNacU, byte[] fotoU, String nombreCanal, String descripcionCanal, Privacidad privacidadCanal, String catCanal) { // TODO Auto-generated constructor stub nickname = nickU; pass = <PASSWORD>; nombre = nombreU; apellido = apellidoU; email = emailU; fechaNacimiento = fechaNacU; PropertiesCtrl prop = PropertiesCtrl.getInstance(); String ubicacionFoto = null; try { ubicacionFoto =System.getProperty("user.home")+"/Desktop/"+prop.getProperty("fotos"); } catch (IOException e) { // TODO Auto-generated catch block e.printStackTrace(); } if (fotoU == null || (fotoU != null && fotoU.length == 0)) { // agregarle foto por defecto File archivo = new File(ubicacionFoto+"/usuarioPorDefecto.png"); foto = imagenToByte(archivo); } else { foto = fotoU; } usuariosQueSigue = new HashMap<String, Usuario>(); usuariosQueLeSiguen = new HashMap<String, Usuario>(); this.createCanal(nombreCanal, nickU, descripcionCanal, privacidadCanal, catCanal); ; } public void createCanal(String nombreCanal, String propietarioCanal, String descricpcionCanal, Privacidad privacidadCanal, String categoriaCanal) { canalPropio = new Canal(nombreCanal, propietarioCanal, descricpcionCanal, privacidadCanal, categoriaCanal); } public String getNickname() { return nickname; } public String getPassword() { return <PASSWORD>; } public String getNombre() { return nombre; } public String getApellido() { return apellido; } public DtFecha getFechaNac() { return fechaNacimiento; } public String getEmail() { return email; } public void editarDatosUsuario(String nuevoNombre, String nuevoApellido, DtFecha nuevaFechaNacimiento, byte[] fotoUsuario) { nombre = nuevoNombre; apellido = nuevoApellido; fechaNacimiento = nuevaFechaNacimiento; foto = fotoUsuario; } public DtCanal mostrarInfoCanal() { return canalPropio.mostrarInfoCanal(); } public DtUsuario listarDatosUsuario() { DtUsuario infoUsuario = new DtUsuario(this); return infoUsuario; } public byte[] getFoto() { return foto; } public void aniadirUsuarioASeguir(Usuario usuarioParticular) { usuariosQueSigue.put(usuarioParticular.getNickname(), usuarioParticular); } public void removerUsuarioASeguir(Usuario usuarioParticular) { usuariosQueSigue.remove(usuarioParticular.getNickname()); } public String[] listarUsuariosQueSigue() { String[] nicknames = new String[usuariosQueSigue.size()]; Integer contador = 0; for (Map.Entry<String, Usuario> entry : usuariosQueSigue.entrySet()) { nicknames[contador] = entry.getKey(); contador++; } return nicknames; } public void aniadirUsuarioQueLeSigue(Usuario usuarioParticular) { 
usuariosQueLeSiguen.put(usuarioParticular.getNickname(), usuarioParticular); } public void removerUsuarioQueLeSigue(Usuario usuarioParticular) { usuariosQueLeSiguen.remove(usuarioParticular.getNickname()); } public void aniadirVideo(String nombreVideo, String propietarioVideo, String descripcionVideo, Integer duracion, DtFecha fechaPublicacion, String url, DtCategoria catE, Privacidad privacidadVideo) { canalPropio.aniadirVideo(nombreVideo, propietarioVideo, descripcionVideo, duracion, fechaPublicacion, url, catE, privacidadVideo); } public void ingresarNuevosDatosVideo(String nom, String descripcionVideo, int duracion, DtFecha fechaPublicacion, String url, DtCategoria catE, Privacidad privacidadVideo) { canalPropio.ingresarNuevosDatosVideo(nom, descripcionVideo, duracion, fechaPublicacion, url, catE, privacidadVideo); } public String[] listarVideosCanal() { return canalPropio.listarVideosCanal(); } public DtListaReproduccion verDetallesListareproduccion(String nombreLista) { return canalPropio.verDetallesListareproduccion(nombreLista); } public String[] listarListasReproduccion() { return canalPropio.listarListasReproduccion(); } public String[] listarUsuariosQueLeSigue() { String[] nicknames = new String[usuariosQueLeSiguen.size()]; Integer contador = 0; for (Map.Entry<String, Usuario> entry : usuariosQueLeSiguen.entrySet()) { nicknames[contador] = entry.getKey(); contador++; } return nicknames; } public String[] listarVideosPorLDR(String nombreLDR) { return canalPropio.listarVideosPorLDR(nombreLDR); } public void agregarVideoLDR(Integer idVideo, String nombreLDR) { canalPropio.agregarVideoLDR(idVideo, nombreLDR); } public void nuevaListaParticular(String nombreL, String pro, Privacidad privada) { Particular ldr = new Particular(nombreL, pro, privada); canalPropio.addListaReproduccion(ldr); } public void cambiarPrivLDR(String nombreL, Privacidad privE) { canalPropio.cambiarPrivLDR(nombreL, privE); } public void eliminarVideoLista(Integer id_video, String nombreLDR) { canalPropio.eliminarVideoLista(id_video, nombreLDR); } public Boolean memberListaReproduccionPropia(String nombreLista) { return canalPropio.memberListaReproduccionPropia(nombreLista); } public void nuevaListaPorDefecto(String nombreL) { canalPropio.nuevaListaPorDefecto(nombreL, nickname); } public DtVideo obtenerInfoAdicVideo(String nombreVideo) { return canalPropio.obtenerInfoAdicVideo(nombreVideo); } public Boolean memberVideoEnUsuario(String nombreVideo) { return canalPropio.memberVideoEnUsuario(nombreVideo); } public String[] listarVideosListaReproduccionUsuario(String nombreLista) { return canalPropio.listarVideosListaReproduccionUsuario(nombreLista); } public DtVideo[] obtenerDtsVideosListaReproduccionUsuario(String nombreLista) { // TODO Auto-generated method stub return canalPropio.obtenerDtsVideosListaReproduccionUsuario(nombreLista); } public boolean memberVideoLista(int idVideo, String nombreListaReproduccion) { return canalPropio.memberVideoLista(idVideo, nombreListaReproduccion); } public void modificarDatosCanal(String nombreCanal, String descripcion, Privacidad privacidad, String catE) { canalPropio.modificarDatosCanal(nombreCanal, descripcion, privacidad, catE); } public String[] listarLDRParticularesdeUsuario() { return canalPropio.listarLDRParticularesdeUsuario(); } public DtVideo[] infoVideosCanal(String filtro, Privacidad priv) { return canalPropio.infoVideosCanal(filtro, priv); } public DtListaReproduccion[] infoLDRdeUsuario(String filtro, Privacidad priv) { return canalPropio.infoLDRdeUsuario(filtro, 
priv); } public DtListaReproduccion[] getListas() { return canalPropio.getListas(); } public void eliminarVideo(String videoNombre){ canalPropio.eliminarVideoCanal(videoNombre); } public void agregarVisita(Integer id_video) { canalPropio.agregarVisita(id_video); } public DtVideoHistorial[] listarVideoHistorial() { return canalPropio.getFavoritoHistorico(); } public uyTubePersistencia.Usuario persistir() { uyTubePersistencia.Usuario UsuarioP = new uyTubePersistencia.Usuario(this); uyTubePersistencia.Canal CanalP = canalPropio.persistir(); UsuarioP.setCanalPropio(CanalP); return UsuarioP; // TODO Auto-generated method stub } public void eliminarTodosVideoLista(String nomLista) { canalPropio.eliminarTodosVideoLista(nomLista); } } <file_sep>package uytube.admin.videos.modificar; import java.awt.Container; import javax.swing.JInternalFrame; public final class ModificarVideoInternalFrame { private final JInternalFrame internalFrame = new JInternalFrame(); private Integer videoId; private final Container container; public ModificarVideoInternalFrame(Container container) { this.container = container; this.container.add(internalFrame); initializeInternalFrame(); } private void initializeInternalFrame() { internalFrame.setIconifiable(true); internalFrame.setMaximizable(true); internalFrame.setVisible(false); internalFrame.setTitle("Modificar Video"); internalFrame.setClosable(true); internalFrame.setResizable(true); internalFrame.setSize(330, 500); addContentToInternalFrame(); } private void addContentToInternalFrame() { ModificarVideoFormPanel altaVideoFormPanel = new ModificarVideoFormPanel(internalFrame, videoId); internalFrame.getContentPane().add(altaVideoFormPanel.getPanel()); } public void setVideo(final Integer videoId) { this.videoId = videoId; } public void show() { internalFrame.show(); } public void hide() { internalFrame.hide(); } } <file_sep>package uytubeWeb.servlets; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import javax.servlet.ServletException; import javax.servlet.annotation.WebServlet; import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import javax.servlet.http.HttpSession; import uytubeLogica.publicar.DtCanal; import uytubeLogica.publicar.DtListaReproduccion; import uytubeLogica.publicar.DtVideo; import uytubeLogica.publicar.Privacidad; /** * Servlet implementation class BusquedaServlet */ @WebServlet("/search") public class BusquedaServlet extends HttpServlet { private static final long serialVersionUID = 3L; /** * @see HttpServlet#HttpServlet() */ public BusquedaServlet() { super(); // TODO Auto-generated constructor stub } /** * @see HttpServlet#doGet(HttpServletRequest request, HttpServletResponse * response) */ protected void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { response.setCharacterEncoding("UTF-8"); // TODO Auto-generated method stub String busqueda="busqueda"; String defaultBusqueda=""; System.out.println(request.getParameter(busqueda)); if(request.getParameter(busqueda)==null) { request.setAttribute(busqueda, defaultBusqueda); } else { request.setAttribute(busqueda, request.getParameter(busqueda)); } uytubeLogica.publicar.WebServicesService service = new uytubeLogica.publicar.WebServicesService(); uytubeLogica.publicar.WebServices port = service.getWebServicesPort(); DtVideo[] videos = 
port.listarVideosPublicosPorNombre((String)request.getAttribute(busqueda)).getItem().toArray(new DtVideo[0]); DtCanal[] canales = port.listarCanalesPublicosPorNombre((String)request.getAttribute(busqueda)).getItem().toArray(new DtCanal[0]); DtListaReproduccion[] listas = port.listarLDRPublicasPorNombre((String)request.getAttribute(busqueda)).getItem().toArray(new DtListaReproduccion[0]); HttpSession session=request.getSession(false); if(session!=null) { String login=(String)session.getAttribute("nombre_usuario"); if(login!=null) { System.out.println("hay un usuario logueado"); DtVideo[] videosPrivadosSesion=port.infoVideosCanal((String)request.getAttribute(busqueda),login, Privacidad.PRIVADO).getItem().toArray(new DtVideo[0]); DtListaReproduccion[] listasPrivadasSesion=port.infoLDRdeUsuario((String)request.getAttribute(busqueda),login, Privacidad.PRIVADO).getItem().toArray(new DtListaReproduccion[0]); List<DtVideo> videosAux= new ArrayList<DtVideo>(Arrays.asList(videos)); videosAux.addAll(Arrays.asList(videosPrivadosSesion)); videos=videosAux.toArray(new DtVideo[0]); List<DtListaReproduccion> listasAux = new ArrayList<DtListaReproduccion>(Arrays.asList(listas)); listasAux.addAll(Arrays.asList(listasPrivadasSesion)); listas=listasAux.toArray(new DtListaReproduccion[0]); } } String parametroListas="listas"; String parametroCanales="canales"; String parametroVideos="videos"; request.setAttribute(parametroListas, listas); request.setAttribute(parametroCanales, canales); if(request.getHeader("User-Agent").indexOf("Mobile") != -1) { for(DtVideo video: videos) { video.setUrl("https://img.youtube.com/vi/"+video.getUrl().substring(30)+"/hqdefault.jpg"); System.out.println(video.getUrl()); } request.setAttribute(parametroVideos, videos); request.getRequestDispatcher("/WEB-INF/BusquedaMobile.jsp").forward(request, response); } else { request.setAttribute(parametroVideos, videos); request.setAttribute("titulo", "Resultados de Busqueda"); request.getRequestDispatcher("/WEB-INF/Busqueda.jsp").forward(request, response); } } /** * @see HttpServlet#doPost(HttpServletRequest request, HttpServletResponse * response) */ protected void doPost(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { // TODO Auto-generated method stub doGet(request, response); } } <file_sep>package uytube.datosPrueba; import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; import java.text.ParseException; import java.text.SimpleDateFormat; import java.util.Date; import uytubeLogic.logica.DtCategoria; import uytubeLogic.logica.DtComentario; import uytubeLogic.logica.DtFecha; import uytubeLogic.logica.DtVideo; import uytubeLogic.logica.Fabrica; import uytubeLogic.logica.IUsuarioCtrl; import uytubeLogic.logica.IVideoCtrl; import uytubeLogic.logica.PropertiesCtrl; import uytubeLogic.logica.VideoHandler; import uytubeLogic.logica.SystemHandler.Privacidad; public class DatosDePrueba { private Fabrica fabrica = Fabrica.getInstance(); private IUsuarioCtrl ICU = fabrica.getIUsuarioCtrl(); private IVideoCtrl ICV = fabrica.getIVideoCtrl(); public static byte[] imagenToByte(File archivo){ //imagen a byte[] try{ byte[] imgFoto = new byte[(int) archivo.length()]; InputStream inte = new FileInputStream(archivo); inte.read(imgFoto); return imgFoto; }catch(Exception e){ System.out.println(e.getMessage());} return null; } public void cargarDatosDePrueba() throws IOException{ PropertiesCtrl prop = PropertiesCtrl.getInstance(); System.out.println("la 
property foto es: "+prop.getProperty("fotos")); String ubicacionFoto=System.getProperty("user.home")+"/Desktop/"+prop.getProperty("fotos"); //CATEGORIAS String MUS="Musica";ICV.crearCategoria(MUS); String DEP="Deporte";ICV.crearCategoria(DEP); String CAR="Carnaval";ICV.crearCategoria(CAR); String NOT="Noticias";ICV.crearCategoria(NOT); String ENT="Entretenimiento";ICV.crearCategoria(ENT); String COM="Comida";ICV.crearCategoria(COM); String JUE="Videojuegos";ICV.crearCategoria(JUE); String CYT="Ciencia y Tecnologia";ICV.crearCategoria(CYT); String ONG="ONG y activismo";ICV.crearCategoria(ONG); String GEN="Gente y blogs";ICV.crearCategoria(GEN); String MAS="Mascotas y animales";ICV.crearCategoria(MAS); String VIA="Viajes y eventos";ICV.crearCategoria(VIA); //USUARIOS //las categorias de los canales se las puse para completar datos Date fecha; DtFecha fechaNac; Integer duracion = 1; File archivo; //public void nuevoUsuario(String nick, String nom, String ape, String e, DtFecha fn, byte[] fo, String nomCanal, // String desc, Boolean privacidadE, String catE) fecha = asignarFecha("25,02,1962 00:00"); fechaNac = new DtFecha(fecha); String HR="hrubio"; archivo = new File(ubicacionFoto+"/hr.jpg"); ICU.nuevoUsuario(HR,"Rufus123", "Horacio", "Rubino", "<EMAIL>", fechaNac, imagenToByte(archivo), "Canal Horacio", "El canal Horacio es para publicar contenido divertido",Privacidad.PUBLICO, "Entretenimiento"); fecha = asignarFecha("14,06,1972 00:00"); fechaNac = new DtFecha(fecha); String MB ="mbusca"; archivo = new File(ubicacionFoto+"/mb.jpg"); ICU.nuevoUsuario(MB,"Cookier234", "Martin", "Buscaglia", "<EMAIL>", fechaNac, imagenToByte(archivo), "El bocha", "Mi canal para colgar cosas", Privacidad.PUBLICO, null); fecha = asignarFecha("07,01,1954 00:00"); fechaNac = new DtFecha(fecha); String HG="hectorg"; ICU.nuevoUsuario(HG,"Poncho345", "Hector", "Guido", "<EMAIL>", fechaNac, null, HG, "Canal HG", Privacidad.PUBLICO, null); fecha=asignarFecha("24,07,1971 00:00"); fechaNac = new DtFecha(fecha); String TC ="tabarec"; archivo = new File(ubicacionFoto+"/tc.jpg"); ICU.nuevoUsuario(TC,"Ketchup1", "Tabare", "Cardozo", "<EMAIL>", fechaNac, imagenToByte(archivo), "Tabare", "Mi musica e ainda mais", Privacidad.PUBLICO, MUS); fecha = asignarFecha("01,01,1947 00:00"); fechaNac = new DtFecha(fecha); String CS="cachilas"; archivo = new File(ubicacionFoto+"/cs.jpg"); ICU.nuevoUsuario(CS,"Sancho456", "Walder 'Cachila'", "Silva", "<EMAIL>", fechaNac, imagenToByte(archivo), "El Cachila", "Para juntar cosas", Privacidad.PRIVADO, null); fecha =asignarFecha("16,03,1967 00:00"); fechaNac = new DtFecha(fecha); String JB="juliob"; ICU.nuevoUsuario(JB,"Salome56", "Julio", "Bocca", "<EMAIL>", fechaNac, null, JB, "Canal de JB", Privacidad.PUBLICO,null); fecha=asignarFecha("01,01,1975 00:00"); fechaNac = new DtFecha(fecha); String DP="diegop"; ICU.nuevoUsuario(DP,"Ruffo678", "Diego", "Parodi", "<EMAIL>", fechaNac, null, DP, "El Canal de DP", Privacidad.PUBLICO,null); fecha=asignarFecha("25,04,1840 00:00"); fechaNac = new DtFecha(fecha); String KH="kairoh"; archivo = new File(ubicacionFoto+"/kh.jpg"); ICU.nuevoUsuario(KH,"Corbata15", "Kairo", "Herrera", "<EMAIL>", fechaNac, imagenToByte(archivo), "Kairo Musica", "Videos de grandes canciones de hoy y siempre", Privacidad.PUBLICO, MUS); fecha=asignarFecha("03,08,1940 00:00"); fechaNac = new DtFecha(fecha); String RH="robinh"; ICU.nuevoUsuario(RH,"Aquiles67", "Robin", "Henderson", "<EMAIL>", fechaNac, null, RH, "Henderson", Privacidad.PUBLICO, null); fecha=asignarFecha("01,04,1960 
00:00"); fechaNac = new DtFecha(fecha); String MT="marcelot"; ICU.nuevoUsuario(MT,"Mancha890", "Marcelo", "Tinelli", "<EMAIL>", fechaNac, null, "Tinelli total", "Todo lo que querias y mas!", Privacidad.PUBLICO , ENT); fecha=asignarFecha("17,07,1952 00:00"); fechaNac = new DtFecha(fecha); String EN="novick"; ICU.nuevoUsuario(EN,"Xenon987", "Edgardo", "Novick", "<EMAIL>", fechaNac, null, "Con la gente", "Preparando las elecciones", Privacidad.PUBLICO, null); fecha=asignarFecha("28,01,1950 00:00"); fechaNac = new DtFecha(fecha); String SP="sergiop"; ICU.nuevoUsuario(SP,"Sultan876", "Sergio", "Puglia", "<EMAIL>", fechaNac, null, "Puglia invita", "Programas del ciclo y videos de cocina mastercheef", Privacidad.PUBLICO, COM); fecha=asignarFecha("17,03,1976 00:00"); fechaNac = new DtFecha(fecha); String AR="chino"; archivo = new File(ubicacionFoto+"/ar.jpg"); ICU.nuevoUsuario(AR,"Laika765", "Alvaro", "Recoba", "<EMAIL>", fechaNac, imagenToByte(archivo), "<NAME>", "Canal de goles con Nacional", Privacidad.PRIVADO, DEP); fecha=asignarFecha("14,02,1955 00:00"); fechaNac = new DtFecha(fecha); String AP="tonyp"; archivo = new File(ubicacionFoto+"/ap.jpg"); ICU.nuevoUsuario(AP,"Kitty543", "Antonio", "Pacheco", "<EMAIL>", fechaNac, imagenToByte(archivo), "<NAME>", "Todos los goles con Peñarol", Privacidad.PRIVADO, DEP); fecha=asignarFecha("09,08,1960 00:00"); fechaNac = new DtFecha(fecha); String NJ="nicoJ"; ICU.nuevoUsuario(NJ,"Albino80", "Nicolas", "Jodal", "<EMAIL>", fechaNac, null, "Desde Genexus", "Canal informacion C y T", Privacidad.PUBLICO, CYT); //SEGUIDORES raiz/destino ICU.seguirUsuario(HR,HG);ICU.seguirUsuario(HR,DP); ICU.seguirUsuario(MB,TC);ICU.seguirUsuario(MB,CS);ICU.seguirUsuario(MB,KH); ICU.seguirUsuario(HG,MB);ICU.seguirUsuario(HG,JB); ICU.seguirUsuario(TC,HR);ICU.seguirUsuario(TC,CS); ICU.seguirUsuario(CS,HR); ICU.seguirUsuario(JB,MB);ICU.seguirUsuario(JB,DP); ICU.seguirUsuario(DP,HG); ICU.seguirUsuario(KH,SP); ICU.seguirUsuario(RH,HG);ICU.seguirUsuario(RH,JB);ICU.seguirUsuario(RH,DP); ICU.seguirUsuario(MT,CS);ICU.seguirUsuario(MT,JB);ICU.seguirUsuario(MT,KH); ICU.seguirUsuario(EN,HR);ICU.seguirUsuario(EN,TC);ICU.seguirUsuario(EN,CS); ICU.seguirUsuario(SP,MB);ICU.seguirUsuario(SP,JB);ICU.seguirUsuario(SP,DP); ICU.seguirUsuario(AR,AP); ICU.seguirUsuario(AP,AR); ICU.seguirUsuario(NJ,DP); //VIDEOS DtCategoria catMUS = new DtCategoria(MUS); DtCategoria catNOT = new DtCategoria(NOT); DtCategoria catCAR = new DtCategoria(CAR); DtCategoria catDEP = new DtCategoria(DEP); DtCategoria catCYT = new DtCategoria(CYT); String V1="Locura celeste"; ICU.aniadirVideo(TC, V1, null, duracion, fechaNac,"https://youtu.be/PAfbzKcePx0",catMUS, Privacidad.PRIVADO); ICU.aniadirVideo(CS, V1, null, duracion, fechaNac,"https://youtu.be/PAfbzKcePx0",catMUS, Privacidad.PRIVADO); String V2="Niño payaso"; ICU.aniadirVideo(TC, V2, null, duracion, fechaNac,"https://youtu.be/K-uEIUnyZPg",catMUS, Privacidad.PRIVADO); ICU.aniadirVideo(CS, V2, null, duracion, fechaNac,"https://youtu.be/K-uEIUnyZPg",catMUS, Privacidad.PRIVADO); String V3="Sweet child'o mine"; ICU.aniadirVideo(JB, V3, null, duracion, fechaNac,"https://youtu.be/1w7OgIMMRc4",catMUS,Privacidad.PUBLICO); ICU.aniadirVideo(KH, V3, null, duracion, fechaNac,"https://youtu.be/1w7OgIMMRc4",catMUS,Privacidad.PUBLICO); String V4="Dancing in the Dark"; ICU.aniadirVideo(KH, V4, null, duracion, fechaNac,"https://youtu.be/129kuDCQtHs", catMUS, Privacidad.PUBLICO); String V5="Thriller"; ICU.aniadirVideo(JB, V5, null, duracion, 
fechaNac,"https://youtu.be/sOnqjkJTMaA",catMUS,Privacidad.PUBLICO); ICU.aniadirVideo(KH, V5, null, duracion, fechaNac,"https://youtu.be/sOnqjkJTMaA",catMUS,Privacidad.PUBLICO); String V6="100 años de FING"; ICU.aniadirVideo(HG, V6, null, duracion, fechaNac,"https://youtu.be/peGS4TBxSaI",catNOT ,Privacidad.PUBLICO); String V7="50 años del InCo"; ICU.aniadirVideo(HG, V7, null, duracion, fechaNac,"https://youtu.be/GzOJSk4urlM",catNOT , Privacidad.PUBLICO); String V8="Ingenieria de Muestra 2017"; ICU.aniadirVideo(HG, V8, null, duracion, fechaNac,"https://youtu.be/RnaYRA1k5j4", catNOT, Privacidad.PUBLICO); String V9="Etapa A contramano Liguilla"; ICU.aniadirVideo(CS, V9, null, duracion, fechaNac,"https://youtu.be/Es6GRMHXeCQ",catCAR , Privacidad.PRIVADO); String V10="Etapa <NAME>"; ICU.aniadirVideo(CS, V10, null, duracion, fechaNac,"https://youtu.be/I_spHBU9ZsI",catCAR , Privacidad.PRIVADO); String V11="Show de Goles"; ICU.aniadirVideo(JB, V11, null, duracion, fechaNac,"https://youtu.be/g46w4_kD_lA", catDEP, Privacidad.PUBLICO); String V12="Pacheco goles mas recordados"; ICU.aniadirVideo(TC, V12, null, duracion, fechaNac,"https://youtu.be/wlEd6-HsIxI", catDEP, Privacidad.PRIVADO); ICU.aniadirVideo(AP, V12, null, duracion, fechaNac,"https://youtu.be/wlEd6-HsIxI", catDEP, Privacidad.PRIVADO); String V13="Inaguracion Estadio Peñarol"; ICU.aniadirVideo(JB, V13, null, duracion, fechaNac,"https://youtu.be/U6XPJ8Vz72A",catDEP ,Privacidad.PUBLICO); String V14="Recoba 20 mejores goles"; ICU.aniadirVideo(CS, V14, null, duracion, fechaNac,"https://youtu.be/Gy3fZhWdLEQ", catDEP, Privacidad.PRIVADO); ICU.aniadirVideo(AR, V14, null, duracion, fechaNac,"https://youtu.be/Gy3fZhWdLEQ", catDEP, Privacidad.PRIVADO); String V15="Entrevista a director CUTI"; ICU.aniadirVideo(NJ, V15, null, duracion, fechaNac,"https://youtu.be/Eq5uBEzI6qs",catCYT,Privacidad.PUBLICO); String V16="Ventana al futuro Uruguay y deficit de ingenieros"; ICU.aniadirVideo(NJ, V16, null, duracion, fechaNac,"https://youtu.be/zBR2pnASlQE",catCYT ,Privacidad.PUBLICO); //LISTAS PARTICULARES //tienen categoria?? 
		//ICU.nuevaListaParticular(String nickU, String nombreL, Boolean privada)
		String LP1="Nostalgia"; ICU.nuevaListaParticular(KH,LP1,Privacidad.PUBLICO); //MUS LP1
		String LP2="De fiesta"; ICU.nuevaListaParticular(TC,LP2,Privacidad.PRIVADO); //MUS DEP LP2
		String LP3="Novedades FING"; ICU.nuevaListaParticular(HG,LP3,Privacidad.PUBLICO); //NOT LP3
		String LP4="De todo un poco"; ICU.nuevaListaParticular(CS,LP4,Privacidad.PRIVADO); //MUS DEP CAR LP4
		String LP5="Noticias y CYT"; ICU.nuevaListaParticular(NJ,LP5,Privacidad.PUBLICO); //NOT CYT LP5
		String LP6="Solo deportes"; ICU.nuevaListaParticular(JB,LP6,Privacidad.PUBLICO); //DEP LP6

		//DtVideo video = ICU.obtenerInfoAdicVideo(nicknameAutor, nombreVideo);
		//int idVideo = video.getiDVideo();
		//ICU.agregarVideoLista(nicknameUsuario, idVideo, nombreListaReproduccion);
		DtVideo video = ICU.obtenerInfoAdicVideo(KH,V3); int idVideo = video.getiDVideo(); ICU.agregarVideoLista(KH,idVideo,LP1);
		video = ICU.obtenerInfoAdicVideo(KH,V4); idVideo = video.getiDVideo(); ICU.agregarVideoLista(KH,idVideo,LP1);
		video = ICU.obtenerInfoAdicVideo(JB,V5); idVideo = video.getiDVideo(); ICU.agregarVideoLista(KH,idVideo,LP1);
		video = ICU.obtenerInfoAdicVideo(TC,V1); idVideo = video.getiDVideo(); ICU.agregarVideoLista(TC,idVideo,LP2);
		video = ICU.obtenerInfoAdicVideo(TC,V2); idVideo = video.getiDVideo(); ICU.agregarVideoLista(TC,idVideo,LP2);
		video = ICU.obtenerInfoAdicVideo(JB,V11); idVideo = video.getiDVideo(); ICU.agregarVideoLista(TC,idVideo,LP2);
		video = ICU.obtenerInfoAdicVideo(CS,V10); idVideo = video.getiDVideo(); ICU.agregarVideoLista(TC,idVideo,LP2);
		video = ICU.obtenerInfoAdicVideo(HG,V6); idVideo = video.getiDVideo(); ICU.agregarVideoLista(HG,idVideo,LP3);
		video = ICU.obtenerInfoAdicVideo(HG,V7); idVideo = video.getiDVideo(); ICU.agregarVideoLista(HG,idVideo,LP3);
		video = ICU.obtenerInfoAdicVideo(HG,V8); idVideo = video.getiDVideo(); ICU.agregarVideoLista(HG,idVideo,LP3);
		video = ICU.obtenerInfoAdicVideo(CS,V1); idVideo = video.getiDVideo(); ICU.agregarVideoLista(CS,idVideo,LP4);
		video = ICU.obtenerInfoAdicVideo(CS,V2); idVideo = video.getiDVideo(); ICU.agregarVideoLista(CS,idVideo,LP4);
		video = ICU.obtenerInfoAdicVideo(CS,V9); idVideo = video.getiDVideo(); ICU.agregarVideoLista(CS,idVideo,LP4);
		video = ICU.obtenerInfoAdicVideo(CS,V10); idVideo = video.getiDVideo(); ICU.agregarVideoLista(CS,idVideo,LP4);
		video = ICU.obtenerInfoAdicVideo(JB,V13); idVideo = video.getiDVideo(); ICU.agregarVideoLista(CS,idVideo,LP4);
		video = ICU.obtenerInfoAdicVideo(HG,V8); idVideo = video.getiDVideo(); ICU.agregarVideoLista(NJ,idVideo,LP5);
		video = ICU.obtenerInfoAdicVideo(NJ,V16); idVideo = video.getiDVideo(); ICU.agregarVideoLista(NJ,idVideo,LP5);
		video = ICU.obtenerInfoAdicVideo(JB,V11); idVideo = video.getiDVideo(); ICU.agregarVideoLista(JB,idVideo,LP6);
		video = ICU.obtenerInfoAdicVideo(JB,V13); idVideo = video.getiDVideo(); ICU.agregarVideoLista(JB,idVideo,LP6);

		//DEFAULT LISTS
		ICU.nuevaListaPorDefecto("Escuchar mas tarde"); //LD1
		ICU.nuevaListaPorDefecto("Deporte total"); //LD2
		ICU.nuevaListaPorDefecto("Novedades generales"); //LD3

		//COMMENTS
		int comentarioPadre;
		//video V7
		VideoHandler vh = VideoHandler.getInstance();
		DtVideo dtVideo = vh.member(V7,HG); //owner of the video: HG
		fecha = asignarFecha("5,12,2017 14:35");
		DtFecha fechaVideo = new DtFecha(fecha);
		ICV.nuevoComentario(dtVideo.getiDVideo(), NJ, fechaVideo, "Fue un gran evento");
		comentarioPadre = obtenerIdComentario(dtVideo.getiDVideo(), NJ, fechaVideo);
		fecha = asignarFecha("8,12,2017 01:47"); fechaVideo = new DtFecha(fecha);
		ICV.responderComentario(dtVideo.getiDVideo(), comentarioPadre, HR, fechaVideo, "Para el proximo aniversario ofrezco vamo con Los Momo");
		comentarioPadre = obtenerIdComentario(dtVideo.getiDVideo(), HR, fechaVideo);
		fecha = asignarFecha("10,12,2017 17:09"); fechaVideo = new DtFecha(fecha);
		ICV.responderComentario(dtVideo.getiDVideo(), comentarioPadre, TC, fechaVideo, "Yo ofrezco a la banda tb");

		dtVideo = vh.member(V6,HG);
		fecha = asignarFecha("07,09,2017 04:56"); fechaVideo = new DtFecha(fecha);
		ICV.nuevoComentario(dtVideo.getiDVideo(), NJ, fechaVideo, "Felicitaciones FING");

		dtVideo = vh.member(V8,HG);
		fecha = asignarFecha("23,10,2017 12:58"); fechaVideo = new DtFecha(fecha);
		ICV.nuevoComentario(dtVideo.getiDVideo(), KH, fechaVideo, "Un gusto cubrir eventos como este.");
		comentarioPadre = obtenerIdComentario(dtVideo.getiDVideo(), KH, fechaVideo); //look up the comment just created by KH
		fecha = asignarFecha("11,09,2018 03:45"); fechaVideo = new DtFecha(fecha);
		ICV.responderComentario(dtVideo.getiDVideo(), comentarioPadre, MT, fechaVideo, "Se viene la edicion 2018!!!");

		dtVideo = vh.member(V13,JB);
		fecha = asignarFecha("14,11,2016 05:34"); fechaVideo = new DtFecha(fecha);
		ICV.nuevoComentario(dtVideo.getiDVideo(), KH, fechaVideo, "Peñarol peñarol!!!");

		dtVideo = vh.member(V3,KH);
		fecha = asignarFecha("30,10,2017 02:17"); fechaVideo = new DtFecha(fecha);
		ICV.nuevoComentario(dtVideo.getiDVideo(), MT, fechaVideo, "Rock and Rollll");

		dtVideo = vh.member(V3,JB);
		fecha = asignarFecha("30,10,2017 02:17"); fechaVideo = new DtFecha(fecha);
		ICV.nuevoComentario(dtVideo.getiDVideo(), MT, fechaVideo, "Rock and Rollll");

		dtVideo = vh.member(V4,KH);
		fecha = asignarFecha("25,08,2018 18:00"); fechaVideo = new DtFecha(fecha);
		ICV.nuevoComentario(dtVideo.getiDVideo(), MT, fechaVideo, "Anoche exploto!!!");

		dtVideo = vh.member(V1,CS);
		fecha = asignarFecha("11,09,2017 03:45"); fechaVideo = new DtFecha(fecha);
		ICV.nuevoComentario(dtVideo.getiDVideo(), MT, fechaVideo, "Me encanta este tema");
		comentarioPadre = obtenerIdComentario(dtVideo.getiDVideo(), MT, fechaVideo);
		fecha = asignarFecha("15,09,2017 12:29"); fechaVideo = new DtFecha(fecha);
		ICV.responderComentario(dtVideo.getiDVideo(), comentarioPadre, TC, fechaVideo, "Gracias Marce ;)");

		dtVideo = vh.member(V1,TC);
		fecha = asignarFecha("11,09,2017 03:45"); fechaVideo = new DtFecha(fecha);
		ICV.nuevoComentario(dtVideo.getiDVideo(), MT, fechaVideo, "Me encanta este tema");
		comentarioPadre = obtenerIdComentario(dtVideo.getiDVideo(), MT, fechaVideo);
		fecha = asignarFecha("15,09,2017 12:29"); fechaVideo = new DtFecha(fecha);
		ICV.responderComentario(dtVideo.getiDVideo(), comentarioPadre, TC, fechaVideo, "Gracias Marce ;)");

		dtVideo = vh.member(V1,TC);
		fecha = asignarFecha("15,09,2017 12:29"); fechaVideo = new DtFecha(fecha);
		ICV.nuevoComentario(dtVideo.getiDVideo(), TC, fechaVideo, "Mi preferido por lejos!!!");

		//LIKES
		video = ICU.obtenerInfoAdicVideo(HG,V7); ICV.valorarVideo(video.getiDVideo(), SP, false); //false = dislike
		video = ICU.obtenerInfoAdicVideo(HG,V8); ICV.valorarVideo(video.getiDVideo(), SP, true);
		video = ICU.obtenerInfoAdicVideo(JB,V11); ICV.valorarVideo(video.getiDVideo(), SP, true);
		video = ICU.obtenerInfoAdicVideo(KH,V4); ICV.valorarVideo(video.getiDVideo(), NJ, false);
		video = ICU.obtenerInfoAdicVideo(CS,V1); ICV.valorarVideo(video.getiDVideo(), NJ, false);
		video = ICU.obtenerInfoAdicVideo(HG,V7); ICV.valorarVideo(video.getiDVideo(), NJ, true);
		video = ICU.obtenerInfoAdicVideo(HG,V7); ICV.valorarVideo(video.getiDVideo(), KH, true);
		video = ICU.obtenerInfoAdicVideo(JB,V13); ICV.valorarVideo(video.getiDVideo(), KH, true);
		video = ICU.obtenerInfoAdicVideo(CS,V1); ICV.valorarVideo(video.getiDVideo(), MT, true);
		video = ICU.obtenerInfoAdicVideo(HG,V8); ICV.valorarVideo(video.getiDVideo(), MT, true);
		video = ICU.obtenerInfoAdicVideo(KH,V4); ICV.valorarVideo(video.getiDVideo(), MT, true);

		//history
		video = ICU.obtenerInfoAdicVideo(TC, V1); ICU.agregarVisita(video.getiDVideo(),TC); ICU.agregarVisita(video.getiDVideo(),CS);
		video = ICU.obtenerInfoAdicVideo(TC, V2); ICU.agregarVisita(video.getiDVideo(),TC); ICU.agregarVisita(video.getiDVideo(),CS);
		video = ICU.obtenerInfoAdicVideo(CS,V2); ICU.agregarVisita(video.getiDVideo(),TC); ICU.agregarVisita(video.getiDVideo(),CS);
		video = ICU.obtenerInfoAdicVideo(KH,V3);
		agregarVisitasAlHistorial(MB, video.getiDVideo(), 5); agregarVisitasAlHistorial(CS, video.getiDVideo(), 20); agregarVisitasAlHistorial(JB, video.getiDVideo(), 1); agregarVisitasAlHistorial(DP, video.getiDVideo(), 2); agregarVisitasAlHistorial(KH, video.getiDVideo(), 8);
		video = ICU.obtenerInfoAdicVideo(KH,V4);
		agregarVisitasAlHistorial(KH, video.getiDVideo(), 6); agregarVisitasAlHistorial(NJ, video.getiDVideo(), 8);
		video = ICU.obtenerInfoAdicVideo(JB, V5);
		agregarVisitasAlHistorial(MB, video.getiDVideo(), 4); agregarVisitasAlHistorial(JB, video.getiDVideo(), 1); agregarVisitasAlHistorial(DP, video.getiDVideo(), 1); agregarVisitasAlHistorial(KH, video.getiDVideo(), 2);
		video = ICU.obtenerInfoAdicVideo(KH, V5);
		agregarVisitasAlHistorial(MB, video.getiDVideo(), 4); agregarVisitasAlHistorial(JB, video.getiDVideo(), 1); agregarVisitasAlHistorial(DP, video.getiDVideo(), 1); agregarVisitasAlHistorial(KH, video.getiDVideo(), 2);
		video = ICU.obtenerInfoAdicVideo(HG, V6);
		agregarVisitasAlHistorial(HR, video.getiDVideo(), 2); agregarVisitasAlHistorial(MB, video.getiDVideo(), 1); agregarVisitasAlHistorial(HG, video.getiDVideo(), 1); agregarVisitasAlHistorial(SP, video.getiDVideo(), 5); agregarVisitasAlHistorial(AP, video.getiDVideo(), 1);
		video = ICU.obtenerInfoAdicVideo(HG, V7);
		agregarVisitasAlHistorial(MB, video.getiDVideo(), 1); agregarVisitasAlHistorial(HG, video.getiDVideo(), 1); agregarVisitasAlHistorial(SP, video.getiDVideo(), 1); agregarVisitasAlHistorial(AR, video.getiDVideo(), 1); agregarVisitasAlHistorial(NJ, video.getiDVideo(), 3);
		video = ICU.obtenerInfoAdicVideo(HG, V8);
		agregarVisitasAlHistorial(MB, video.getiDVideo(), 1); agregarVisitasAlHistorial(HG, video.getiDVideo(), 1); agregarVisitasAlHistorial(SP, video.getiDVideo(), 1); agregarVisitasAlHistorial(NJ, video.getiDVideo(), 21);
		video = ICU.obtenerInfoAdicVideo(CS, V9);
		agregarVisitasAlHistorial(CS, video.getiDVideo(), 1);
		video = ICU.obtenerInfoAdicVideo(CS, V10);
		agregarVisitasAlHistorial(CS, video.getiDVideo(), 2); agregarVisitasAlHistorial(KH, video.getiDVideo(), 1);
		video = ICU.obtenerInfoAdicVideo(JB, V11);
		agregarVisitasAlHistorial(HR, video.getiDVideo(), 4); agregarVisitasAlHistorial(MB, video.getiDVideo(), 3); agregarVisitasAlHistorial(HG, video.getiDVideo(), 10); agregarVisitasAlHistorial(CS, video.getiDVideo(), 5); agregarVisitasAlHistorial(JB, video.getiDVideo(), 2); agregarVisitasAlHistorial(SP, video.getiDVideo(), 2);
		video = ICU.obtenerInfoAdicVideo(TC, V12);
		agregarVisitasAlHistorial(TC, video.getiDVideo(), 1);
		video = ICU.obtenerInfoAdicVideo(AP, V12);
		agregarVisitasAlHistorial(TC, video.getiDVideo(), 1);
		video = ICU.obtenerInfoAdicVideo(JB, V13);
		agregarVisitasAlHistorial(JB, video.getiDVideo(), 2);
		video = ICU.obtenerInfoAdicVideo(AR, V14);
		video = ICU.obtenerInfoAdicVideo(NJ, V15);
		agregarVisitasAlHistorial(NJ, video.getiDVideo(), 10);
		video = ICU.obtenerInfoAdicVideo(NJ, V16);
		agregarVisitasAlHistorial(NJ, video.getiDVideo(), 4);
	}

	private void agregarVisitasAlHistorial(String nick, int idV, int cantVeces) {
		for (int i = cantVeces; i > 0; i--) {
			ICU.agregarVisita(idV, nick);
		}
	}

	private Date asignarFecha(String fechaConHora) {
		SimpleDateFormat sdf = new SimpleDateFormat("dd,MM,yyyy HH:mm");
		Date fecha = null;
		try {
			fecha = sdf.parse(fechaConHora);
		} catch (ParseException e1) {
			//if the date cannot be parsed, null is returned
			e1.printStackTrace();
		}
		return fecha;
	}

	private int obtenerIdComentario(int idVideo, String nick, DtFecha fechaComen) {
		DtComentario[] coments = ICV.listarComentarios(idVideo);
		return buscarComentario(coments, nick, fechaComen);
	}

	//searches the comment tree for the comment created by nick at fechaComen and returns its id (-1 if not found)
	private int buscarComentario(DtComentario[] coments, String nick, DtFecha fechaComen) {
		DtComentario[] comentsHijos;
		int i = 0;
		int idComentario = -1;
		while (i < coments.length && idComentario == -1) {
			//compare by value, not by reference
			if (coments[i].getFecha().equals(fechaComen) && coments[i].getNickUsuario().equals(nick)) {
				return coments[i].getIdComentario();
			}
			comentsHijos = coments[i].getRespuestas();
			idComentario = buscarComentario(comentsHijos, nick, fechaComen);
			i++;
		}
		return idComentario;
	}
}
<file_sep>package uytubeLogic.JUnitTests;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import java.util.Date;

import org.junit.Test;

import uytubeLogic.logica.Categoria;
import uytubeLogic.logica.DtCategoria;
import uytubeLogic.logica.DtComentario;
import uytubeLogic.logica.DtFecha;
import uytubeLogic.logica.DtPuntuacion;
import uytubeLogic.logica.SystemHandler;
import uytubeLogic.logica.UsuarioCtrl;
import uytubeLogic.logica.Video;
import uytubeLogic.logica.VideoCtrl;
import uytubeLogic.logica.SystemHandler.Privacidad;

public class VideoTest {

	@Test
	public void testIngresarNuevosDatosVideo() {
		String nombreVideo = "nombreINDV";
		SystemHandler sh = SystemHandler.getInstance();
		DtFecha fecha = new DtFecha(new Date(0));
		DtFecha fechaN = new DtFecha(new Date(2));
		Categoria categoriaE = new Categoria("cateINDV");
		DtCategoria cate1 = null;
		DtCategoria cate2 = new DtCategoria(categoriaE);
		VideoCtrl VCU = VideoCtrl.getInstance();
		VCU.crearCategoria("cateINDV");
		Video video = new Video(nombreVideo, "duenio", "descr", 20, fecha, "hola.com", cate2, Privacidad.PRIVADO);
		assertEquals(true, video.getNombre() == nombreVideo);
		assertEquals(true, video.getCategoria().equals(cate2));
		assertEquals(true, video.getPropietario() == "duenio");
		assertEquals(true, video.getDescripcion() == "descr");
		assertEquals(true, video.getFechaPublicacion().equals(fecha));
		assertEquals(true, video.getPrivacidad() == Privacidad.PRIVADO);
		assertEquals(true, video.getDuracion() == 20);
		assertEquals(true, video.getURL() == "hola.com");
		video.ingresarNuevosDatosVideo("nuevaDesc", 30, fechaN, "hola2.com", cate1, Privacidad.PUBLICO);
		assertEquals(true, video.getNombre() == nombreVideo);
		assertEquals(true, video.getCategoria().equals(new DtCategoria(sh.getSinCat())));
		assertEquals(true, video.getPropietario() == "duenio");
		assertEquals(true, video.getDescripcion() == "nuevaDesc");
		assertEquals(true, video.getFechaPublicacion().equals(fechaN));
		assertEquals(true, video.getPrivacidad() == Privacidad.PUBLICO);
		assertEquals(true, video.getDuracion() == 30);
		assertEquals(true, video.getURL() == "hola2.com");
	}

	@Test
	public void testNuevoComentario() {
		String nombreVideo = "nombreNC";
		String nombreU = "nombreUNC";
		UsuarioCtrl UCU = UsuarioCtrl.getInstance();
		DtFecha fecha = new DtFecha(new Date(2));
		UCU.nuevoUsuario(nombreU, "1234", "pedrito", "gimenez", "email.com", fecha, null,
"nombrecito", "descripcion", Privacidad.PRIVADO, null); Video video = new Video(nombreVideo, "pepito", "descrito", 20, fecha, "url.com", null, Privacidad.PRIVADO); video.nuevoComentario(nombreU, fecha, "contenidoComentario"); video.nuevoComentario(nombreU, fecha, "contenidoComentario2"); DtComentario[] comentarios = video.getComentarios(); assertEquals(2, comentarios.length); boolean existe1 = false; boolean existe2 = false; for (DtComentario comentarioParticular : comentarios) { if (comentarioParticular.isEsPadre() == true && comentarioParticular.getNickUsuario() == nombreU && comentarioParticular.getRespuestas().length == 0 && comentarioParticular.getTexto() == "contenidoComentario") { existe1 = true; } if (comentarioParticular.isEsPadre() == true && comentarioParticular.getNickUsuario() == nombreU && comentarioParticular.getRespuestas().length == 0 && comentarioParticular.getTexto() == "contenidoComentario2") { existe2 = true; } } assertTrue(existe1); assertTrue(existe2); } @Test public void testResponderComentario() { String nombreVideo = "nombreRC"; String nombreU = "nombreURC"; UsuarioCtrl UCU = UsuarioCtrl.getInstance(); DtFecha fecha = new DtFecha(new Date(2)); UCU.nuevoUsuario(nombreU, "1234", "pedrito", "gimenez", "email.com", fecha, null, "nombrecito", "descripcion", Privacidad.PRIVADO, null); Video video = new Video(nombreVideo, "pepito", "descrito", 20, fecha, "url.com", null, Privacidad.PRIVADO); video.nuevoComentario(nombreU, fecha, "contenidoComentario"); DtComentario[] comentarios = video.getComentarios(); video.responderComentario(comentarios[0].getIdComentario(), nombreU, fecha, "contenidoComentario2"); comentarios = video.getComentarios(); assertEquals(1, comentarios.length); boolean existe1 = false; boolean existe2 = false; for (DtComentario comentarioParticular : comentarios) { if (comentarioParticular.isEsPadre() == true && comentarioParticular.getNickUsuario() == nombreU && comentarioParticular.getRespuestas().length == 1 && comentarioParticular.getTexto() == "contenidoComentario") { existe1 = true; } DtComentario[] respuestas = comentarioParticular.getRespuestas(); if (respuestas.length > 0) { DtComentario respuestaParticular = respuestas[0]; if (respuestaParticular.isEsPadre() == false && respuestaParticular.getNickUsuario() == nombreU && respuestaParticular.getRespuestas().length == 0 && respuestaParticular.getTexto() == "contenidoComentario2") { existe2 = true; } } } assertTrue(existe1); assertTrue(existe2); } @Test public void testValorarVideo() { String nombreVideo = "nombreAP"; String nombreU = "nombreUAP"; String nombreU2 = "nombreUAP2"; UsuarioCtrl UCU = UsuarioCtrl.getInstance(); DtFecha fecha = new DtFecha(new Date(2)); UCU.nuevoUsuario(nombreU, "1234", "pedrito", "gimenez", "email.com", fecha, null, "nombrecito", "descripcion", Privacidad.PRIVADO, null); UCU.nuevoUsuario(nombreU2, "1234", "pedrito", "gimenez", "email.com", fecha, null, "nombrecito", "descripcion", Privacidad.PRIVADO, null); Video video = new Video(nombreVideo, "pepito", "descrito", 20, fecha, "url.com", null, Privacidad.PRIVADO); video.valorarVideo(nombreU, true); video.valorarVideo(nombreU2, false); DtPuntuacion[] puntuaciones = video.getPuntuaciones(); boolean existe1 = false; boolean existe2 = false; for (DtPuntuacion puntActual : puntuaciones) { if (puntActual.getNickname() == nombreU && puntActual.getValoracion() == true) { existe1 = true; } if (puntActual.getNickname() == nombreU2 && puntActual.getValoracion() == false) { existe2 = true; } } assertTrue(existe1); 
assertTrue(existe2); } } <file_sep>package uytube.admin.categoria.consulta; import javax.swing.JInternalFrame; import javax.swing.JComboBox; import javax.swing.JLabel; import javax.swing.JPanel; import java.awt.GridLayout; import javax.swing.JOptionPane; import uytubeLogic.logica.DtCategoria; import uytubeLogic.logica.DtListaReproduccion; import uytubeLogic.logica.DtVideo; import uytubeLogic.logica.IVideoCtrl; import javax.swing.JTable; import javax.swing.table.DefaultTableModel; import javax.swing.JSeparator; import java.awt.event.ActionListener; import java.awt.event.ActionEvent; import javax.swing.JScrollPane; public class ConsultaCategoriaJInternalFrame extends JInternalFrame { private JTable table_LDR; private JTable table_Video; /** * Launch the application. COMENTADO FUNCIONA IGUAL */ /*public static void main(String[] args) { Factory fabrica = Factory.getInstance(); IVideoCtrl ICV = fabrica.getIVideoCtrl(); EventQueue.invokeLater(new Runnable() { public void run() { try { ConsultaCategoriaJInternalFrame frame = new ConsultaCategoriaJInternalFrame(ICV); frame.setVisible(true); } catch (Exception e) { e.printStackTrace(); } } }); }*/ /** * Create the frame. * @param iCV */ public static void infoBox(String infoMessage, String titleBar){ JOptionPane.showMessageDialog(null, infoMessage, "" + titleBar, JOptionPane.INFORMATION_MESSAGE); } DefaultTableModel ModeloNombrePropietario() { DefaultTableModel model = new DefaultTableModel(); model.addColumn("Nombre"); model.addColumn("Propietario"); return model; }; public ConsultaCategoriaJInternalFrame(IVideoCtrl iCV) { setResizable(true); setIconifiable(true); setMaximizable(true); setClosable(true); setTitle("Listar por categorias"); setBounds(100, 100, 550, 400); JPanel panel_Video = new JPanel(); JPanel panel = new JPanel(); JPanel panel_LDR = new JPanel(); getContentPane().setLayout(new GridLayout(0, 1, 0, 0)); getContentPane().add(panel); panel.setLayout(new GridLayout(3, 1, 5, 5)); getContentPane().add(panel_LDR); getContentPane().add(panel_Video); JLabel lblSeleccioneUnaCategoria = new JLabel("Seleccione una categoria"); panel.add(lblSeleccioneUnaCategoria); panel_LDR.setLayout(new GridLayout(0, 1, 0, 0)); JLabel lblListas = new JLabel("Listas"); panel_LDR.add(lblListas); panel_Video.setLayout(new GridLayout(0, 1, 0, 0)); JLabel lblVideos = new JLabel("Videos"); panel_Video.add(lblVideos); JComboBox comboBox = new JComboBox(); panel.add(comboBox); JScrollPane scrollPaneVideos = new JScrollPane(); panel_Video.add(scrollPaneVideos); table_Video = new JTable(ModeloNombrePropietario()); scrollPaneVideos.setViewportView(table_Video); JScrollPane scrollPaneLDR = new JScrollPane(); panel_LDR.add(scrollPaneLDR); table_LDR = new JTable(ModeloNombrePropietario()); scrollPaneLDR.setViewportView(table_LDR); DtCategoria[] set_cat=iCV.listarCategorias(); for(int i=0; i<set_cat.length;i++) {comboBox.addItem(set_cat[i].getNombre());} comboBox.setSelectedIndex(-1); JSeparator separator = new JSeparator(); panel.add(separator); comboBox.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent e) { DefaultTableModel modelo_video= (DefaultTableModel) table_Video.getModel(); DefaultTableModel modelo_ldr= (DefaultTableModel) table_LDR.getModel(); modelo_video.setRowCount(0); modelo_ldr.setRowCount(0); DtVideo [] listarvideos= iCV.listarVideosPorCategoria(comboBox.getSelectedItem().toString(), null, null); DtListaReproduccion [] listarLDR= iCV.listarLDRPorCategoria(comboBox.getSelectedItem().toString(), null, null); 
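// Populate the video and playlist tables with the results for the selected category; a placeholder row is added below when a category has no content.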
if(listarvideos.length>0) { for(int i=0;i<listarvideos.length;i++) { modelo_video.addRow(new Object[]{listarvideos[i].getNombre(),listarvideos[i].getPropietario()}); } } else { modelo_video.addRow(new Object[]{"NO HAY","VIDEOS"}); } if(listarLDR.length>0) { for(int i=0; i<listarLDR.length;i++) { modelo_ldr.addRow(new Object[] {listarLDR[i].getNombre(),listarLDR[i].getPropietario()}); } } else { modelo_ldr.addRow(new Object[]{"NO HAY","LISTAS"}); } } }); } } <file_sep>package uytubeLogic.logica; import uytubeLogic.logica.SystemHandler.Privacidad; public class VideoCtrl implements IVideoCtrl { private static VideoCtrl instance = null; private VideoCtrl() { } public static VideoCtrl getInstance() { if (instance == null) instance = new VideoCtrl(); return instance; } public DtListaReproduccion[] listarLDRPorCategoria(String cat, Privacidad priv, String nomU) { CategoriaHandler CatHandler = CategoriaHandler.getInstance(); if (CatHandler.isMember(cat)) { Categoria cate = CatHandler.find(cat); return cate.listarLDR(priv,nomU); } else return null; } public DtVideo[] listarVideosPorCategoria(String cat, Privacidad priv, String nomU) { CategoriaHandler CatHandler = CategoriaHandler.getInstance(); if (CatHandler.isMember(cat)) { Categoria cate = CatHandler.find(cat); return cate.listarVideos(priv,nomU); } else return null; } public DtComentario[] listarComentarios(Integer IDVideo) { VideoHandler VidHandler = VideoHandler.getInstance(); Video videoEspecifico = VidHandler.find(IDVideo); return videoEspecifico.getComentarios(); } public void nuevoComentario(Integer IDVideo, String nickU, DtFecha fecha, String contenido) { VideoHandler VidHandler = VideoHandler.getInstance(); Video videoEspecifico = VidHandler.find(IDVideo); videoEspecifico.nuevoComentario(nickU, fecha, contenido); } public void responderComentario(Integer IDVideo, Integer IDCR, String nickU, DtFecha fecha, String contenido) { VideoHandler VidHandler = VideoHandler.getInstance(); Video videoEspecifico = VidHandler.find(IDVideo); videoEspecifico.responderComentario(IDCR, nickU, fecha, contenido); } public void valorarVideo(Integer IDVideo, String nickU, boolean valoracion) { VideoHandler VidHandler = VideoHandler.getInstance(); Video videoEspecifico = VidHandler.find(IDVideo); videoEspecifico.valorarVideo(nickU, valoracion); } public DtInfoVideo verDetallesVideoExt(Integer IDVideo) { VideoHandler VidHandler = VideoHandler.getInstance(); Video videoEspecifico = VidHandler.find(IDVideo); return videoEspecifico.getInfoVideoExt(); } public DtVideo infoAddVideo(Integer IDVideo) { VideoHandler VidHandler = VideoHandler.getInstance(); Video videoEspecifico = VidHandler.find(IDVideo); if(videoEspecifico!=null) return videoEspecifico.verDetallesVideo(); else return null; } public DtVideo[] listarVideos() { VideoHandler VidHandler = VideoHandler.getInstance(); return VidHandler.listarVideos(); } public DtCategoria[] listarCategorias() { CategoriaHandler CatHandler = CategoriaHandler.getInstance(); return CatHandler.listarCategorias(); } public void crearCategoria(String nombreCat) { CategoriaHandler CatHandler = CategoriaHandler.getInstance(); if (!CatHandler.isMember(nombreCat)) { Categoria nuevaCat = new Categoria(nombreCat); CatHandler.addCategoria(nuevaCat); } } public boolean existeCategoria(String nombreCat) { CategoriaHandler CatHandler = CategoriaHandler.getInstance(); return CatHandler.isMember(nombreCat); } @Override public DtVideo[] listarVideosPublicosPorNombre(String nombre) { VideoHandler VidHandler = VideoHandler.getInstance(); 
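		// The public search by name is delegated to the VideoHandler singleton.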
return VidHandler.listarVideosPublicosPorNombre(nombre); } @Override public String getEstadoValoracion(Integer IDVideo, String nickUsuario) { String status = ""; DtInfoVideo infoVideo = this.verDetallesVideoExt(IDVideo); DtUsuario[] usuariosLikes = infoVideo.getUsuariosGusta(); DtUsuario[] usuariosDislikes = infoVideo.getUsuariosNoGusta(); for (int index = 0; index < usuariosLikes.length; index++) { if (usuariosLikes[index].getNickname().equals(nickUsuario)) { status = "like"; } } for (int index = 0; index < usuariosDislikes.length; index++) { if (usuariosDislikes[index].getNickname().equals(nickUsuario)) { status = "dislike"; } } if (status == "") status = "neutral"; return status; } public boolean memberVideo(Integer idVideo) { VideoHandler VidHandler = VideoHandler.getInstance(); return VidHandler.find(idVideo)!=null; } } <file_sep> /** * Please modify this class to meet your needs * This class is not complete */ package uytubeLogica.publicar; import java.util.logging.Logger; import javax.jws.WebMethod; import javax.jws.WebParam; import javax.jws.WebResult; import javax.jws.WebService; import javax.jws.soap.SOAPBinding; import javax.xml.bind.annotation.XmlSeeAlso; import javax.xml.ws.Action; /** * This class was generated by Apache CXF 3.2.6 * 2018-11-14T13:58:41.786-03:00 * Generated source version: 3.2.6 * */ @javax.jws.WebService( serviceName = "WebServicesService", portName = "WebServicesPort", targetNamespace = "http://publicar.uytubeLogica/", wsdlLocation = "http://localhost:9128/webservices?wsdl", endpointInterface = "uytubeLogica.publicar.WebServices") public class WebServicesPortImpl implements WebServices { private static final Logger LOG = Logger.getLogger(WebServicesPortImpl.class.getName()); /* (non-Javadoc) * @see uytubeLogica.publicar.WebServices#cambiarPrivLDR(java.lang.String arg0, java.lang.String arg1, uytubeLogica.publicar.Privacidad arg2)* */ public void cambiarPrivLDR(java.lang.String arg0, java.lang.String arg1, Privacidad arg2) { LOG.info("Executing operation cambiarPrivLDR"); System.out.println(arg0); System.out.println(arg1); System.out.println(arg2); try { } catch (java.lang.Exception ex) { ex.printStackTrace(); throw new RuntimeException(ex); } } /* (non-Javadoc) * @see uytubeLogica.publicar.WebServices#operacionPrueba()* */ public void operacionPrueba() { LOG.info("Executing operation operacionPrueba"); try { } catch (java.lang.Exception ex) { ex.printStackTrace(); throw new RuntimeException(ex); } } /* (non-Javadoc) * @see uytubeLogica.publicar.WebServices#listarLDRPublicasPorNombre(java.lang.String arg0)* */ public uytubeLogica.publicar.DtListaReproduccionArray listarLDRPublicasPorNombre(java.lang.String arg0) { LOG.info("Executing operation listarLDRPublicasPorNombre"); System.out.println(arg0); try { uytubeLogica.publicar.DtListaReproduccionArray _return = new uytubeLogica.publicar.DtListaReproduccionArray(); java.util.List<uytubeLogica.publicar.DtListaReproduccion> _returnItem = new java.util.ArrayList<uytubeLogica.publicar.DtListaReproduccion>(); uytubeLogica.publicar.DtListaReproduccion _returnItemVal1 = new uytubeLogica.publicar.DtListaReproduccion(); java.util.List<uytubeLogica.publicar.DtCategoria> _returnItemVal1CategoriasLDR = new java.util.ArrayList<uytubeLogica.publicar.DtCategoria>(); _returnItemVal1.getCategoriasLDR().addAll(_returnItemVal1CategoriasLDR); _returnItemVal1.setNombre("Nombre-1127601848"); uytubeLogica.publicar.Privacidad _returnItemVal1Privado = uytubeLogica.publicar.Privacidad.PRIVADO; 
_returnItemVal1.setPrivado(_returnItemVal1Privado); _returnItemVal1.setPropietario("Propietario-91283187"); uytubeLogica.publicar.TipoLista _returnItemVal1TipoL = uytubeLogica.publicar.TipoLista.PORDEFECTO; _returnItemVal1.setTipoL(_returnItemVal1TipoL); _returnItemVal1.setUltimoVideo(javax.xml.datatype.DatatypeFactory.newInstance().newXMLGregorianCalendar("2018-10-28T15:58:54.702-03:00")); _returnItem.add(_returnItemVal1); _return.getItem().addAll(_returnItem); return _return; } catch (java.lang.Exception ex) { ex.printStackTrace(); throw new RuntimeException(ex); } } /* (non-Javadoc) * @see uytubeLogica.publicar.WebServices#listarLDRPorCategoria(java.lang.String arg0, uytubeLogica.publicar.Privacidad arg1, java.lang.String arg2)* */ public uytubeLogica.publicar.DtListaReproduccionArray listarLDRPorCategoria(java.lang.String arg0, Privacidad arg1, java.lang.String arg2) { LOG.info("Executing operation listarLDRPorCategoria"); System.out.println(arg0); System.out.println(arg1); System.out.println(arg2); try { uytubeLogica.publicar.DtListaReproduccionArray _return = new uytubeLogica.publicar.DtListaReproduccionArray(); java.util.List<uytubeLogica.publicar.DtListaReproduccion> _returnItem = new java.util.ArrayList<uytubeLogica.publicar.DtListaReproduccion>(); uytubeLogica.publicar.DtListaReproduccion _returnItemVal1 = new uytubeLogica.publicar.DtListaReproduccion(); java.util.List<uytubeLogica.publicar.DtCategoria> _returnItemVal1CategoriasLDR = new java.util.ArrayList<uytubeLogica.publicar.DtCategoria>(); _returnItemVal1.getCategoriasLDR().addAll(_returnItemVal1CategoriasLDR); _returnItemVal1.setNombre("Nombre1999994430"); uytubeLogica.publicar.Privacidad _returnItemVal1Privado = uytubeLogica.publicar.Privacidad.PRIVADO; _returnItemVal1.setPrivado(_returnItemVal1Privado); _returnItemVal1.setPropietario("Propietario97289151"); uytubeLogica.publicar.TipoLista _returnItemVal1TipoL = uytubeLogica.publicar.TipoLista.PORDEFECTO; _returnItemVal1.setTipoL(_returnItemVal1TipoL); _returnItemVal1.setUltimoVideo(javax.xml.datatype.DatatypeFactory.newInstance().newXMLGregorianCalendar("2018-10-28T15:58:54.703-03:00")); _returnItem.add(_returnItemVal1); _return.getItem().addAll(_returnItem); return _return; } catch (java.lang.Exception ex) { ex.printStackTrace(); throw new RuntimeException(ex); } } /* (non-Javadoc) * @see uytubeLogica.publicar.WebServices#verificarDispUsuario(java.lang.String arg0, java.lang.String arg1)* */ public boolean verificarDispUsuario(java.lang.String arg0, java.lang.String arg1) { LOG.info("Executing operation verificarDispUsuario"); System.out.println(arg0); System.out.println(arg1); try { boolean _return = true; return _return; } catch (java.lang.Exception ex) { ex.printStackTrace(); throw new RuntimeException(ex); } } /* (non-Javadoc) * @see uytubeLogica.publicar.WebServices#listarUsuariosQueSigue(java.lang.String arg0)* */ public net.java.dev.jaxb.array.StringArray listarUsuariosQueSigue(java.lang.String arg0) { LOG.info("Executing operation listarUsuariosQueSigue"); System.out.println(arg0); try { net.java.dev.jaxb.array.StringArray _return = new net.java.dev.jaxb.array.StringArray(); java.util.List<java.lang.String> _returnItem = new java.util.ArrayList<java.lang.String>(); java.lang.String _returnItemVal1 = "_returnItemVal-450480077"; _returnItem.add(_returnItemVal1); _return.getItem().addAll(_returnItem); return _return; } catch (java.lang.Exception ex) { ex.printStackTrace(); throw new RuntimeException(ex); } } /* (non-Javadoc) * @see 
uytubeLogica.publicar.WebServices#listarCanalesPublicosPorNombre(java.lang.String arg0)* */ public uytubeLogica.publicar.DtCanalArray listarCanalesPublicosPorNombre(java.lang.String arg0) { LOG.info("Executing operation listarCanalesPublicosPorNombre"); System.out.println(arg0); try { uytubeLogica.publicar.DtCanalArray _return = new uytubeLogica.publicar.DtCanalArray(); java.util.List<uytubeLogica.publicar.DtCanal> _returnItem = new java.util.ArrayList<uytubeLogica.publicar.DtCanal>(); uytubeLogica.publicar.DtCanal _returnItemVal1 = new uytubeLogica.publicar.DtCanal(); uytubeLogica.publicar.DtCategoria _returnItemVal1Categoria = new uytubeLogica.publicar.DtCategoria(); _returnItemVal1Categoria.setNombre("Nombre44298059"); _returnItemVal1.setCategoria(_returnItemVal1Categoria); _returnItemVal1.setDescripcion("Descripcion-834455850"); _returnItemVal1.setNombre("Nombre-123555089"); uytubeLogica.publicar.Privacidad _returnItemVal1Privado = uytubeLogica.publicar.Privacidad.PUBLICO; _returnItemVal1.setPrivado(_returnItemVal1Privado); _returnItemVal1.setPropietario("Propietario363371352"); _returnItemVal1.setUltimoVideo(javax.xml.datatype.DatatypeFactory.newInstance().newXMLGregorianCalendar("2018-10-28T15:58:54.704-03:00")); _returnItem.add(_returnItemVal1); _return.getItem().addAll(_returnItem); return _return; } catch (java.lang.Exception ex) { ex.printStackTrace(); throw new RuntimeException(ex); } } /* (non-Javadoc) * @see uytubeLogica.publicar.WebServices#listarDatosUsuario(java.lang.String arg0)* */ public uytubeLogica.publicar.DtUsuario listarDatosUsuario(java.lang.String arg0) { LOG.info("Executing operation listarDatosUsuario"); System.out.println(arg0); try { uytubeLogica.publicar.DtUsuario _return = new uytubeLogica.publicar.DtUsuario(); _return.setApellido("Apellido-1751197390"); _return.setEmail("Email954602316"); uytubeLogica.publicar.DtFecha _returnFechaNacimiento = new uytubeLogica.publicar.DtFecha(); _returnFechaNacimiento.setFecha(javax.xml.datatype.DatatypeFactory.newInstance().newXMLGregorianCalendar("2018-10-28T15:58:54.705-03:00")); _return.setFechaNacimiento(_returnFechaNacimiento); byte[] _returnFoto = new byte[] {}; _return.setFoto(_returnFoto); _return.setNickname("Nickname-2140349646"); _return.setNombre("Nombre1273651997"); return _return; } catch (java.lang.Exception ex) { ex.printStackTrace(); throw new RuntimeException(ex); } } /* (non-Javadoc) * @see uytubeLogica.publicar.WebServices#seguirUsuario(java.lang.String arg0, java.lang.String arg1)* */ public void seguirUsuario(java.lang.String arg0, java.lang.String arg1) { LOG.info("Executing operation seguirUsuario"); System.out.println(arg0); System.out.println(arg1); try { } catch (java.lang.Exception ex) { ex.printStackTrace(); throw new RuntimeException(ex); } } /* (non-Javadoc) * @see uytubeLogica.publicar.WebServices#valorarVideo(int arg0, java.lang.String arg1, boolean arg2)* */ public void valorarVideo(int arg0, java.lang.String arg1, boolean arg2) { LOG.info("Executing operation valorarVideo"); System.out.println(arg0); System.out.println(arg1); System.out.println(arg2); try { } catch (java.lang.Exception ex) { ex.printStackTrace(); throw new RuntimeException(ex); } } /* (non-Javadoc) * @see uytubeLogica.publicar.WebServices#infoLDRdeUsuario(java.lang.String arg0, java.lang.String arg1, uytubeLogica.publicar.Privacidad arg2)* */ public uytubeLogica.publicar.DtListaReproduccionArray infoLDRdeUsuario(java.lang.String arg0, java.lang.String arg1, Privacidad arg2) { LOG.info("Executing operation 
infoLDRdeUsuario"); System.out.println(arg0); System.out.println(arg1); System.out.println(arg2); try { uytubeLogica.publicar.DtListaReproduccionArray _return = new uytubeLogica.publicar.DtListaReproduccionArray(); java.util.List<uytubeLogica.publicar.DtListaReproduccion> _returnItem = new java.util.ArrayList<uytubeLogica.publicar.DtListaReproduccion>(); uytubeLogica.publicar.DtListaReproduccion _returnItemVal1 = new uytubeLogica.publicar.DtListaReproduccion(); java.util.List<uytubeLogica.publicar.DtCategoria> _returnItemVal1CategoriasLDR = new java.util.ArrayList<uytubeLogica.publicar.DtCategoria>(); _returnItemVal1.getCategoriasLDR().addAll(_returnItemVal1CategoriasLDR); _returnItemVal1.setNombre("Nombre-1586096272"); uytubeLogica.publicar.Privacidad _returnItemVal1Privado = uytubeLogica.publicar.Privacidad.PRIVADO; _returnItemVal1.setPrivado(_returnItemVal1Privado); _returnItemVal1.setPropietario("Propietario-612975778"); uytubeLogica.publicar.TipoLista _returnItemVal1TipoL = uytubeLogica.publicar.TipoLista.PARTICULAR; _returnItemVal1.setTipoL(_returnItemVal1TipoL); _returnItemVal1.setUltimoVideo(javax.xml.datatype.DatatypeFactory.newInstance().newXMLGregorianCalendar("2018-10-28T15:58:54.709-03:00")); _returnItem.add(_returnItemVal1); _return.getItem().addAll(_returnItem); return _return; } catch (java.lang.Exception ex) { ex.printStackTrace(); throw new RuntimeException(ex); } } /* (non-Javadoc) * @see uytubeLogica.publicar.WebServices#mostrarInfoCanal(java.lang.String arg0)* */ public uytubeLogica.publicar.DtCanal mostrarInfoCanal(java.lang.String arg0) { LOG.info("Executing operation mostrarInfoCanal"); System.out.println(arg0); try { uytubeLogica.publicar.DtCanal _return = new uytubeLogica.publicar.DtCanal(); uytubeLogica.publicar.DtCategoria _returnCategoria = new uytubeLogica.publicar.DtCategoria(); _returnCategoria.setNombre("Nombre-608644915"); _return.setCategoria(_returnCategoria); _return.setDescripcion("Descripcion-545218073"); _return.setNombre("Nombre-1540166898"); uytubeLogica.publicar.Privacidad _returnPrivado = uytubeLogica.publicar.Privacidad.PUBLICO; _return.setPrivado(_returnPrivado); _return.setPropietario("Propietario-176381017"); _return.setUltimoVideo(javax.xml.datatype.DatatypeFactory.newInstance().newXMLGregorianCalendar("2018-10-28T15:58:54.710-03:00")); return _return; } catch (java.lang.Exception ex) { ex.printStackTrace(); throw new RuntimeException(ex); } } /* (non-Javadoc) * @see uytubeLogica.publicar.WebServices#agregarVideoLista(java.lang.String arg0, int arg1, java.lang.String arg2)* */ public void agregarVideoLista(java.lang.String arg0, int arg1, java.lang.String arg2) { LOG.info("Executing operation agregarVideoLista"); System.out.println(arg0); System.out.println(arg1); System.out.println(arg2); try { } catch (java.lang.Exception ex) { ex.printStackTrace(); throw new RuntimeException(ex); } } /* (non-Javadoc) * @see uytubeLogica.publicar.WebServices#verDetallesVideoExt(int arg0)* */ public uytubeLogica.publicar.DtInfoVideo verDetallesVideoExt(int arg0) { LOG.info("Executing operation verDetallesVideoExt"); System.out.println(arg0); try { uytubeLogica.publicar.DtInfoVideo _return = new uytubeLogica.publicar.DtInfoVideo(); java.util.List<uytubeLogica.publicar.DtComentario> _returnComentarios = new java.util.ArrayList<uytubeLogica.publicar.DtComentario>(); uytubeLogica.publicar.DtComentario _returnComentariosVal1 = new uytubeLogica.publicar.DtComentario(); _returnComentariosVal1.setEsPadre(true); uytubeLogica.publicar.DtFecha 
_returnComentariosVal1Fecha = new uytubeLogica.publicar.DtFecha(); _returnComentariosVal1Fecha.setFecha(javax.xml.datatype.DatatypeFactory.newInstance().newXMLGregorianCalendar("2018-10-28T15:58:54.712-03:00")); _returnComentariosVal1.setFecha(_returnComentariosVal1Fecha); byte[] _returnComentariosVal1FotoDuenio = new byte[] {}; _returnComentariosVal1.setFotoDuenio(_returnComentariosVal1FotoDuenio); _returnComentariosVal1.setIdComentario(Integer.valueOf(-621768658)); _returnComentariosVal1.setNickUsuario("NickUsuario1137055633"); java.util.List<uytubeLogica.publicar.DtComentario> _returnComentariosVal1Respuestas = new java.util.ArrayList<uytubeLogica.publicar.DtComentario>(); _returnComentariosVal1.getRespuestas().addAll(_returnComentariosVal1Respuestas); _returnComentariosVal1.setTexto("Texto-393160894"); _returnComentarios.add(_returnComentariosVal1); _return.getComentarios().addAll(_returnComentarios); uytubeLogica.publicar.DtVideo _returnInfoVideo = new uytubeLogica.publicar.DtVideo(); uytubeLogica.publicar.DtCategoria _returnInfoVideoCategoria = new uytubeLogica.publicar.DtCategoria(); _returnInfoVideoCategoria.setNombre("Nombre1646473896"); _returnInfoVideo.setCategoria(_returnInfoVideoCategoria); _returnInfoVideo.setDescripcion("Descripcion961937607"); _returnInfoVideo.setDuracionSS(-1120047624); uytubeLogica.publicar.DtFecha _returnInfoVideoFechaPublicacion = new uytubeLogica.publicar.DtFecha(); _returnInfoVideoFechaPublicacion.setFecha(javax.xml.datatype.DatatypeFactory.newInstance().newXMLGregorianCalendar("2018-10-28T15:58:54.713-03:00")); _returnInfoVideo.setFechaPublicacion(_returnInfoVideoFechaPublicacion); _returnInfoVideo.setNombre("Nombre1807785416"); uytubeLogica.publicar.Privacidad _returnInfoVideoPrivacidad = uytubeLogica.publicar.Privacidad.PRIVADO; _returnInfoVideo.setPrivacidad(_returnInfoVideoPrivacidad); _returnInfoVideo.setPropietario("Propietario933363368"); _returnInfoVideo.setUrl("Url833216365"); _returnInfoVideo.setIDVideo(Integer.valueOf(1416163985)); _return.setInfoVideo(_returnInfoVideo); java.util.List<uytubeLogica.publicar.DtUsuario> _returnUsuariosGusta = new java.util.ArrayList<uytubeLogica.publicar.DtUsuario>(); uytubeLogica.publicar.DtUsuario _returnUsuariosGustaVal1 = new uytubeLogica.publicar.DtUsuario(); _returnUsuariosGustaVal1.setApellido("Apellido1899956718"); _returnUsuariosGustaVal1.setEmail("Email391285058"); uytubeLogica.publicar.DtFecha _returnUsuariosGustaVal1FechaNacimiento = new uytubeLogica.publicar.DtFecha(); _returnUsuariosGustaVal1FechaNacimiento.setFecha(javax.xml.datatype.DatatypeFactory.newInstance().newXMLGregorianCalendar("2018-10-28T15:58:54.713-03:00")); _returnUsuariosGustaVal1.setFechaNacimiento(_returnUsuariosGustaVal1FechaNacimiento); byte[] _returnUsuariosGustaVal1Foto = new byte[] {}; _returnUsuariosGustaVal1.setFoto(_returnUsuariosGustaVal1Foto); _returnUsuariosGustaVal1.setNickname("Nickname1255446705"); _returnUsuariosGustaVal1.setNombre("Nombre2123864229"); _returnUsuariosGusta.add(_returnUsuariosGustaVal1); _return.getUsuariosGusta().addAll(_returnUsuariosGusta); java.util.List<uytubeLogica.publicar.DtUsuario> _returnUsuariosNoGusta = new java.util.ArrayList<uytubeLogica.publicar.DtUsuario>(); uytubeLogica.publicar.DtUsuario _returnUsuariosNoGustaVal1 = new uytubeLogica.publicar.DtUsuario(); _returnUsuariosNoGustaVal1.setApellido("Apellido-1132082130"); _returnUsuariosNoGustaVal1.setEmail("Email-1890817580"); uytubeLogica.publicar.DtFecha _returnUsuariosNoGustaVal1FechaNacimiento = new 
uytubeLogica.publicar.DtFecha(); _returnUsuariosNoGustaVal1FechaNacimiento.setFecha(javax.xml.datatype.DatatypeFactory.newInstance().newXMLGregorianCalendar("2018-10-28T15:58:54.714-03:00")); _returnUsuariosNoGustaVal1.setFechaNacimiento(_returnUsuariosNoGustaVal1FechaNacimiento); byte[] _returnUsuariosNoGustaVal1Foto = new byte[] {}; _returnUsuariosNoGustaVal1.setFoto(_returnUsuariosNoGustaVal1Foto); _returnUsuariosNoGustaVal1.setNickname("Nickname-1299618010"); _returnUsuariosNoGustaVal1.setNombre("Nombre-767754882"); _returnUsuariosNoGusta.add(_returnUsuariosNoGustaVal1); _return.getUsuariosNoGusta().addAll(_returnUsuariosNoGusta); return _return; } catch (java.lang.Exception ex) { ex.printStackTrace(); throw new RuntimeException(ex); } } /* (non-Javadoc) * @see uytubeLogica.publicar.WebServices#responderComentario(int arg0, int arg1, java.lang.String arg2, uytubeLogica.publicar.DtFecha arg3, java.lang.String arg4)* */ public void responderComentario(int arg0, int arg1, java.lang.String arg2, DtFecha arg3, java.lang.String arg4) { LOG.info("Executing operation responderComentario"); System.out.println(arg0); System.out.println(arg1); System.out.println(arg2); System.out.println(arg3); System.out.println(arg4); try { } catch (java.lang.Exception ex) { ex.printStackTrace(); throw new RuntimeException(ex); } } public boolean memberVideo(int arg0) { LOG.info("Executing operation memberVideo"); System.out.println(arg0); try { boolean _return = true; return _return; } catch (java.lang.Exception ex) { ex.printStackTrace(); throw new RuntimeException(ex); } } public void agregarVisita(int arg0, java.lang.String arg1) { LOG.info("Executing operation agregarVisita"); System.out.println(arg0); System.out.println(arg1); try { } catch (java.lang.Exception ex) { ex.printStackTrace(); throw new RuntimeException(ex); } } /* (non-Javadoc) * @see uytubeLogica.publicar.WebServices#listarVideosPorCategoria(java.lang.String arg0, uytubeLogica.publicar.Privacidad arg1, java.lang.String arg2)* */ public uytubeLogica.publicar.DtVideoArray listarVideosPorCategoria(java.lang.String arg0, Privacidad arg1, java.lang.String arg2) { LOG.info("Executing operation listarVideosPorCategoria"); System.out.println(arg0); System.out.println(arg1); System.out.println(arg2); try { uytubeLogica.publicar.DtVideoArray _return = new uytubeLogica.publicar.DtVideoArray(); java.util.List<uytubeLogica.publicar.DtVideo> _returnItem = new java.util.ArrayList<uytubeLogica.publicar.DtVideo>(); uytubeLogica.publicar.DtVideo _returnItemVal1 = new uytubeLogica.publicar.DtVideo(); uytubeLogica.publicar.DtCategoria _returnItemVal1Categoria = new uytubeLogica.publicar.DtCategoria(); _returnItemVal1Categoria.setNombre("Nombre171776911"); _returnItemVal1.setCategoria(_returnItemVal1Categoria); _returnItemVal1.setDescripcion("Descripcion-1179853369"); _returnItemVal1.setDuracionSS(-2145247907); uytubeLogica.publicar.DtFecha _returnItemVal1FechaPublicacion = new uytubeLogica.publicar.DtFecha(); _returnItemVal1FechaPublicacion.setFecha(javax.xml.datatype.DatatypeFactory.newInstance().newXMLGregorianCalendar("2018-10-28T15:58:54.717-03:00")); _returnItemVal1.setFechaPublicacion(_returnItemVal1FechaPublicacion); _returnItemVal1.setNombre("Nombre877855761"); uytubeLogica.publicar.Privacidad _returnItemVal1Privacidad = uytubeLogica.publicar.Privacidad.PRIVADO; _returnItemVal1.setPrivacidad(_returnItemVal1Privacidad); _returnItemVal1.setPropietario("Propietario1914766170"); _returnItemVal1.setUrl("Url1317743853"); 
_returnItemVal1.setIDVideo(Integer.valueOf(-1334379505)); _returnItem.add(_returnItemVal1); _return.getItem().addAll(_returnItem); return _return; } catch (java.lang.Exception ex) { ex.printStackTrace(); throw new RuntimeException(ex); } } /* (non-Javadoc) * @see uytubeLogica.publicar.WebServices#nuevoUsuario(java.lang.String arg0, java.lang.String arg1, java.lang.String arg2, java.lang.String arg3, java.lang.String arg4, uytubeLogica.publicar.DtFecha arg5, byte[] arg6, java.lang.String arg7, java.lang.String arg8, uytubeLogica.publicar.Privacidad arg9, java.lang.String arg10)* */ public void nuevoUsuario(java.lang.String arg0, java.lang.String arg1, java.lang.String arg2, java.lang.String arg3, java.lang.String arg4, DtFecha arg5, byte[] arg6, java.lang.String arg7, java.lang.String arg8, Privacidad arg9, java.lang.String arg10) { LOG.info("Executing operation nuevoUsuario"); System.out.println(arg0); System.out.println(arg1); System.out.println(arg2); System.out.println(arg3); System.out.println(arg4); System.out.println(arg5); System.out.println(arg6); System.out.println(arg7); System.out.println(arg8); System.out.println(arg9); System.out.println(arg10); try { } catch (java.lang.Exception ex) { ex.printStackTrace(); throw new RuntimeException(ex); } } /* (non-Javadoc) * @see uytubeLogica.publicar.WebServices#verificarLogin(java.lang.String arg0, java.lang.String arg1)* */ public boolean verificarLogin(java.lang.String arg0, java.lang.String arg1) { LOG.info("Executing operation verificarLogin"); System.out.println(arg0); System.out.println(arg1); try { boolean _return = false; return _return; } catch (java.lang.Exception ex) { ex.printStackTrace(); throw new RuntimeException(ex); } } /* (non-Javadoc) * @see uytubeLogica.publicar.WebServices#nuevaListaParticular(java.lang.String arg0, java.lang.String arg1, uytubeLogica.publicar.Privacidad arg2)* */ public void nuevaListaParticular(java.lang.String arg0, java.lang.String arg1, Privacidad arg2) { LOG.info("Executing operation nuevaListaParticular"); System.out.println(arg0); System.out.println(arg1); System.out.println(arg2); try { } catch (java.lang.Exception ex) { ex.printStackTrace(); throw new RuntimeException(ex); } } /* (non-Javadoc) * @see uytubeLogica.publicar.WebServices#nuevoComentario(int arg0, java.lang.String arg1, uytubeLogica.publicar.DtFecha arg2, java.lang.String arg3)* */ public void nuevoComentario(int arg0, java.lang.String arg1, DtFecha arg2, java.lang.String arg3) { LOG.info("Executing operation nuevoComentario"); System.out.println(arg0); System.out.println(arg1); System.out.println(arg2); System.out.println(arg3); try { } catch (java.lang.Exception ex) { ex.printStackTrace(); throw new RuntimeException(ex); } } /* (non-Javadoc) * @see uytubeLogica.publicar.WebServices#eliminarVideoLista(java.lang.String arg0, int arg1, java.lang.String arg2)* */ public void eliminarVideoLista(java.lang.String arg0, int arg1, java.lang.String arg2) { LOG.info("Executing operation eliminarVideoLista"); System.out.println(arg0); System.out.println(arg1); System.out.println(arg2); try { } catch (java.lang.Exception ex) { ex.printStackTrace(); throw new RuntimeException(ex); } } /* (non-Javadoc) * @see uytubeLogica.publicar.WebServices#listarVideoListaReproduccion(java.lang.String arg0, java.lang.String arg1)* */ public uytubeLogica.publicar.DtVideoArray listarVideoListaReproduccion(java.lang.String arg0, java.lang.String arg1) { LOG.info("Executing operation listarVideoListaReproduccion"); System.out.println(arg0); 
System.out.println(arg1); try { uytubeLogica.publicar.DtVideoArray _return = new uytubeLogica.publicar.DtVideoArray(); java.util.List<uytubeLogica.publicar.DtVideo> _returnItem = new java.util.ArrayList<uytubeLogica.publicar.DtVideo>(); uytubeLogica.publicar.DtVideo _returnItemVal1 = new uytubeLogica.publicar.DtVideo(); uytubeLogica.publicar.DtCategoria _returnItemVal1Categoria = new uytubeLogica.publicar.DtCategoria(); _returnItemVal1Categoria.setNombre("Nombre-214635391"); _returnItemVal1.setCategoria(_returnItemVal1Categoria); _returnItemVal1.setDescripcion("Descripcion1679264134"); _returnItemVal1.setDuracionSS(2056604007); uytubeLogica.publicar.DtFecha _returnItemVal1FechaPublicacion = new uytubeLogica.publicar.DtFecha(); _returnItemVal1FechaPublicacion.setFecha(javax.xml.datatype.DatatypeFactory.newInstance().newXMLGregorianCalendar("2018-10-28T15:58:54.719-03:00")); _returnItemVal1.setFechaPublicacion(_returnItemVal1FechaPublicacion); _returnItemVal1.setNombre("Nombre52031196"); uytubeLogica.publicar.Privacidad _returnItemVal1Privacidad = uytubeLogica.publicar.Privacidad.PUBLICO; _returnItemVal1.setPrivacidad(_returnItemVal1Privacidad); _returnItemVal1.setPropietario("Propietario1279732037"); _returnItemVal1.setUrl("Url-450265072"); _returnItemVal1.setIDVideo(Integer.valueOf(-251602568)); _returnItem.add(_returnItemVal1); _return.getItem().addAll(_returnItem); return _return; } catch (java.lang.Exception ex) { ex.printStackTrace(); throw new RuntimeException(ex); } } /* (non-Javadoc) * @see uytubeLogica.publicar.WebServices#getEstadoValoracion(int arg0, java.lang.String arg1)* */ public java.lang.String getEstadoValoracion(int arg0, java.lang.String arg1) { LOG.info("Executing operation getEstadoValoracion"); System.out.println(arg0); System.out.println(arg1); try { java.lang.String _return = "_return-370773758"; return _return; } catch (java.lang.Exception ex) { ex.printStackTrace(); throw new RuntimeException(ex); } } /* (non-Javadoc) * @see uytubeLogica.publicar.WebServices#listarCategorias()* */ public uytubeLogica.publicar.DtCategoriaArray listarCategorias() { LOG.info("Executing operation listarCategorias"); try { uytubeLogica.publicar.DtCategoriaArray _return = new uytubeLogica.publicar.DtCategoriaArray(); java.util.List<uytubeLogica.publicar.DtCategoria> _returnItem = new java.util.ArrayList<uytubeLogica.publicar.DtCategoria>(); uytubeLogica.publicar.DtCategoria _returnItemVal1 = new uytubeLogica.publicar.DtCategoria(); _returnItemVal1.setNombre("Nombre-771443127"); _returnItem.add(_returnItemVal1); _return.getItem().addAll(_returnItem); return _return; } catch (java.lang.Exception ex) { ex.printStackTrace(); throw new RuntimeException(ex); } } public uytubeLogica.publicar.DtVideoHistorialArray listarVideoHistorial(java.lang.String arg0) { LOG.info("Executing operation listarVideoHistorial"); System.out.println(arg0); try { uytubeLogica.publicar.DtVideoHistorialArray _return = new uytubeLogica.publicar.DtVideoHistorialArray(); java.util.List<uytubeLogica.publicar.DtVideoHistorial> _returnItem = new java.util.ArrayList<uytubeLogica.publicar.DtVideoHistorial>(); uytubeLogica.publicar.DtVideoHistorial _returnItemVal1 = new uytubeLogica.publicar.DtVideoHistorial(); uytubeLogica.publicar.DtFecha _returnItemVal1UltimaVisita = new uytubeLogica.publicar.DtFecha(); _returnItemVal1UltimaVisita.setFecha(javax.xml.datatype.DatatypeFactory.newInstance().newXMLGregorianCalendar("2018-10-31T15:18:40.042-03:00")); _returnItemVal1.setUltimaVisita(_returnItemVal1UltimaVisita); 
uytubeLogica.publicar.DtVideo _returnItemVal1Video = new uytubeLogica.publicar.DtVideo(); uytubeLogica.publicar.DtCategoria _returnItemVal1VideoCategoria = new uytubeLogica.publicar.DtCategoria(); _returnItemVal1VideoCategoria.setNombre("Nombre-1706235754"); _returnItemVal1Video.setCategoria(_returnItemVal1VideoCategoria); _returnItemVal1Video.setDescripcion("Descripcion-1628903202"); _returnItemVal1Video.setDuracionSS(1466277558); uytubeLogica.publicar.DtFecha _returnItemVal1VideoFechaPublicacion = new uytubeLogica.publicar.DtFecha(); _returnItemVal1VideoFechaPublicacion.setFecha(javax.xml.datatype.DatatypeFactory.newInstance().newXMLGregorianCalendar("2018-10-31T15:18:40.042-03:00")); _returnItemVal1Video.setFechaPublicacion(_returnItemVal1VideoFechaPublicacion); _returnItemVal1Video.setNombre("Nombre-960192962"); uytubeLogica.publicar.Privacidad _returnItemVal1VideoPrivacidad = uytubeLogica.publicar.Privacidad.PUBLICO; _returnItemVal1Video.setPrivacidad(_returnItemVal1VideoPrivacidad); _returnItemVal1Video.setPropietario("Propietario1819933705"); _returnItemVal1Video.setUrl("Url-1052327"); _returnItemVal1Video.setIDVideo(Integer.valueOf(2083689435)); _returnItemVal1.setVideo(_returnItemVal1Video); _returnItem.add(_returnItemVal1); _return.getItem().addAll(_returnItem); return _return; } catch (java.lang.Exception ex) { ex.printStackTrace(); throw new RuntimeException(ex); } } /* (non-Javadoc) * @see uytubeLogica.publicar.WebServices#memberListaReproduccionPropia(java.lang.String arg0, java.lang.String arg1)* */ public boolean memberListaReproduccionPropia(java.lang.String arg0, java.lang.String arg1) { LOG.info("Executing operation memberListaReproduccionPropia"); System.out.println(arg0); System.out.println(arg1); try { boolean _return = false; return _return; } catch (java.lang.Exception ex) { ex.printStackTrace(); throw new RuntimeException(ex); } } /* (non-Javadoc) * @see uytubeLogica.publicar.WebServices#listarComentarios(int arg0)* */ public uytubeLogica.publicar.DtComentarioArray listarComentarios(int arg0) { LOG.info("Executing operation listarComentarios"); System.out.println(arg0); try { uytubeLogica.publicar.DtComentarioArray _return = new uytubeLogica.publicar.DtComentarioArray(); java.util.List<uytubeLogica.publicar.DtComentario> _returnItem = new java.util.ArrayList<uytubeLogica.publicar.DtComentario>(); uytubeLogica.publicar.DtComentario _returnItemVal1 = new uytubeLogica.publicar.DtComentario(); _returnItemVal1.setEsPadre(false); uytubeLogica.publicar.DtFecha _returnItemVal1Fecha = new uytubeLogica.publicar.DtFecha(); _returnItemVal1Fecha.setFecha(javax.xml.datatype.DatatypeFactory.newInstance().newXMLGregorianCalendar("2018-10-28T15:58:54.720-03:00")); _returnItemVal1.setFecha(_returnItemVal1Fecha); byte[] _returnItemVal1FotoDuenio = new byte[] {}; _returnItemVal1.setFotoDuenio(_returnItemVal1FotoDuenio); _returnItemVal1.setIdComentario(Integer.valueOf(1479820374)); _returnItemVal1.setNickUsuario("NickUsuario-736868987"); java.util.List<uytubeLogica.publicar.DtComentario> _returnItemVal1Respuestas = new java.util.ArrayList<uytubeLogica.publicar.DtComentario>(); _returnItemVal1.getRespuestas().addAll(_returnItemVal1Respuestas); _returnItemVal1.setTexto("Texto-327017073"); _returnItem.add(_returnItemVal1); _return.getItem().addAll(_returnItem); return _return; } catch (java.lang.Exception ex) { ex.printStackTrace(); throw new RuntimeException(ex); } } /* (non-Javadoc) * @see uytubeLogica.publicar.WebServices#obtenerDtsVideosListaReproduccionUsuario(java.lang.String arg0, 
java.lang.String arg1)* */ public uytubeLogica.publicar.DtVideoArray obtenerDtsVideosListaReproduccionUsuario(java.lang.String arg0, java.lang.String arg1) { LOG.info("Executing operation obtenerDtsVideosListaReproduccionUsuario"); System.out.println(arg0); System.out.println(arg1); try { uytubeLogica.publicar.DtVideoArray _return = new uytubeLogica.publicar.DtVideoArray(); java.util.List<uytubeLogica.publicar.DtVideo> _returnItem = new java.util.ArrayList<uytubeLogica.publicar.DtVideo>(); uytubeLogica.publicar.DtVideo _returnItemVal1 = new uytubeLogica.publicar.DtVideo(); uytubeLogica.publicar.DtCategoria _returnItemVal1Categoria = new uytubeLogica.publicar.DtCategoria(); _returnItemVal1Categoria.setNombre("Nombre-288638675"); _returnItemVal1.setCategoria(_returnItemVal1Categoria); _returnItemVal1.setDescripcion("Descripcion2087173308"); _returnItemVal1.setDuracionSS(1857431255); uytubeLogica.publicar.DtFecha _returnItemVal1FechaPublicacion = new uytubeLogica.publicar.DtFecha(); _returnItemVal1FechaPublicacion.setFecha(javax.xml.datatype.DatatypeFactory.newInstance().newXMLGregorianCalendar("2018-10-28T15:58:54.721-03:00")); _returnItemVal1.setFechaPublicacion(_returnItemVal1FechaPublicacion); _returnItemVal1.setNombre("Nombre-25230903"); uytubeLogica.publicar.Privacidad _returnItemVal1Privacidad = uytubeLogica.publicar.Privacidad.PUBLICO; _returnItemVal1.setPrivacidad(_returnItemVal1Privacidad); _returnItemVal1.setPropietario("Propietario-1014670125"); _returnItemVal1.setUrl("Url975053402"); _returnItemVal1.setIDVideo(Integer.valueOf(-496636064)); _returnItem.add(_returnItemVal1); _return.getItem().addAll(_returnItem); return _return; } catch (java.lang.Exception ex) { ex.printStackTrace(); throw new RuntimeException(ex); } } /* (non-Javadoc) * @see uytubeLogica.publicar.WebServices#infoListaReproduccion(java.lang.String arg0, java.lang.String arg1)* */ public uytubeLogica.publicar.DtListaReproduccion infoListaReproduccion(java.lang.String arg0, java.lang.String arg1) { LOG.info("Executing operation infoListaReproduccion"); System.out.println(arg0); System.out.println(arg1); try { uytubeLogica.publicar.DtListaReproduccion _return = new uytubeLogica.publicar.DtListaReproduccion(); java.util.List<uytubeLogica.publicar.DtCategoria> _returnCategoriasLDR = new java.util.ArrayList<uytubeLogica.publicar.DtCategoria>(); uytubeLogica.publicar.DtCategoria _returnCategoriasLDRVal1 = new uytubeLogica.publicar.DtCategoria(); _returnCategoriasLDRVal1.setNombre("Nombre1956200215"); _returnCategoriasLDR.add(_returnCategoriasLDRVal1); _return.getCategoriasLDR().addAll(_returnCategoriasLDR); _return.setNombre("Nombre-1872162935"); uytubeLogica.publicar.Privacidad _returnPrivado = uytubeLogica.publicar.Privacidad.PRIVADO; _return.setPrivado(_returnPrivado); _return.setPropietario("Propietario1055721370"); uytubeLogica.publicar.TipoLista _returnTipoL = uytubeLogica.publicar.TipoLista.PARTICULAR; _return.setTipoL(_returnTipoL); _return.setUltimoVideo(javax.xml.datatype.DatatypeFactory.newInstance().newXMLGregorianCalendar("2018-10-28T15:58:54.722-03:00")); return _return; } catch (java.lang.Exception ex) { ex.printStackTrace(); throw new RuntimeException(ex); } } /* (non-Javadoc) * @see uytubeLogica.publicar.WebServices#aniadirVideo(java.lang.String arg0, java.lang.String arg1, java.lang.String arg2, int arg3, uytubeLogica.publicar.DtFecha arg4, java.lang.String arg5, uytubeLogica.publicar.DtCategoria arg6, uytubeLogica.publicar.Privacidad arg7)* */ public void aniadirVideo(java.lang.String arg0, 
java.lang.String arg1, java.lang.String arg2, int arg3, DtFecha arg4, java.lang.String arg5, DtCategoria arg6, Privacidad arg7) { LOG.info("Executing operation aniadirVideo"); System.out.println(arg0); System.out.println(arg1); System.out.println(arg2); System.out.println(arg3); System.out.println(arg4); System.out.println(arg5); System.out.println(arg6); System.out.println(arg7); try { } catch (java.lang.Exception ex) { ex.printStackTrace(); throw new RuntimeException(ex); } } /* (non-Javadoc) * @see uytubeLogica.publicar.WebServices#listarUsuariosQueLeSigue(java.lang.String arg0)* */ public net.java.dev.jaxb.array.StringArray listarUsuariosQueLeSigue(java.lang.String arg0) { LOG.info("Executing operation listarUsuariosQueLeSigue"); System.out.println(arg0); try { net.java.dev.jaxb.array.StringArray _return = new net.java.dev.jaxb.array.StringArray(); java.util.List<java.lang.String> _returnItem = new java.util.ArrayList<java.lang.String>(); java.lang.String _returnItemVal1 = "_returnItemVal-1775978820"; _returnItem.add(_returnItemVal1); _return.getItem().addAll(_returnItem); return _return; } catch (java.lang.Exception ex) { ex.printStackTrace(); throw new RuntimeException(ex); } } /* (non-Javadoc) * @see uytubeLogica.publicar.WebServices#infoVideosCanal(java.lang.String arg0, java.lang.String arg1, uytubeLogica.publicar.Privacidad arg2)* */ public uytubeLogica.publicar.DtVideoArray infoVideosCanal(java.lang.String arg0, java.lang.String arg1, Privacidad arg2) { LOG.info("Executing operation infoVideosCanal"); System.out.println(arg0); System.out.println(arg1); System.out.println(arg2); try { uytubeLogica.publicar.DtVideoArray _return = new uytubeLogica.publicar.DtVideoArray(); java.util.List<uytubeLogica.publicar.DtVideo> _returnItem = new java.util.ArrayList<uytubeLogica.publicar.DtVideo>(); uytubeLogica.publicar.DtVideo _returnItemVal1 = new uytubeLogica.publicar.DtVideo(); uytubeLogica.publicar.DtCategoria _returnItemVal1Categoria = new uytubeLogica.publicar.DtCategoria(); _returnItemVal1Categoria.setNombre("Nombre1107111490"); _returnItemVal1.setCategoria(_returnItemVal1Categoria); _returnItemVal1.setDescripcion("Descripcion-1402328167"); _returnItemVal1.setDuracionSS(-593774130); uytubeLogica.publicar.DtFecha _returnItemVal1FechaPublicacion = new uytubeLogica.publicar.DtFecha(); _returnItemVal1FechaPublicacion.setFecha(javax.xml.datatype.DatatypeFactory.newInstance().newXMLGregorianCalendar("2018-10-28T15:58:54.723-03:00")); _returnItemVal1.setFechaPublicacion(_returnItemVal1FechaPublicacion); _returnItemVal1.setNombre("Nombre-1352237021"); uytubeLogica.publicar.Privacidad _returnItemVal1Privacidad = uytubeLogica.publicar.Privacidad.PRIVADO; _returnItemVal1.setPrivacidad(_returnItemVal1Privacidad); _returnItemVal1.setPropietario("Propietario849130977"); _returnItemVal1.setUrl("Url-1495140531"); _returnItemVal1.setIDVideo(Integer.valueOf(-1191893905)); _returnItem.add(_returnItemVal1); _return.getItem().addAll(_returnItem); return _return; } catch (java.lang.Exception ex) { ex.printStackTrace(); throw new RuntimeException(ex); } } /* (non-Javadoc) * @see uytubeLogica.publicar.WebServices#listarLDRdeUsuario(java.lang.String arg0)* */ public net.java.dev.jaxb.array.StringArray listarLDRdeUsuario(java.lang.String arg0) { LOG.info("Executing operation listarLDRdeUsuario"); System.out.println(arg0); try { net.java.dev.jaxb.array.StringArray _return = new net.java.dev.jaxb.array.StringArray(); java.util.List<java.lang.String> _returnItem = new java.util.ArrayList<java.lang.String>(); 
java.lang.String _returnItemVal1 = "_returnItemVal197856273"; _returnItem.add(_returnItemVal1); _return.getItem().addAll(_returnItem); return _return; } catch (java.lang.Exception ex) { ex.printStackTrace(); throw new RuntimeException(ex); } } public void bajaUsuario(java.lang.String arg0) { LOG.info("Executing operation bajaUsuario"); System.out.println(arg0); try { } catch (java.lang.Exception ex) { ex.printStackTrace(); throw new RuntimeException(ex); } } /* (non-Javadoc) * @see uytubeLogica.publicar.WebServices#infoAddVideo(int arg0)* */ public uytubeLogica.publicar.DtVideo infoAddVideo(int arg0) { LOG.info("Executing operation infoAddVideo"); System.out.println(arg0); try { uytubeLogica.publicar.DtVideo _return = new uytubeLogica.publicar.DtVideo(); uytubeLogica.publicar.DtCategoria _returnCategoria = new uytubeLogica.publicar.DtCategoria(); _returnCategoria.setNombre("Nombre-1113929705"); _return.setCategoria(_returnCategoria); _return.setDescripcion("Descripcion764360289"); _return.setDuracionSS(-980519763); uytubeLogica.publicar.DtFecha _returnFechaPublicacion = new uytubeLogica.publicar.DtFecha(); _returnFechaPublicacion.setFecha(javax.xml.datatype.DatatypeFactory.newInstance().newXMLGregorianCalendar("2018-10-28T15:58:54.724-03:00")); _return.setFechaPublicacion(_returnFechaPublicacion); _return.setNombre("Nombre-957241626"); uytubeLogica.publicar.Privacidad _returnPrivacidad = uytubeLogica.publicar.Privacidad.PRIVADO; _return.setPrivacidad(_returnPrivacidad); _return.setPropietario("Propietario793457349"); _return.setUrl("Url-279520787"); _return.setIDVideo(Integer.valueOf(-1611839120)); return _return; } catch (java.lang.Exception ex) { ex.printStackTrace(); throw new RuntimeException(ex); } } /* (non-Javadoc) * @see uytubeLogica.publicar.WebServices#cargarDatos()* */ public void cargarDatos() { LOG.info("Executing operation cargarDatos"); try { } catch (java.lang.Exception ex) { ex.printStackTrace(); throw new RuntimeException(ex); } } /* (non-Javadoc) * @see uytubeLogica.publicar.WebServices#listarVideosPublicosPorNombre(java.lang.String arg0)* */ public uytubeLogica.publicar.DtVideoArray listarVideosPublicosPorNombre(java.lang.String arg0) { LOG.info("Executing operation listarVideosPublicosPorNombre"); System.out.println(arg0); try { uytubeLogica.publicar.DtVideoArray _return = new uytubeLogica.publicar.DtVideoArray(); java.util.List<uytubeLogica.publicar.DtVideo> _returnItem = new java.util.ArrayList<uytubeLogica.publicar.DtVideo>(); uytubeLogica.publicar.DtVideo _returnItemVal1 = new uytubeLogica.publicar.DtVideo(); uytubeLogica.publicar.DtCategoria _returnItemVal1Categoria = new uytubeLogica.publicar.DtCategoria(); _returnItemVal1Categoria.setNombre("Nombre-104780040"); _returnItemVal1.setCategoria(_returnItemVal1Categoria); _returnItemVal1.setDescripcion("Descripcion-872457522"); _returnItemVal1.setDuracionSS(-858871000); uytubeLogica.publicar.DtFecha _returnItemVal1FechaPublicacion = new uytubeLogica.publicar.DtFecha(); _returnItemVal1FechaPublicacion.setFecha(javax.xml.datatype.DatatypeFactory.newInstance().newXMLGregorianCalendar("2018-10-28T15:58:54.729-03:00")); _returnItemVal1.setFechaPublicacion(_returnItemVal1FechaPublicacion); _returnItemVal1.setNombre("Nombre-1294533723"); uytubeLogica.publicar.Privacidad _returnItemVal1Privacidad = uytubeLogica.publicar.Privacidad.PUBLICO; _returnItemVal1.setPrivacidad(_returnItemVal1Privacidad); _returnItemVal1.setPropietario("Propietario-1158211173"); _returnItemVal1.setUrl("Url-13861538"); 
_returnItemVal1.setIDVideo(Integer.valueOf(-1139776167)); _returnItem.add(_returnItemVal1); _return.getItem().addAll(_returnItem); return _return; } catch (java.lang.Exception ex) { ex.printStackTrace(); throw new RuntimeException(ex); } } /* (non-Javadoc) * @see uytubeLogica.publicar.WebServices#dejarUsuario(java.lang.String arg0, java.lang.String arg1)* */ public void dejarUsuario(java.lang.String arg0, java.lang.String arg1) { LOG.info("Executing operation dejarUsuario"); System.out.println(arg0); System.out.println(arg1); try { } catch (java.lang.Exception ex) { ex.printStackTrace(); throw new RuntimeException(ex); } } } <file_sep>package uytubeLogic.logica; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; public class Comentario { private Integer iDComentario; private String texto; private DtFecha fecha; private boolean esPadre; private Map<Integer, Comentario> respuestas; private Usuario usuarioComentador; public Comentario(Integer idComentario, String text, DtFecha fech, boolean privacity, String nombreUsuarioComentador) { iDComentario = idComentario; texto = text; fecha = fech; esPadre = privacity; respuestas = new HashMap<Integer, Comentario>(); UsuarioHandler manejadorUsuario = UsuarioHandler.getInstance(); Usuario user = manejadorUsuario.find(nombreUsuarioComentador); usuarioComentador = user; } public Integer getIDComentario() { return iDComentario; } public String getTexto() { return texto; } public DtFecha getFecha() { return fecha; } public boolean getEsPadre() { return esPadre; } public void addComentario(Comentario coment) { respuestas.put(coment.getIDComentario(), coment); } public Usuario getUsuario() { return usuarioComentador; } public DtComentario[] getDtRespuestas() { List <DtComentario> comments= new ArrayList<DtComentario>(); for (Map.Entry<Integer, Comentario> entry : respuestas.entrySet()) { comments.add(new DtComentario(entry.getValue())); } Collections.sort(comments); DtComentario[] res = comments.toArray(new DtComentario[0]); return res; } public void eliminarHijos() { /* removing entries while iterating the entry set throws ConcurrentModificationException; clear() discards all replies safely */ respuestas.clear(); } } <file_sep><%@page import="java.text.SimpleDateFormat"%> <%@page import="java.text.DateFormat"%> <%@page import="uytubeLogica.publicar.DtListaReproduccion"%> <%@ page import = "uytubeLogica.publicar.DtVideo"%> <%@ page import = "uytubeLogica.publicar.Privacidad"%> <%@ page import = "uytubeLogica.publicar.DtCanal"%> <%@ page language="java" contentType="text/html; charset=UTF-8" pageEncoding="UTF-8"%> <!DOCTYPE html> <html> <head> <link rel="stylesheet" href="consultaListaReproduccion.css"> <meta charset="UTF-8"> <title>UyTube</title> </head> <body> <table id="TablaContenidos"> <tr> <th valign="top"> Tipo </th> <th valign="top"> Nombre </th> <th valign="top"> Propietario </th> </tr> <% DtListaReproduccion[] listas=(DtListaReproduccion[]) request.getAttribute("listarListasReproduccion"); for(DtListaReproduccion entry: listas){ if(entry.getPrivado().equals(Privacidad.PUBLICO)){ %> <tr> <td>Lista de Reproduccion <form action="playlist" method="get"> <input type="hidden" name="action" value="details"> <input type="hidden" name="nameList" value="<%=entry.getNombre()%>"> <input type="hidden" name="ownerList" value="<%=entry.getPropietario() %>"> <input type="submit" value="Ver Info"> </form> </td> <td id="NombreTD"><%=entry.getNombre()%></td> <td id="PropietarioTD"><%=entry.getPropietario() %></td> </tr> <% } } %>
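<%-- The block below renders the same row layout again for the logged-in user's private
     playlists ("listasPrivadasSesion"), adding a "Modificar" action next to "Ver Info". --%>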
<tr> <td> <% if((DtListaReproduccion[]) request.getAttribute("listasPrivadasSesion")!=null){ DtListaReproduccion[] listasPrivadas=(DtListaReproduccion[]) request.getAttribute("listasPrivadasSesion"); for(DtListaReproduccion entry: listasPrivadas){ %> <tr> <td>Lista de Reproduccion Privada <form action="playlist" method="get"> <input type="hidden" name="action" value="details"> <input type="hidden" name="nameList" value="<%=entry.getNombre()%>"> <input type="hidden" name="ownerList" value="<%=entry.getPropietario() %>"> <input type="submit" value="Ver Info"> </form> <form action="modifyPlaylist" method="get"> <input type="hidden" name="action" value="modify"> <input type="hidden" name="nameList" value="<%=entry.getNombre()%>"> <input type="hidden" name="ownerList" value="<%=entry.getPropietario() %>"> <input type="submit" value="Modificar"> </form> </td> <td id="NombreTD"><%=entry.getNombre()%></td> <td id="PropietarioTD"><%=entry.getPropietario() %></td> </tr> <% }} %> <tr> <td> </tr> </table> </body> </html><file_sep>package uytube.admin.usuarios; import java.awt.EventQueue; import javax.swing.JInternalFrame; import javax.swing.JPanel; import java.awt.GridLayout; public class SeguirUsuarioInternalFrame extends JInternalFrame { /** * Launch the application. */ public static void main(String[] args) { EventQueue.invokeLater(new Runnable() { public void run() { try { SeguirUsuarioInternalFrame frame = new SeguirUsuarioInternalFrame(); frame.setVisible(true); } catch (Exception e) { e.printStackTrace(); } } }); } /** * Create the frame. */ public SeguirUsuarioInternalFrame() { setClosable(true); setIconifiable(true); setMaximizable(true); setResizable(true); setBounds(100, 100, 450, 300); getContentPane().setLayout(new GridLayout(0, 1, 0, 0)); JPanel panel = new JPanel(); getContentPane().add(panel); panel.setLayout(new GridLayout(1, 0, 0, 0)); } } <file_sep>package uytube.admin; import java.awt.BorderLayout; import java.awt.EventQueue; import javax.swing.JFrame; import javax.swing.JPanel; import javax.swing.border.EmptyBorder; import javax.swing.JMenuBar; import javax.swing.JMenu; import javax.swing.JMenuItem; import java.awt.event.ActionListener; import java.io.IOException; import java.awt.event.ActionEvent; import uytube.admin.categoria.alta.AltaCategoria; import uytube.admin.categoria.consulta.ConsultaCategoriaJInternalFrame; import uytube.admin.listas.AgregarVideoListaInternalFrame; import uytube.admin.listas.ConsultaListaInternalFrame; import uytube.admin.listas.CrearListaInternalFrame; import uytube.admin.listas.ModificarListaFrame; import uytube.admin.listas.QuitarVideoListaInternalFrame; import uytube.admin.usuarios.AltaUsuarioInternalFrame; import uytube.admin.usuarios.ConsultaUsuarioInternalFrame; import uytube.admin.usuarios.ListarUsuariosInternalFrame; import uytube.admin.usuarios.modificarUsuario; import uytube.admin.usuarios.verUsuariosEliminados; import uytube.admin.videos.AltaVideo; import uytube.admin.videos.ModificarVideo; import uytube.admin.videos.ValorarVideo; import uytube.admin.videos.consultar.ConsultarVideoInternalFrame; import uytube.admin.videos.consultar.ListarCategoriasInternalFrame; import uytube.datosPrueba.DatosDePrueba; import uytubeLogic.logica.Fabrica; import uytubeLogic.logica.IUsuarioCtrl; import uytubeLogic.logica.IVideoCtrl; import uytubeLogica.publicar.WebServices; import javax.swing.JButton; public class adminPrincipal extends JFrame { private JPanel contentPane; private AltaUsuarioInternalFrame aUsrIFrame; private modificarUsuario modUsrIFrame; 
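	// Internal frames for each admin use case; they are created lazily by the menu
	// actions wired up in the constructor below and added to the null-layout content pane.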
private ConsultaUsuarioInternalFrame conUsrIFrame; private ListarUsuariosInternalFrame listarUsrIFrame; private AgregarVideoListaInternalFrame addVideoListIFrame; private QuitarVideoListaInternalFrame quitarVideoListIFrame; private CrearListaInternalFrame crearListIFrame; private ConsultaCategoriaJInternalFrame consultacategoria; private AltaVideo altaVideoFrame; private ConsultarVideoInternalFrame consultarVideoFrame; private ModificarVideo modificarVideoFrame; private ValorarVideo valorarVideoFrame; private ConsultaListaInternalFrame consultaListaFrame; private ListarCategoriasInternalFrame listarCategoriasFrame; private ModificarListaFrame modificarListaFrame; private verUsuariosEliminados verUsrEliminadosFrame; private IUsuarioCtrl ICU; private IVideoCtrl ICV; /** * Launch the application. */ public static void main(String[] args) { EventQueue.invokeLater(new Runnable() { public void run() { try { adminPrincipal frame = new adminPrincipal(); frame.setVisible(true); } catch (Exception e) { e.printStackTrace(); } } }); } /** * Create the frame. */ public adminPrincipal() { setTitle("UyTube Admin"); Fabrica fabrica = Fabrica.getInstance(); ICU = fabrica.getIUsuarioCtrl(); ICV = fabrica.getIVideoCtrl(); setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE); setBounds(100, 100, 450, 300); setExtendedState(MAXIMIZED_BOTH); JMenuBar menuBar = new JMenuBar(); setJMenuBar(menuBar); JMenu mnNewMenu = new JMenu("Usuario"); menuBar.add(mnNewMenu); JMenuItem mntmAlta = new JMenuItem("Alta"); mntmAlta.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent e) { aUsrIFrame = new AltaUsuarioInternalFrame(ICU, ICV); contentPane.setLayout(null); contentPane.add(aUsrIFrame); aUsrIFrame.setVisible(true); } }); mnNewMenu.add(mntmAlta); JMenuItem mntmModificar = new JMenuItem("Modificar"); mntmModificar.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent e) { modUsrIFrame = new modificarUsuario(ICU); contentPane.setLayout(null); contentPane.add(modUsrIFrame); modUsrIFrame.setVisible(true); } }); mnNewMenu.add(mntmModificar); JMenuItem mntmConsulta = new JMenuItem("Consulta"); mntmConsulta.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent e) { conUsrIFrame = new ConsultaUsuarioInternalFrame(ICU); contentPane.setLayout(null); contentPane.add(conUsrIFrame); conUsrIFrame.setVisible(true); } }); mnNewMenu.add(mntmConsulta); JMenuItem mntmListar = new JMenuItem("Listar"); mntmListar.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent e) { listarUsrIFrame = new ListarUsuariosInternalFrame(ICU); contentPane.setLayout(null); contentPane.add(listarUsrIFrame); listarUsrIFrame.setVisible(true); } }); mnNewMenu.add(mntmListar); JMenuItem Eliminados = new JMenuItem("Eliminados"); Eliminados.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent arg0) { verUsrEliminadosFrame= new verUsuariosEliminados(); contentPane.setLayout(null); contentPane.add(verUsrEliminadosFrame); verUsrEliminadosFrame.setVisible(true); } }); mnNewMenu.add(Eliminados); JMenu mnVideos = new JMenu("Videos"); menuBar.add(mnVideos); JMenuItem mntmAlta_1 = new JMenuItem("Alta"); mntmAlta_1.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent e) { altaVideoFrame = new AltaVideo(ICU, ICV); contentPane.setLayout(null); contentPane.add(altaVideoFrame); altaVideoFrame.setVisible(true); } }); mnVideos.add(mntmAlta_1); JMenuItem mntmNewMenuItem = new JMenuItem("Modificar"); 
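		// Every menu entry follows the same pattern: instantiate the internal frame,
		// add it to the content pane and make it visible.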
mntmNewMenuItem.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent e) { modificarVideoFrame = new ModificarVideo(ICU, ICV); contentPane.setLayout(null); contentPane.add(modificarVideoFrame); modificarVideoFrame.setVisible(true); } }); mnVideos.add(mntmNewMenuItem); JMenuItem mntmConsulta_1 = new JMenuItem("Consulta"); mntmConsulta_1.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent e) { consultarVideoFrame = new ConsultarVideoInternalFrame(); contentPane.setLayout(null); contentPane.add(consultarVideoFrame); consultarVideoFrame.setVisible(true); } }); mnVideos.add(mntmConsulta_1); JMenuItem mntmValorar = new JMenuItem("Valorar"); mntmValorar.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent e) { valorarVideoFrame = new ValorarVideo(); contentPane.setLayout(null); contentPane.add(valorarVideoFrame); valorarVideoFrame.setVisible(true); } }); mnVideos.add(mntmValorar); JMenu mnNewMenu_1 = new JMenu("Listas"); menuBar.add(mnNewMenu_1); JMenuItem mntmCrear = new JMenuItem("Crear"); mntmCrear.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent e) { crearListIFrame = new CrearListaInternalFrame(ICU); contentPane.setLayout(null); contentPane.add(crearListIFrame); crearListIFrame.setVisible(true); } }); mnNewMenu_1.add(mntmCrear); JMenuItem mntmAgregarVideo = new JMenuItem("Agregar Video"); mntmAgregarVideo.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent e) { addVideoListIFrame = new AgregarVideoListaInternalFrame(ICU); contentPane.setLayout(null); contentPane.add(addVideoListIFrame); addVideoListIFrame.setVisible(true); } }); JMenuItem mntmModificar_1 = new JMenuItem("Modificar"); mntmModificar_1.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent e) { modificarListaFrame = new ModificarListaFrame(ICU, ICV); contentPane.setLayout(null); contentPane.add(modificarListaFrame); modificarListaFrame.setVisible(true); } }); mnNewMenu_1.add(mntmModificar_1); mnNewMenu_1.add(mntmAgregarVideo); JMenuItem mntmQuitarVideo = new JMenuItem("Quitar Video"); mntmQuitarVideo.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent e) { quitarVideoListIFrame = new QuitarVideoListaInternalFrame(ICU); contentPane.setLayout(null); contentPane.add(quitarVideoListIFrame); quitarVideoListIFrame.setVisible(true); } }); mnNewMenu_1.add(mntmQuitarVideo); JMenuItem mntmConsulta_3 = new JMenuItem("Consulta"); mntmConsulta_3.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent arg0) { consultaListaFrame = new ConsultaListaInternalFrame(ICU, ICV); contentPane.setLayout(null); contentPane.add(consultaListaFrame); consultaListaFrame.setVisible(true); } }); mnNewMenu_1.add(mntmConsulta_3); JMenu mnCategorias = new JMenu("Categorias"); menuBar.add(mnCategorias); JMenuItem mntmAlta_2 = new JMenuItem("Alta"); mntmAlta_2.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent e) { AltaCategoria ac = new AltaCategoria(ICV); contentPane.setLayout(null); contentPane.add(ac); ac.setVisible(true); } }); mnCategorias.add(mntmAlta_2); JMenuItem mntmConsulta_2 = new JMenuItem("Consulta"); mntmConsulta_2.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent arg0) { consultacategoria = new ConsultaCategoriaJInternalFrame(ICV); contentPane.setLayout(null); contentPane.add(consultacategoria); consultacategoria.setVisible(true); } }); 
mnCategorias.add(mntmConsulta_2); JMenuItem mntmListar_1 = new JMenuItem("Listar"); mntmListar_1.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent arg0) { listarCategoriasFrame = new ListarCategoriasInternalFrame(ICV); contentPane.setLayout(null); contentPane.add(listarCategoriasFrame); listarCategoriasFrame.setVisible(true); } }); mnCategorias.add(mntmListar_1); JButton btnCargarDatosPrueba = new JButton("Cargar Datos Prueba"); btnCargarDatosPrueba.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent arg0) { DatosDePrueba dP = new DatosDePrueba(); try { dP.cargarDatosDePrueba(); } catch (IOException e) { // TODO Auto-generated catch block e.printStackTrace(); } } }); menuBar.add(btnCargarDatosPrueba); JButton btnPublicarWsdl = new JButton("Publicar WSDL"); btnPublicarWsdl.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent e) { uytubeLogica.publicar.WebServices webServices= new uytubeLogica.publicar.WebServices(); webServices.publicar(); } }); menuBar.add(btnPublicarWsdl); contentPane = new JPanel(); contentPane.setBorder(new EmptyBorder(5, 5, 5, 5)); contentPane.setLayout(new BorderLayout(0, 0)); setContentPane(contentPane); } } <file_sep>package uyTubePersistencia; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import javax.persistence.EntityManager; import javax.persistence.EntityManagerFactory; import javax.persistence.Persistence; import javax.swing.event.ListSelectionEvent; public class PersistenciaCtrl { public Usuario[] getUsuariosPersistidos() { EntityManagerFactory emf = Persistence.createEntityManagerFactory("UyTubeJPA"); EntityManager em = emf.createEntityManager(); List<Usuario> usuarios= em.createQuery("Select u from Usuario u").getResultList(); return usuarios.toArray(new Usuario[0]); } public Map<Integer,String> listarUsuariosPersistidos() { EntityManagerFactory emf = Persistence.createEntityManagerFactory("UyTubeJPA"); EntityManager em = emf.createEntityManager(); List<Usuario> usuarios= em.createQuery("Select u from Usuario u").getResultList(); Map<Integer,String> pares = new HashMap<Integer,String>(); for(Usuario usuarioParticular:usuarios) { pares.put(usuarioParticular.getIdUsuario(), usuarioParticular.getNickname()); } return pares; } public Usuario getInfoUsuario(Integer idVid) { EntityManagerFactory emf = Persistence.createEntityManagerFactory("UyTubeJPA"); EntityManager em = emf.createEntityManager(); Usuario found = em.find(Usuario.class, idVid); return found; } } <file_sep>package uytube.admin.usuarios.listar; import java.awt.Container; import javax.swing.JInternalFrame; import javax.swing.JList; import javax.swing.JPanel; import javax.swing.JScrollPane; import uytubeLogic.logica.DtUsuario; public final class ListarUsuariosInternalFrame { private final JInternalFrame internalFrame = new JInternalFrame(); private final JPanel mainPanel = new JPanel(); private final Container container; public ListarUsuariosInternalFrame(final Container container) { this.container = container; this.container.add(internalFrame); initializeInternalFrame(); } private void initializeInternalFrame() { internalFrame.setVisible(false); internalFrame.setTitle("Alta Video"); internalFrame.setClosable(true); internalFrame.setResizable(true); internalFrame.setSize(330, 300); initializeMainPanel(); addContentToInternalFrame(); } private void initializeMainPanel() { final JList<DtUsuario> userList = new 
JList<DtUsuario>(getUsers()); final JScrollPane scrollPane = new JScrollPane(userList); mainPanel.add(scrollPane); } private DtUsuario[] getUsers() { final DtUsuario[] users = {}; return users; } private void addContentToInternalFrame() { internalFrame.add(mainPanel); } public void show() { internalFrame.show(); } public void hide() { internalFrame.hide(); } } <file_sep>package uytube.admin.videos.alta; import java.awt.FlowLayout; import java.awt.event.ActionEvent; import java.awt.event.ActionListener; import javax.swing.BoxLayout; import javax.swing.JButton; import javax.swing.JComboBox; import javax.swing.JInternalFrame; import javax.swing.JLabel; import javax.swing.JPanel; import javax.swing.JTextArea; import javax.swing.JTextField; public final class AltaVideoFormPanel { private final JPanel mainPanel = new JPanel(); private final BoxLayout panelLayout = new BoxLayout(mainPanel, BoxLayout.Y_AXIS); private final JTextField userNicknameTextField = new JTextField(); private final JTextField videoNameTextField = new JTextField(); private final JTextField videoURLTextField = new JTextField(); private final JTextArea videoDescriptionTextArea = new JTextArea(); private final JComboBox<String> videoCategoryComboBox = new JComboBox<>(); private final JButton acceptButton = new JButton("Aceptar"); private final JButton cancelButton = new JButton("Cancelar"); private final JInternalFrame internalFrameContainer; public AltaVideoFormPanel(final JInternalFrame internalFrameContainer) { this.internalFrameContainer = internalFrameContainer; initializePanel(); } private void initializePanel() { initializeAcceptButton(); initializeCancelButton(); initializePanelLayout(); mainPanel.setLayout(panelLayout); } private void initializeAcceptButton() { acceptButton.addActionListener(new ActionListener() { @Override public void actionPerformed(ActionEvent e) { createVideo(); closeInternalFrameContainer(); } }); } private void createVideo() { final String userNickname = userNicknameTextField.getText(); final String videoName = videoNameTextField.getText(); final String videoURL = videoURLTextField.getText(); final String videoDescription = videoDescriptionTextArea.getText(); final String videoCategory = (String) videoCategoryComboBox.getSelectedItem(); // videoCtrl.createVideo(userNickname, videoName, videoUrl); } private void initializeCancelButton() { cancelButton.addActionListener(new ActionListener() { @Override public void actionPerformed(ActionEvent e) { closeInternalFrameContainer(); } }); } private void closeInternalFrameContainer() { internalFrameContainer.dispose(); } private void initializePanelLayout() { final JLabel userNicknameLabel = new JLabel("Nickname del autor"); final JLabel videoNameLabel = new JLabel("Nombre del video"); final JLabel videoURLLabel = new JLabel("URL del video"); final JLabel videoDescriptionLabel = new JLabel("Descripcion del video"); final JLabel videoCategoryLabel = new JLabel("Categoria del video"); mainPanel.add(userNicknameLabel); mainPanel.add(userNicknameTextField); mainPanel.add(videoNameLabel); mainPanel.add(videoNameTextField); mainPanel.add(videoURLLabel); mainPanel.add(videoURLTextField); mainPanel.add(videoDescriptionLabel); mainPanel.add(videoDescriptionTextArea); for (final String category : getVideoCategories()) { videoCategoryComboBox.addItem(category); } mainPanel.add(videoCategoryLabel); mainPanel.add(videoCategoryComboBox); final JPanel buttonsPanel = new JPanel(); buttonsPanel.setLayout(new FlowLayout()); buttonsPanel.add(acceptButton); 
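		// Note: createVideo() above only gathers the form values; the actual controller call
		// is still commented out ("// videoCtrl.createVideo(...)"). A real call would
		// presumably mirror the aniadirVideo(...) operation used elsewhere in the project,
		// but the exact IVideoCtrl signature is not visible here, so that remains an assumption.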
buttonsPanel.add(cancelButton); mainPanel.add(buttonsPanel); } private String[] getVideoCategories() { // Fetch the categories from the controller. final String[] categories = {}; return categories; } public JPanel getPanel() { return mainPanel; } } <file_sep>package uytube.admin.videos.consultar; import java.awt.event.ActionEvent; import java.awt.event.ActionListener; import javax.swing.BoxLayout; import javax.swing.DefaultComboBoxModel; import javax.swing.DefaultListModel; import javax.swing.JButton; import javax.swing.JInternalFrame; import javax.swing.JLabel; import javax.swing.JList; import javax.swing.JOptionPane; import javax.swing.JPanel; import javax.swing.JScrollPane; import javax.swing.JTextField; import uytube.admin.adminPrincipal; import uytube.admin.videos.ListarComentariosInternalFrame; import uytubeLogic.logica.DtInfoVideo; import uytubeLogic.logica.DtUsuario; import uytubeLogic.logica.DtVideo; import uytubeLogic.logica.Fabrica; import uytubeLogic.logica.IUsuarioCtrl; import uytubeLogic.logica.IVideoCtrl; import uytubeLogic.logica.SystemHandler.Privacidad; import java.awt.GridLayout; import java.awt.Component; import javax.swing.JTabbedPane; import javax.swing.event.ListSelectionListener; import javax.swing.event.ListSelectionEvent; import javax.swing.JComboBox; import java.awt.BorderLayout; import javax.swing.SwingConstants; public class ConsultarVideoInternalFrame extends JInternalFrame { static final long serialVersionUID = 113423; Fabrica fabrica = Fabrica.getInstance(); IUsuarioCtrl ICU = fabrica.getIUsuarioCtrl(); IVideoCtrl VCU = fabrica.getIVideoCtrl(); private JComboBox authorNicknameComboBox = new JComboBox(); private final JPanel mainPanel = new JPanel(); private final JPanel videoDetailsPanel = new JPanel(); private DefaultListModel<String> videoListModel = new DefaultListModel<String>(); private DefaultListModel<String> UsuariosGListModel = new DefaultListModel<String>(); private DefaultListModel<String> UsuariosNoGListModel = new DefaultListModel<String>(); private final JList<String> videoList; private DtVideo selectedVideo; private DtInfoVideo infoVideo; private String authorNickname=""; private JTextField videoNameTextField = new JTextField(); private JTextField videoDescriptionTextField = new JTextField(); private JTextField videoURLTextField = new JTextField(); private JTextField videoDuracionTextField = new JTextField(); private final JPanel panel = new JPanel(); private final JPanel panel_1 = new JPanel(); private final JTabbedPane TabComunidad = new JTabbedPane(JTabbedPane.TOP); private final JPanel UsuGusta = new JPanel(); private final JList<String> listGusta = new JList<>(UsuariosGListModel); private final JPanel UsuNoGusta = new JPanel(); private final JList<String> listNoGusta = new JList<>(UsuariosNoGListModel); private final JPanel Comentarios = new JPanel(); private final JLabel lblVideosDelAutor = new JLabel("Videos del autor:"); private final JButton btnCargar = new JButton("Cargar"); private final JLabel lblCategoria = new JLabel("Categoria"); private final JTextField txtCategoria = new JTextField(); private final JLabel lblPrivacidad = new JLabel("Privacidad"); private final JTextField privacidadTextField = new JTextField(); public static void infoBox(String infoMessage, String titleBar){ JOptionPane.showMessageDialog(null, infoMessage, "" + titleBar, JOptionPane.INFORMATION_MESSAGE); } //funcion para cuando es llamada con un video. 
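	// "llamadaParticular" is the entry point used when this frame is opened for one specific
	// video: it pre-selects and locks the author combo box and lists only that video.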
public void llamadaParticular(String nicknameAutor, String nombreVideo) { authorNicknameComboBox.setSelectedItem(nicknameAutor); authorNickname = nicknameAutor; authorNicknameComboBox.setEnabled(false); videoListModel.clear(); videoListModel.addElement(nombreVideo); videoListModel.setElementAt(videoListModel.getElementAt(0), 0); } public ConsultarVideoInternalFrame() { privacidadTextField.setEditable(false); privacidadTextField.setColumns(10); txtCategoria.setEditable(false); txtCategoria.setColumns(10); setTitle("Consultar Video"); BorderLayout borderLayout = (BorderLayout) getContentPane().getLayout(); borderLayout.setVgap(2); borderLayout.setHgap(2); setResizable(true); setMaximizable(true); setIconifiable(true); setClosable(true); setBounds(100, 100, 450, 448); getContentPane().add(mainPanel); final BoxLayout mainPanelLayout = new BoxLayout(mainPanel, BoxLayout.Y_AXIS); mainPanel.setLayout(mainPanelLayout); final JLabel authorNicknameLabel = new JLabel("Nickname del autor:"); authorNicknameLabel.setHorizontalAlignment(SwingConstants.CENTER); authorNicknameLabel.setAlignmentX(Component.CENTER_ALIGNMENT); mainPanel.add(authorNicknameLabel); String[] nicknamesArray = ICU.listarNicknamesUsuarios(); authorNicknameComboBox.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent arg0) { handleVideoSearch(); videoNameTextField.setText(""); videoDescriptionTextField.setText(""); videoURLTextField.setText(""); videoDuracionTextField.setText(""); privacidadTextField.setText(""); txtCategoria.setText(""); } }); authorNicknameComboBox.setModel(new DefaultComboBoxModel(nicknamesArray)); authorNicknameComboBox.setSelectedIndex(-1); mainPanel.add(authorNicknameComboBox); initializeSearchVideosButton(); //restaurar el final, si no funciona . 
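		// Selecting an entry in the list below looks the video up through
		// ICU.obtenerInfoAdicVideo(author, name) and its extended details through
		// VCU.verDetallesVideoExt(id), then fills the read-only detail fields.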
videoList = new JList<>(videoListModel); videoList.addListSelectionListener(new ListSelectionListener() { @Override public void valueChanged(ListSelectionEvent arg0) { if (!arg0.getValueIsAdjusting()) { if(!videoList.isSelectionEmpty()){ String nomVideo=videoList.getSelectedValue(); selectedVideo=ICU.obtenerInfoAdicVideo(authorNickname, nomVideo); infoVideo=VCU.verDetallesVideoExt(selectedVideo.getiDVideo()); handleVideoSelect(infoVideo); } } } }); lblVideosDelAutor.setHorizontalAlignment(SwingConstants.CENTER); lblVideosDelAutor.setAlignmentX(Component.CENTER_ALIGNMENT); mainPanel.add(lblVideosDelAutor); mainPanel.add(new JScrollPane(videoList)); initializeVideoDetailsPane(); mainPanel.add(videoDetailsPanel); videoDetailsPanel.add(panel); panel.setLayout(new GridLayout(0, 1, 0, 0)); final JLabel videoNameLabel = new JLabel("Nombre:"); panel.add(videoNameLabel); videoNameTextField.setEditable(false); panel.add(videoNameTextField); final JLabel videoDescriptionLabel = new JLabel("Descripcion:"); panel.add(videoDescriptionLabel); videoDescriptionTextField.setEditable(false); panel.add(videoDescriptionTextField); final JLabel videoURLLabel = new JLabel("URL:"); panel.add(videoURLLabel); videoURLTextField.setEditable(false); panel.add(videoURLTextField); final JLabel videoDuracionLabel = new JLabel("Duracion:"); panel.add(videoDuracionLabel); videoDuracionTextField.setEditable(false); panel.add(videoDuracionTextField); panel.add(lblPrivacidad); panel.add(privacidadTextField); panel.add(lblCategoria); panel.add(txtCategoria); videoDetailsPanel.add(panel_1); panel_1.setLayout(new GridLayout(0, 1, 0, 0)); TabComunidad.setToolTipText(""); panel_1.add(TabComunidad); TabComunidad.addTab("Le Gusta", null, UsuGusta, null); UsuGusta.setLayout(new GridLayout(0, 1, 0, 0)); UsuGusta.add(listGusta); TabComunidad.addTab("No Le Gusta", null, UsuNoGusta, null); UsuNoGusta.setLayout(new GridLayout(0, 1, 0, 0)); UsuNoGusta.add(listNoGusta); TabComunidad.addTab("Comentarios", null, Comentarios, null); btnCargar.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent arg0) { if(!videoList.isSelectionEmpty()&&authorNicknameComboBox.getSelectedIndex()!=-1){ String nick = (String) authorNicknameComboBox.getSelectedItem(); String video = videoList.getSelectedValue(); if(!nick.isEmpty() && !video.isEmpty()){ ListarComentariosInternalFrame comentariosIFrame = new ListarComentariosInternalFrame((String) authorNicknameComboBox.getSelectedItem(),videoList.getSelectedValue()); adminPrincipal.getFrames()[0].setLayout(null); adminPrincipal.getFrames()[0].add(comentariosIFrame); comentariosIFrame.show(); }else{ infoBox("No hay usuario y/o video seleccionado","Consulta Video"); } }else{ infoBox("No hay usuario y/o video seleccionado","Consulta Video"); } } }); Comentarios.add(btnCargar); } private void initializeSearchVideosButton() { } private void handleVideoSearch() { authorNickname = (String) authorNicknameComboBox.getSelectedItem(); videoListModel.clear(); UsuariosGListModel.clear(); UsuariosNoGListModel.clear(); if(ICU.memberUsuario(authorNickname)){ String[] videosCanal=ICU.listarVideosCanal(authorNickname); for(String vid:videosCanal){ videoListModel.addElement(vid); } } } private void handleVideoSelect(final DtInfoVideo infoVideo) { this.infoVideo = infoVideo; videoNameTextField.setText(this.selectedVideo.getNombre()); videoDescriptionTextField.setText(this.selectedVideo.getDescripcion()); videoURLTextField.setText(this.selectedVideo.getUrl()); 
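		// The remaining fields are derived below: the duration is re-formatted from seconds
		// to mm:ss, the privacy flag is mapped to "Privado"/"Publico", and the like/dislike
		// tabs are rebuilt from DtInfoVideo.getUsuariosGusta()/getUsuariosNoGusta().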
txtCategoria.setText(this.selectedVideo.getCategoria().getNombre()); Integer duracionMM=this.selectedVideo.getDuracionSS()/60; Integer duracionSS=this.selectedVideo.getDuracionSS()%60; videoDuracionTextField.setText(Integer.toString(duracionMM)+":"+Integer.toString(duracionSS));//arreglar la duracion para que la muestre en minutos if(this.selectedVideo.getPrivacidad()==Privacidad.PRIVADO) { privacidadTextField.setText("Privado"); } else{ privacidadTextField.setText("Publico"); } DtUsuario[] usuGustan=infoVideo.getUsuariosGusta(); DtUsuario[] usuNoGustan=infoVideo.getUsuariosNoGusta(); String[]nickUsuGustan=new String[usuGustan.length]; String[]nickUsuNoGustan=new String[usuNoGustan.length]; int i=0; UsuariosGListModel.clear(); for(DtUsuario usuario:usuGustan){ nickUsuGustan[i]=usuario.getNickname(); UsuariosGListModel.addElement(nickUsuGustan[i]); i++; } i=0; UsuariosNoGListModel.clear(); for(DtUsuario usuario:usuNoGustan){ nickUsuNoGustan[i]=usuario.getNickname(); UsuariosNoGListModel.addElement(nickUsuNoGustan[i]); i++; } //el clear de comentarios //aca falta agregar a los comentarios } private void initializeVideoDetailsPane() { videoDetailsPanel.setLayout(new GridLayout(0, 2, 0, 0)); } } <file_sep>package uyTubePersistencia; import java.io.Serializable; import java.util.List; import javax.persistence.Entity; import javax.persistence.Id; import javax.persistence.JoinTable; import javax.persistence.ManyToMany; import uytubeLogic.logica.DtFecha; import uytubeLogic.logica.SystemHandler.Privacidad; @Entity public class Video implements Serializable{ /** * */ private static final long serialVersionUID = 1L; @Id private Integer idVideo; private String nombre; private String descripcion; private int duracion; private String Fecha; private String urlVideo; private Privacidad privacidad; public Video() { // TODO Auto-generated constructor stub } public Video(uytubeLogic.logica.Video video) { this.setIdVideo(video.getIDVideo()); this.setDescripcion(video.getDescripcion()); this.setNombre(video.getNombre()); this.setDuracion(video.getDuracion()); this.setFecha(video.getFechaPublicacion().getFecha().toString()); this.setPrivacidad(video.getPrivacidad()); this.setUrlVideo(video.getURL()); } public String getFecha() { return Fecha; } public void setFecha(String fecha) { Fecha = fecha; } public Integer getIdVideo() { return idVideo; } public void setIdVideo(Integer idVideo) { this.idVideo = idVideo; } public String getNombre() { return nombre; } public void setNombre(String nombre) { this.nombre = nombre; } public String getDescripcion() { return descripcion; } public void setDescripcion(String descripcion) { this.descripcion = descripcion; } public int getDuracion() { return duracion; } public void setDuracion(int duracion) { this.duracion = duracion; } public String getUrlVideo() { return urlVideo; } public void setUrlVideo(String urlVideo) { this.urlVideo = urlVideo; } public Privacidad getPrivacidad() { return privacidad; } public void setPrivacidad(Privacidad privacidad) { this.privacidad = privacidad; } public static long getSerialversionuid() { return serialVersionUID; } } <file_sep><%@page import="java.text.SimpleDateFormat"%> <%@page import="java.text.DateFormat"%> <%@page import="uytubeLogica.publicar.DtListaReproduccion"%> <%@ page import = "uytubeLogica.publicar.DtVideo"%> <%@ page import = "uytubeLogica.publicar.Privacidad"%> <%@ page import = "uytubeLogica.publicar.DtCanal"%> <%@ page language="java" contentType="text/html; charset=UTF-8" pageEncoding="UTF-8"%> <%@page 
errorPage="../error/error404.jsp" %> <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> <html> <head> <%@include file="../buscador.jsp" %> <style>table{ width: 100%; table-layout: fixed; } th { color: white; padding: 15px; text-align: left; border-bottom: 1px solid #ddd; background-color: #ff0000; color: white; vertical-align: text-top; } td { padding: 15px; text-align: left; border-bottom: 1px solid #ddd; } tr:nth-child(even) {background-color: #f2f2f2;}</style> <link rel="stylesheet" href="/media/styles/Busqueda.css"> <meta http-equiv="Content-Type" content="text/html; charset=UTF-8"> <title>Resultados De Consulta</title> </head> <body> <table id="TablaContenidos"> <tr> <th valign="top"> Tipo </th> <th valign="top"> Nombre </th> <th valign="top"> Propietario </th> </tr> <% DtVideo[] vid = (DtVideo[]) request.getAttribute("videos"); for (DtVideo entry : vid) { String nombreV=entry.getNombre(); String descV=entry.getDescripcion(); String propietarioV = entry.getPropietario(); request.setAttribute("IDVideo", entry.getIDVideo().toString()); request.setAttribute(nombreV, nombreV); if(entry.getPrivacidad().equals(Privacidad.PUBLICO)){ %> <tr> <td>Video <form action="watch" method="get"> <input type="hidden" name="opcion" value="ver"> <input type="hidden" name="ID" value="<%=entry.getIDVideo()%>"> <input type="submit" value="Ver Ahora"> </form> </td> <td id="NombreTD"><%=nombreV%></td> <td id="PropietarioTD"><%=propietarioV %></td> </tr> <% } } DtListaReproduccion[] listas=(DtListaReproduccion[]) request.getAttribute("listas"); for(DtListaReproduccion entry: listas){ if(entry.getPrivado().equals(Privacidad.PUBLICO)){ %> <tr> <td>Lista de Reproduccion <form action="watch" method="get"> <input type="hidden" name="opcion" value="consulta"> <input type="submit" value="Ver Info"> </form> </td> <td id="NombreTD"><%=entry.getNombre()%></td> <td id="PropietarioTD"><%=entry.getPropietario() %></td> </tr> <% } } %> <tr> <td> </tr> </table> </body> </html><file_sep>package uytubeLogica.publicar; import javax.jws.WebMethod; import javax.jws.WebParam; import javax.jws.WebResult; import javax.jws.WebService; import javax.jws.soap.SOAPBinding; import javax.xml.bind.annotation.XmlSeeAlso; import javax.xml.ws.Action; /** * This class was generated by Apache CXF 3.2.6 * 2018-11-14T13:58:41.829-03:00 * Generated source version: 3.2.6 * */ @WebService(targetNamespace = "http://publicar.uytubeLogica/", name = "WebServices") @XmlSeeAlso({ObjectFactory.class, net.java.dev.jaxb.array.ObjectFactory.class}) @SOAPBinding(style = SOAPBinding.Style.RPC) public interface WebServices { @WebMethod @Action(input = "http://publicar.uytubeLogica/WebServices/cambiarPrivLDRRequest", output = "http://publicar.uytubeLogica/WebServices/cambiarPrivLDRResponse") public void cambiarPrivLDR( @WebParam(partName = "arg0", name = "arg0") java.lang.String arg0, @WebParam(partName = "arg1", name = "arg1") java.lang.String arg1, @WebParam(partName = "arg2", name = "arg2") Privacidad arg2 ); @WebMethod @Action(input = "http://publicar.uytubeLogica/WebServices/operacionPruebaRequest", output = "http://publicar.uytubeLogica/WebServices/operacionPruebaResponse") public void operacionPrueba(); @WebMethod @Action(input = "http://publicar.uytubeLogica/WebServices/listarLDRPublicasPorNombreRequest", output = "http://publicar.uytubeLogica/WebServices/listarLDRPublicasPorNombreResponse") @WebResult(name = "return", targetNamespace = "http://publicar.uytubeLogica/", partName = "return") public 
DtListaReproduccionArray listarLDRPublicasPorNombre( @WebParam(partName = "arg0", name = "arg0") java.lang.String arg0 ); @WebMethod @Action(input = "http://publicar.uytubeLogica/WebServices/listarLDRPorCategoriaRequest", output = "http://publicar.uytubeLogica/WebServices/listarLDRPorCategoriaResponse") @WebResult(name = "return", targetNamespace = "http://publicar.uytubeLogica/", partName = "return") public DtListaReproduccionArray listarLDRPorCategoria( @WebParam(partName = "arg0", name = "arg0") java.lang.String arg0, @WebParam(partName = "arg1", name = "arg1") Privacidad arg1, @WebParam(partName = "arg2", name = "arg2") java.lang.String arg2 ); @WebMethod @Action(input = "http://publicar.uytubeLogica/WebServices/verificarDispUsuarioRequest", output = "http://publicar.uytubeLogica/WebServices/verificarDispUsuarioResponse") @WebResult(name = "return", targetNamespace = "http://publicar.uytubeLogica/", partName = "return") public boolean verificarDispUsuario( @WebParam(partName = "arg0", name = "arg0") java.lang.String arg0, @WebParam(partName = "arg1", name = "arg1") java.lang.String arg1 ); @WebMethod @Action(input = "http://publicar.uytubeLogica/WebServices/listarUsuariosQueSigueRequest", output = "http://publicar.uytubeLogica/WebServices/listarUsuariosQueSigueResponse") @WebResult(name = "return", targetNamespace = "http://publicar.uytubeLogica/", partName = "return") public net.java.dev.jaxb.array.StringArray listarUsuariosQueSigue( @WebParam(partName = "arg0", name = "arg0") java.lang.String arg0 ); @WebMethod @Action(input = "http://publicar.uytubeLogica/WebServices/listarCanalesPublicosPorNombreRequest", output = "http://publicar.uytubeLogica/WebServices/listarCanalesPublicosPorNombreResponse") @WebResult(name = "return", targetNamespace = "http://publicar.uytubeLogica/", partName = "return") public DtCanalArray listarCanalesPublicosPorNombre( @WebParam(partName = "arg0", name = "arg0") java.lang.String arg0 ); @WebMethod @Action(input = "http://publicar.uytubeLogica/WebServices/listarDatosUsuarioRequest", output = "http://publicar.uytubeLogica/WebServices/listarDatosUsuarioResponse") @WebResult(name = "return", targetNamespace = "http://publicar.uytubeLogica/", partName = "return") public DtUsuario listarDatosUsuario( @WebParam(partName = "arg0", name = "arg0") java.lang.String arg0 ); @WebMethod @Action(input = "http://publicar.uytubeLogica/WebServices/seguirUsuarioRequest", output = "http://publicar.uytubeLogica/WebServices/seguirUsuarioResponse") public void seguirUsuario( @WebParam(partName = "arg0", name = "arg0") java.lang.String arg0, @WebParam(partName = "arg1", name = "arg1") java.lang.String arg1 ); @WebMethod @Action(input = "http://publicar.uytubeLogica/WebServices/valorarVideoRequest", output = "http://publicar.uytubeLogica/WebServices/valorarVideoResponse") public void valorarVideo( @WebParam(partName = "arg0", name = "arg0") int arg0, @WebParam(partName = "arg1", name = "arg1") java.lang.String arg1, @WebParam(partName = "arg2", name = "arg2") boolean arg2 ); @WebMethod @Action(input = "http://publicar.uytubeLogica/WebServices/infoLDRdeUsuarioRequest", output = "http://publicar.uytubeLogica/WebServices/infoLDRdeUsuarioResponse") @WebResult(name = "return", targetNamespace = "http://publicar.uytubeLogica/", partName = "return") public DtListaReproduccionArray infoLDRdeUsuario( @WebParam(partName = "arg0", name = "arg0") java.lang.String arg0, @WebParam(partName = "arg1", name = "arg1") java.lang.String arg1, @WebParam(partName = "arg2", name = "arg2") Privacidad 
arg2 ); @WebMethod @Action(input = "http://publicar.uytubeLogica/WebServices/mostrarInfoCanalRequest", output = "http://publicar.uytubeLogica/WebServices/mostrarInfoCanalResponse") @WebResult(name = "return", targetNamespace = "http://publicar.uytubeLogica/", partName = "return") public DtCanal mostrarInfoCanal( @WebParam(partName = "arg0", name = "arg0") java.lang.String arg0 ); @WebMethod @Action(input = "http://publicar.uytubeLogica/WebServices/agregarVideoListaRequest", output = "http://publicar.uytubeLogica/WebServices/agregarVideoListaResponse") public void agregarVideoLista( @WebParam(partName = "arg0", name = "arg0") java.lang.String arg0, @WebParam(partName = "arg1", name = "arg1") int arg1, @WebParam(partName = "arg2", name = "arg2") java.lang.String arg2 ); @WebMethod @Action(input = "http://publicar.uytubeLogica/WebServices/verDetallesVideoExtRequest", output = "http://publicar.uytubeLogica/WebServices/verDetallesVideoExtResponse") @WebResult(name = "return", targetNamespace = "http://publicar.uytubeLogica/", partName = "return") public DtInfoVideo verDetallesVideoExt( @WebParam(partName = "arg0", name = "arg0") int arg0 ); @WebMethod @Action(input = "http://publicar.uytubeLogica/WebServices/responderComentarioRequest", output = "http://publicar.uytubeLogica/WebServices/responderComentarioResponse") public void responderComentario( @WebParam(partName = "arg0", name = "arg0") int arg0, @WebParam(partName = "arg1", name = "arg1") int arg1, @WebParam(partName = "arg2", name = "arg2") java.lang.String arg2, @WebParam(partName = "arg3", name = "arg3") DtFecha arg3, @WebParam(partName = "arg4", name = "arg4") java.lang.String arg4 ); @WebMethod @Action(input = "http://publicar.uytubeLogica/WebServices/memberVideoRequest", output = "http://publicar.uytubeLogica/WebServices/memberVideoResponse") @WebResult(name = "return", targetNamespace = "http://publicar.uytubeLogica/", partName = "return") public boolean memberVideo( @WebParam(partName = "arg0", name = "arg0") int arg0 ); @WebMethod @Action(input = "http://publicar.uytubeLogica/WebServices/agregarVisitaRequest", output = "http://publicar.uytubeLogica/WebServices/agregarVisitaResponse") public void agregarVisita( @WebParam(partName = "arg0", name = "arg0") int arg0, @WebParam(partName = "arg1", name = "arg1") java.lang.String arg1 ); @WebMethod @Action(input = "http://publicar.uytubeLogica/WebServices/listarVideosPorCategoriaRequest", output = "http://publicar.uytubeLogica/WebServices/listarVideosPorCategoriaResponse") @WebResult(name = "return", targetNamespace = "http://publicar.uytubeLogica/", partName = "return") public DtVideoArray listarVideosPorCategoria( @WebParam(partName = "arg0", name = "arg0") java.lang.String arg0, @WebParam(partName = "arg1", name = "arg1") Privacidad arg1, @WebParam(partName = "arg2", name = "arg2") java.lang.String arg2 ); @WebMethod @Action(input = "http://publicar.uytubeLogica/WebServices/nuevoUsuarioRequest", output = "http://publicar.uytubeLogica/WebServices/nuevoUsuarioResponse") public void nuevoUsuario( @WebParam(partName = "arg0", name = "arg0") java.lang.String arg0, @WebParam(partName = "arg1", name = "arg1") java.lang.String arg1, @WebParam(partName = "arg2", name = "arg2") java.lang.String arg2, @WebParam(partName = "arg3", name = "arg3") java.lang.String arg3, @WebParam(partName = "arg4", name = "arg4") java.lang.String arg4, @WebParam(partName = "arg5", name = "arg5") DtFecha arg5, @WebParam(partName = "arg6", name = "arg6") byte[] arg6, @WebParam(partName = "arg7", name = "arg7") 
java.lang.String arg7, @WebParam(partName = "arg8", name = "arg8") java.lang.String arg8, @WebParam(partName = "arg9", name = "arg9") Privacidad arg9, @WebParam(partName = "arg10", name = "arg10") java.lang.String arg10 ); @WebMethod @Action(input = "http://publicar.uytubeLogica/WebServices/verificarLoginRequest", output = "http://publicar.uytubeLogica/WebServices/verificarLoginResponse") @WebResult(name = "return", targetNamespace = "http://publicar.uytubeLogica/", partName = "return") public boolean verificarLogin( @WebParam(partName = "arg0", name = "arg0") java.lang.String arg0, @WebParam(partName = "arg1", name = "arg1") java.lang.String arg1 ); @WebMethod @Action(input = "http://publicar.uytubeLogica/WebServices/nuevaListaParticularRequest", output = "http://publicar.uytubeLogica/WebServices/nuevaListaParticularResponse") public void nuevaListaParticular( @WebParam(partName = "arg0", name = "arg0") java.lang.String arg0, @WebParam(partName = "arg1", name = "arg1") java.lang.String arg1, @WebParam(partName = "arg2", name = "arg2") Privacidad arg2 ); @WebMethod @Action(input = "http://publicar.uytubeLogica/WebServices/nuevoComentarioRequest", output = "http://publicar.uytubeLogica/WebServices/nuevoComentarioResponse") public void nuevoComentario( @WebParam(partName = "arg0", name = "arg0") int arg0, @WebParam(partName = "arg1", name = "arg1") java.lang.String arg1, @WebParam(partName = "arg2", name = "arg2") DtFecha arg2, @WebParam(partName = "arg3", name = "arg3") java.lang.String arg3 ); @WebMethod @Action(input = "http://publicar.uytubeLogica/WebServices/eliminarVideoListaRequest", output = "http://publicar.uytubeLogica/WebServices/eliminarVideoListaResponse") public void eliminarVideoLista( @WebParam(partName = "arg0", name = "arg0") java.lang.String arg0, @WebParam(partName = "arg1", name = "arg1") int arg1, @WebParam(partName = "arg2", name = "arg2") java.lang.String arg2 ); @WebMethod @Action(input = "http://publicar.uytubeLogica/WebServices/listarVideoListaReproduccionRequest", output = "http://publicar.uytubeLogica/WebServices/listarVideoListaReproduccionResponse") @WebResult(name = "return", targetNamespace = "http://publicar.uytubeLogica/", partName = "return") public DtVideoArray listarVideoListaReproduccion( @WebParam(partName = "arg0", name = "arg0") java.lang.String arg0, @WebParam(partName = "arg1", name = "arg1") java.lang.String arg1 ); @WebMethod @Action(input = "http://publicar.uytubeLogica/WebServices/getEstadoValoracionRequest", output = "http://publicar.uytubeLogica/WebServices/getEstadoValoracionResponse") @WebResult(name = "return", targetNamespace = "http://publicar.uytubeLogica/", partName = "return") public java.lang.String getEstadoValoracion( @WebParam(partName = "arg0", name = "arg0") int arg0, @WebParam(partName = "arg1", name = "arg1") java.lang.String arg1 ); @WebMethod @Action(input = "http://publicar.uytubeLogica/WebServices/listarCategoriasRequest", output = "http://publicar.uytubeLogica/WebServices/listarCategoriasResponse") @WebResult(name = "return", targetNamespace = "http://publicar.uytubeLogica/", partName = "return") public DtCategoriaArray listarCategorias(); @WebMethod @Action(input = "http://publicar.uytubeLogica/WebServices/listarVideoHistorialRequest", output = "http://publicar.uytubeLogica/WebServices/listarVideoHistorialResponse") @WebResult(name = "return", targetNamespace = "http://publicar.uytubeLogica/", partName = "return") public DtVideoHistorialArray listarVideoHistorial( @WebParam(partName = "arg0", name = "arg0") 
java.lang.String arg0 ); @WebMethod @Action(input = "http://publicar.uytubeLogica/WebServices/memberListaReproduccionPropiaRequest", output = "http://publicar.uytubeLogica/WebServices/memberListaReproduccionPropiaResponse") @WebResult(name = "return", targetNamespace = "http://publicar.uytubeLogica/", partName = "return") public boolean memberListaReproduccionPropia( @WebParam(partName = "arg0", name = "arg0") java.lang.String arg0, @WebParam(partName = "arg1", name = "arg1") java.lang.String arg1 ); @WebMethod @Action(input = "http://publicar.uytubeLogica/WebServices/listarComentariosRequest", output = "http://publicar.uytubeLogica/WebServices/listarComentariosResponse") @WebResult(name = "return", targetNamespace = "http://publicar.uytubeLogica/", partName = "return") public DtComentarioArray listarComentarios( @WebParam(partName = "arg0", name = "arg0") int arg0 ); @WebMethod @Action(input = "http://publicar.uytubeLogica/WebServices/obtenerDtsVideosListaReproduccionUsuarioRequest", output = "http://publicar.uytubeLogica/WebServices/obtenerDtsVideosListaReproduccionUsuarioResponse") @WebResult(name = "return", targetNamespace = "http://publicar.uytubeLogica/", partName = "return") public DtVideoArray obtenerDtsVideosListaReproduccionUsuario( @WebParam(partName = "arg0", name = "arg0") java.lang.String arg0, @WebParam(partName = "arg1", name = "arg1") java.lang.String arg1 ); @WebMethod @Action(input = "http://publicar.uytubeLogica/WebServices/infoListaReproduccionRequest", output = "http://publicar.uytubeLogica/WebServices/infoListaReproduccionResponse") @WebResult(name = "return", targetNamespace = "http://publicar.uytubeLogica/", partName = "return") public DtListaReproduccion infoListaReproduccion( @WebParam(partName = "arg0", name = "arg0") java.lang.String arg0, @WebParam(partName = "arg1", name = "arg1") java.lang.String arg1 ); @WebMethod @Action(input = "http://publicar.uytubeLogica/WebServices/aniadirVideoRequest", output = "http://publicar.uytubeLogica/WebServices/aniadirVideoResponse") public void aniadirVideo( @WebParam(partName = "arg0", name = "arg0") java.lang.String arg0, @WebParam(partName = "arg1", name = "arg1") java.lang.String arg1, @WebParam(partName = "arg2", name = "arg2") java.lang.String arg2, @WebParam(partName = "arg3", name = "arg3") int arg3, @WebParam(partName = "arg4", name = "arg4") DtFecha arg4, @WebParam(partName = "arg5", name = "arg5") java.lang.String arg5, @WebParam(partName = "arg6", name = "arg6") DtCategoria arg6, @WebParam(partName = "arg7", name = "arg7") Privacidad arg7 ); @WebMethod @Action(input = "http://publicar.uytubeLogica/WebServices/listarUsuariosQueLeSigueRequest", output = "http://publicar.uytubeLogica/WebServices/listarUsuariosQueLeSigueResponse") @WebResult(name = "return", targetNamespace = "http://publicar.uytubeLogica/", partName = "return") public net.java.dev.jaxb.array.StringArray listarUsuariosQueLeSigue( @WebParam(partName = "arg0", name = "arg0") java.lang.String arg0 ); @WebMethod @Action(input = "http://publicar.uytubeLogica/WebServices/infoVideosCanalRequest", output = "http://publicar.uytubeLogica/WebServices/infoVideosCanalResponse") @WebResult(name = "return", targetNamespace = "http://publicar.uytubeLogica/", partName = "return") public DtVideoArray infoVideosCanal( @WebParam(partName = "arg0", name = "arg0") java.lang.String arg0, @WebParam(partName = "arg1", name = "arg1") java.lang.String arg1, @WebParam(partName = "arg2", name = "arg2") Privacidad arg2 ); @WebMethod @Action(input = 
"http://publicar.uytubeLogica/WebServices/listarLDRdeUsuarioRequest", output = "http://publicar.uytubeLogica/WebServices/listarLDRdeUsuarioResponse") @WebResult(name = "return", targetNamespace = "http://publicar.uytubeLogica/", partName = "return") public net.java.dev.jaxb.array.StringArray listarLDRdeUsuario( @WebParam(partName = "arg0", name = "arg0") java.lang.String arg0 ); @WebMethod @Action(input = "http://publicar.uytubeLogica/WebServices/bajaUsuarioRequest", output = "http://publicar.uytubeLogica/WebServices/bajaUsuarioResponse") public void bajaUsuario( @WebParam(partName = "arg0", name = "arg0") java.lang.String arg0 ); @WebMethod @Action(input = "http://publicar.uytubeLogica/WebServices/infoAddVideoRequest", output = "http://publicar.uytubeLogica/WebServices/infoAddVideoResponse") @WebResult(name = "return", targetNamespace = "http://publicar.uytubeLogica/", partName = "return") public DtVideo infoAddVideo( @WebParam(partName = "arg0", name = "arg0") int arg0 ); @WebMethod @Action(input = "http://publicar.uytubeLogica/WebServices/cargarDatosRequest", output = "http://publicar.uytubeLogica/WebServices/cargarDatosResponse") public void cargarDatos(); @WebMethod @Action(input = "http://publicar.uytubeLogica/WebServices/listarVideosPublicosPorNombreRequest", output = "http://publicar.uytubeLogica/WebServices/listarVideosPublicosPorNombreResponse") @WebResult(name = "return", targetNamespace = "http://publicar.uytubeLogica/", partName = "return") public DtVideoArray listarVideosPublicosPorNombre( @WebParam(partName = "arg0", name = "arg0") java.lang.String arg0 ); @WebMethod @Action(input = "http://publicar.uytubeLogica/WebServices/dejarUsuarioRequest", output = "http://publicar.uytubeLogica/WebServices/dejarUsuarioResponse") public void dejarUsuario( @WebParam(partName = "arg0", name = "arg0") java.lang.String arg0, @WebParam(partName = "arg1", name = "arg1") java.lang.String arg1 ); } <file_sep>package uytube.admin.videos; import java.awt.EventQueue; import javax.swing.JInternalFrame; import java.awt.GridLayout; import javax.swing.JLabel; import javax.swing.JPanel; import java.awt.BorderLayout; import javax.swing.JComboBox; import javax.swing.BoxLayout; import javax.swing.ButtonGroup; import javax.swing.DefaultListModel; import javax.swing.JList; import javax.swing.JOptionPane; import javax.swing.JButton; import javax.swing.JRadioButton; import java.awt.event.ActionListener; import java.awt.event.ActionEvent; import javax.swing.event.ListSelectionListener; import uytubeLogic.logica.DtVideo; import uytubeLogic.logica.Fabrica; import uytubeLogic.logica.IUsuarioCtrl; import uytubeLogic.logica.IVideoCtrl; import javax.swing.event.ListSelectionEvent; public class ValorarVideo extends JInternalFrame { private String nickUsuario; private String nickValorador; private String nomVideo=null; private Integer IDVideo=-1; private Boolean valoracion; private DefaultListModel<String> videoListModel = new DefaultListModel<String>(); private final ButtonGroup MeGustaBtnGroup = new ButtonGroup(); private JComboBox<String> comboBoxN2; private static void infoBox(String infoMessage, String titleBar){ JOptionPane.showMessageDialog(null, infoMessage, "" + titleBar, JOptionPane.INFORMATION_MESSAGE); } Fabrica fabrica=Fabrica.getInstance(); IUsuarioCtrl ICU=fabrica.getIUsuarioCtrl(); IVideoCtrl VCU=fabrica.getIVideoCtrl(); /** * Launch the application. 
*/ public static void main(String[] args) { EventQueue.invokeLater(new Runnable() { public void run() { try { ValorarVideo frame = new ValorarVideo(); frame.setVisible(true); } catch (Exception e) { e.printStackTrace(); } } }); } /** * Create the frame. */ public ValorarVideo() { setTitle("Valorar Video"); setResizable(true); setMaximizable(true); setIconifiable(true); setClosable(true); setBounds(100, 100, 597, 300); getContentPane().setLayout(new GridLayout(1, 0, 0, 0)); JPanel panel = new JPanel(); getContentPane().add(panel); panel.setLayout(new BorderLayout(0, 0)); JPanel panel_2 = new JPanel(); panel.add(panel_2, BorderLayout.NORTH); panel_2.setLayout(new BoxLayout(panel_2, BoxLayout.X_AXIS)); JLabel lblNickname = new JLabel("Nickname"); panel_2.add(lblNickname); JComboBox<String> comboBoxNickname = new JComboBox<String>(); panel_2.add(comboBoxNickname);
// load every registered nickname into the channel-owner combo box
String[] nickUsuarios = ICU.listarNicknamesUsuarios(); for(int i=0; i<nickUsuarios.length;i++){ comboBoxNickname.addItem(nickUsuarios[i]); } comboBoxNickname.setSelectedIndex(-1); JList<String> listVideos = new JList<>(videoListModel); listVideos.addListSelectionListener(new ListSelectionListener() { public void valueChanged(ListSelectionEvent arg0) { if (!arg0.getValueIsAdjusting()&&!listVideos.isSelectionEmpty()) { nomVideo=listVideos.getSelectedValue(); DtVideo vid=ICU.obtenerInfoAdicVideo(nickUsuario, nomVideo); IDVideo=vid.getiDVideo(); comboBoxN2.setEnabled(true); } } }); panel.add(listVideos, BorderLayout.CENTER); JPanel panel_1 = new JPanel(); getContentPane().add(panel_1); panel_1.setLayout(new BorderLayout(0, 0)); JPanel panel_3 = new JPanel(); panel_1.add(panel_3, BorderLayout.NORTH); panel_3.setLayout(new BoxLayout(panel_3, BoxLayout.X_AXIS)); JLabel lblIngreseValoracion = new JLabel("Ingrese Valoracion"); panel_3.add(lblIngreseValoracion); JPanel panel_5 = new JPanel(); panel_1.add(panel_5, BorderLayout.SOUTH); panel_5.setLayout(new GridLayout(0, 2, 0, 0)); JButton btnValorar = new JButton("Valorar"); panel_5.add(btnValorar); JButton btnCancelar = new JButton("Cancelar"); btnCancelar.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent e) { setVisible(false); dispose(); } }); panel_5.add(btnCancelar); JPanel panel_6 = new JPanel(); panel_1.add(panel_6, BorderLayout.CENTER); panel_6.setLayout(new BorderLayout(0, 0)); JPanel panel_4 = new JPanel(); panel_6.add(panel_4, BorderLayout.NORTH); panel_4.setLayout(new GridLayout(0, 2, 0, 0)); comboBoxN2 = new JComboBox<String>(); comboBoxN2.setEnabled(false); comboBoxN2.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent e) { if(comboBoxN2.getSelectedIndex()!=-1 && comboBoxNickname.getSelectedIndex()!=-1){
// the rater is the user picked in this combo box (not the channel owner)
nickValorador=comboBoxN2.getSelectedItem().toString(); } } }); panel_4.add(comboBoxN2); comboBoxN2.setSelectedIndex(-1); for(int i=0; i<nickUsuarios.length;i++){ comboBoxN2.addItem(nickUsuarios[i]); } comboBoxN2.setSelectedIndex(-1); comboBoxNickname.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent arg0) { if(comboBoxNickname.getSelectedIndex()!=-1){ limpiar();
// remember the selected channel owner and list that channel's videos
nickUsuario=comboBoxNickname.getSelectedItem().toString(); cargarVideos(nickUsuario); } } }); JLabel lblNickDelQue = new JLabel("Nick del que valora"); panel_4.add(lblNickDelQue); JPanel panel_7 = new JPanel(); panel_6.add(panel_7); JRadioButton rdbtnMeGusta = new JRadioButton("Me Gusta");
rdbtnMeGusta.setSelected(true); MeGustaBtnGroup.add(rdbtnMeGusta); panel_7.add(rdbtnMeGusta); JRadioButton rdbtnNoMeGusta = new JRadioButton("No Me Gusta"); MeGustaBtnGroup.add(rdbtnNoMeGusta); panel_7.add(rdbtnNoMeGusta); btnValorar.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent e) { if(rdbtnMeGusta.isSelected()) valoracion=true; else valoracion=false; if(IDVideo!=-1&&nickUsuario!=null&&nickValorador!=null&&comboBoxNickname.getSelectedIndex()!=-1&&comboBoxN2.getSelectedIndex()!=-1){ VCU.valorarVideo(IDVideo, nickValorador, valoracion); infoBox("Se valoró con éxito", "Valoración exitosa"); setVisible(false); dispose(); } } }); } private void limpiar(){ videoListModel.clear(); IDVideo=-1; nickUsuario=null; nickValorador=null; comboBoxN2.setEnabled(false); comboBoxN2.setSelectedIndex(-1); } private void cargarVideos(String nickU){ String[] nomVideos=ICU.listarVideosCanal(nickU); for(String videoName:nomVideos){ videoListModel.addElement(videoName); } } } <file_sep>package uytubeWeb.servlets; import java.io.IOException; import java.text.ParseException; import java.text.SimpleDateFormat; import java.util.Date; import java.util.GregorianCalendar; import javax.servlet.ServletException; import javax.servlet.annotation.WebServlet; import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import javax.servlet.http.HttpSession; import javax.xml.datatype.DatatypeConfigurationException; import javax.xml.datatype.DatatypeFactory; import javax.xml.datatype.XMLGregorianCalendar; import uytubeLogica.publicar.DtCanal; import uytubeLogica.publicar.DtCategoria; import uytubeLogica.publicar.DtComentario; import uytubeLogica.publicar.DtFecha; import uytubeLogica.publicar.DtInfoVideo; import uytubeLogica.publicar.DtUsuario; import uytubeLogica.publicar.DtVideo; import uytubeLogica.publicar.Privacidad; /** * Servlet implementation class VideoServlet */ @WebServlet(name = "VideoServlet", urlPatterns = { "/watch", "/newVideo", "/modifyVideo", "/likeVideo", "/dislikeVideo", "/newComment", "/newResponse", "/leaveFollow" }) public class VideoServlet extends HttpServlet { private static final long serialVersionUID = 99L; /** * @see HttpServlet#HttpServlet() */ public VideoServlet() { super(); // TODO Auto-generated constructor stub } private boolean isInteger(String input) { try { Integer.parseInt(input); return true; } catch (Exception e) { return false; } } public static Date ParseFecha(String fecha) { SimpleDateFormat formato = new SimpleDateFormat("dd/MM/yyyy"); Date fechaDate = null; try { fechaDate = formato.parse(fecha); } catch (ParseException ex) { System.out.println(ex); } return fechaDate; } private void crearVideo(String login, String nomVideo, String duracion, String url, String fecha, String categoria, String descripcionV) { uytubeLogica.publicar.WebServicesService service = new uytubeLogica.publicar.WebServicesService(); uytubeLogica.publicar.WebServices port = service.getWebServicesPort(); System.out.println("estoy creando el video"); DtFecha fechaPublicacionV = new DtFecha(); Date laDate = ParseFecha(fecha); GregorianCalendar gcal = new GregorianCalendar(); gcal.setTime(laDate); XMLGregorianCalendar xgcal = null; try { xgcal = DatatypeFactory.newInstance().newXMLGregorianCalendar(gcal); } catch (DatatypeConfigurationException e1) { // TODO Auto-generated catch block e1.printStackTrace(); } fechaPublicacionV.setFecha(xgcal); DtCategoria catV = new DtCategoria();
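		// The new video is always registered with Privacidad.PRIVADO on the next line; the owner
		// can change its privacy later from the application. "duracion" is parsed with
		// Integer.parseInt, so the caller (doPost) is expected to have validated it beforehand
		// via isInteger().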
catV.setNombre(categoria); port.aniadirVideo(login, nomVideo, descripcionV, (Integer.parseInt(duracion)), fechaPublicacionV, url, catV, Privacidad.PRIVADO); } private void seguirUsuario(String nombre_usuario, String propietario) { uytubeLogica.publicar.WebServicesService service = new uytubeLogica.publicar.WebServicesService(); uytubeLogica.publicar.WebServices port = service.getWebServicesPort(); port.seguirUsuario(nombre_usuario, propietario); } private void dejarDeSeguirUsuario(String nombre_usuario, String propietario) { uytubeLogica.publicar.WebServicesService service = new uytubeLogica.publicar.WebServicesService(); uytubeLogica.publicar.WebServices port = service.getWebServicesPort(); System.out.println("el usuario es :" + nombre_usuario + " y el propietario es : " + propietario); port.dejarUsuario(nombre_usuario, propietario); ; } private void comentarVideo(int id_video, String comentador, String contenido) { uytubeLogica.publicar.WebServicesService service = new uytubeLogica.publicar.WebServicesService(); uytubeLogica.publicar.WebServices port = service.getWebServicesPort(); Date fecha_actual = new Date(); DtFecha fecha = new DtFecha(); Date laDate = fecha_actual; GregorianCalendar gcal = new GregorianCalendar(); gcal.setTime(laDate); XMLGregorianCalendar xgcal = null; try { xgcal = DatatypeFactory.newInstance().newXMLGregorianCalendar(gcal); } catch (DatatypeConfigurationException e1) { // TODO Auto-generated catch block e1.printStackTrace(); } fecha.setFecha(xgcal); port.nuevoComentario(id_video, comentador, fecha, contenido); } private String[] conseguirNicknamesUsuariosDelDtUsuario(DtUsuario[] dataUsuario) { String[] listaADevolver = new String[dataUsuario.length]; for (int index = 0; index < dataUsuario.length; index++) { listaADevolver[index] = dataUsuario[index].getNickname(); } return listaADevolver; } /** * @see HttpServlet#doGet(HttpServletRequest request, HttpServletResponse * response) */ protected void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { uytubeLogica.publicar.WebServicesService service = new uytubeLogica.publicar.WebServicesService(); uytubeLogica.publicar.WebServices port = service.getWebServicesPort(); // TODO Auto-generated method stub // response.getWriter().append("Served at: ").append(request.getContextPath()); System.out.println("estoy en videoServlet GET"); String opc = request.getParameter("opcion"); System.out.println(opc); switch (opc) { case "altaVideo": { HttpSession session = request.getSession(false); if (session != null && session.getAttribute("nombre_usuario") != null) { DtCategoria[] cat = port.listarCategorias().getItem().toArray(new DtCategoria[0]); for (DtCategoria entry : cat) { entry.setNombre(entry.getNombre().replace(" ", "||")); } request.setAttribute("listadoCat", cat); request.getRequestDispatcher("WEB-INF/Video/AltaVideo.jsp").forward(request, response); }else response.sendRedirect(request.getContextPath() + "/casa"); break; } case "null": break; case "likeVideo": { System.out.println("Quiero darle me gusta a un video"); HttpSession session = request.getSession(false); if (session != null && session.getAttribute("nombre_usuario") != null) { Integer id_video = Integer.parseInt(request.getParameter("id_video")); String nombre_usuario = (String) session.getAttribute("nombre_usuario"); port.valorarVideo(id_video, nombre_usuario, true); System.out.println("usuario " + nombre_usuario + " id_video:" + id_video);// esto es para ver si no // manda nada null. 
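			// valorarVideo(id, nick, true) records a "like" for the logged-in user;
			// the "dislikeVideo" case below is identical except that it passes false.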
System.out.println("le di me gusta"); } break; } case "dislikeVideo": { System.out.println("Quiero darle no me gusta a un video"); HttpSession session = request.getSession(false); if (session != null && session.getAttribute("nombre_usuario") != null) { Integer id_video = Integer.parseInt(request.getParameter("id_video")); String nombre_usuario = (String) session.getAttribute("nombre_usuario"); port.valorarVideo(id_video, nombre_usuario, false); System.out.println("usuario " + nombre_usuario + " id_video:" + id_video); System.out.println("le di no me gusta"); } break; } case "ver": { System.out.println("Quiero ver un video"); if (port.memberVideo(Integer.parseInt(request.getParameter("ID")))) { System.out.println("EL VIDEO ES NULL?"); System.out.println(port.memberVideo(Integer.parseInt(request.getParameter("ID")))); DtVideo dataVideo = port.infoAddVideo(Integer.parseInt(request.getParameter("ID"))); request.setAttribute("dataVideo", dataVideo); DtComentario[] comentarios = port.listarComentarios(dataVideo.getIDVideo()).getItem() .toArray(new DtComentario[0]); request.setAttribute("comentarios", comentarios); DtUsuario usuario_propietario = port.listarDatosUsuario(dataVideo.getPropietario()); request.setAttribute("usuario_propietario", usuario_propietario); DtCanal canal_propietario = port.mostrarInfoCanal(dataVideo.getPropietario()); request.setAttribute("canal_propietario", canal_propietario); DtInfoVideo infoVideo = port.verDetallesVideoExt(Integer.parseInt(request.getParameter("ID"))); request.setAttribute("cantLikes", infoVideo.getUsuariosGusta().size()); request.setAttribute("cantDislikes", infoVideo.getUsuariosNoGusta().size()); String[] listaLikes = conseguirNicknamesUsuariosDelDtUsuario( infoVideo.getUsuariosGusta().toArray(new DtUsuario[0])); String[] listaDislikes = conseguirNicknamesUsuariosDelDtUsuario( infoVideo.getUsuariosNoGusta().toArray(new DtUsuario[0])); request.setAttribute("listaLikes", listaLikes); request.setAttribute("listaDislikes", listaDislikes); HttpSession session = request.getSession(false); if (session != null && session.getAttribute("nombre_usuario") != null) { request.setAttribute("logged", true); String usuarioLogged = (String) session.getAttribute("nombre_usuario"); String[] listasReproduccionUsuarioLogged = port.listarLDRdeUsuario(usuarioLogged).getItem() .toArray(new String[0]); request.setAttribute("listasReproduccionUsuarioLogged", listasReproduccionUsuarioLogged); Integer IDVideo = Integer.parseInt(request.getParameter("ID")); port.agregarVisita(IDVideo, usuarioLogged); request.setAttribute("like_state", port.getEstadoValoracion(IDVideo, usuarioLogged)); // calificacion de estado de valoración. String[] usuariosSeguidores = port.listarUsuariosQueLeSigue(dataVideo.getPropietario()).getItem() .toArray(new String[0]); boolean flagFollow = false; for (int i = 0; i < usuariosSeguidores.length; i++) { if (usuariosSeguidores[i].equals(usuarioLogged)) flagFollow = true; } if (flagFollow == true) request.setAttribute("follow_state", "true"); else request.setAttribute("follow_state", "false"); // calificacion de estado de seguir.
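					// The attributes assembled above (video data, comments, like/dislike counts and
					// user lists, the visitor's playlists, their rating state and follow state) are
					// what the watch JSP consumes; the visitor's own user data is added just below.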
DtUsuario usuarioLoggedData = port.listarDatosUsuario(usuarioLogged); request.setAttribute("dataUsuario", usuarioLoggedData); } else { request.setAttribute("logged", false); } if (request.getHeader("User-Agent").indexOf("Mobile") != -1) { request.getRequestDispatcher("/WEB-INF/Video/VerVideoMobile.jsp").forward(request, response); } else { request.getRequestDispatcher("/WEB-INF/Video/VerVideo.jsp").forward(request, response); } } else response.sendRedirect(request.getContextPath() + "/casa"); break; } case "follow": { System.out.println("Quiero seguir a un usuario"); HttpSession session = request.getSession(); String propietario = request.getParameter("usuario_a_seguir"); String nombre_usuario = (String) session.getAttribute("nombre_usuario"); seguirUsuario(nombre_usuario, propietario); break; } case "leaveFollow": { System.out.println("Quiero dejar de seguir a un usuario"); HttpSession session = request.getSession(); String propietario = request.getParameter("usuario_a_seguir"); String nombre_usuario = (String) session.getAttribute("nombre_usuario"); dejarDeSeguirUsuario(nombre_usuario, propietario); break; } case "comment": { System.out.println("Quiero hacer un comentario"); HttpSession session = request.getSession(false); if (session != null && session.getAttribute("nombre_usuario") != null) { String comentador = (String) session.getAttribute("nombre_usuario"); int id_video = Integer.parseInt(request.getParameter("id_video")); String contenido = request.getParameter("contenido"); comentarVideo(id_video, comentador, contenido); System.out.println("comentador " + comentador + " id_video:" + id_video + "contenido: " + contenido); } break; } case "modificarVideo": { // aca modificar video DtVideo dataVideo = (DtVideo) request.getAttribute("dtVideo"); response.getWriter().append("no esta hecho aun"); break; } default: System.out.println("Error"); break; } } /** * @see HttpServlet#doPost(HttpServletRequest request, HttpServletResponse * response) */ protected void doPost(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { uytubeLogica.publicar.WebServicesService service = new uytubeLogica.publicar.WebServicesService(); uytubeLogica.publicar.WebServices port = service.getWebServicesPort(); // TODO Auto-generated method stub System.out.println("estoy en videoServlet POST"); String opc = request.getParameter("opcion"); System.out.println(opc); switch (opc) { case "altaVideo": System.out.println("Quiero crear video"); HttpSession session = request.getSession(false); if (session != null && session.getAttribute("nombre_usuario") != null) { String login = (String) session.getAttribute("nombre_usuario"); String nombreVideo = new String(request.getParameter("nombreVideo").getBytes("ISO-8859-1"), "UTF-8"); String duracionVideo = new String(request.getParameter("duracionVideo").getBytes("ISO-8859-1"), "UTF-8"); String urlVideo = request.getParameter("urlVideo"); String fechaVideo = request.getParameter("fechaVideo"); String categoria = new String(request.getParameter("categoria").getBytes("ISO-8859-1"), "UTF-8") .replace("||", " "); System.out.println("hola pepito " + categoria); System.out.println("hola pepote " + request.getParameter("categoria")); String descVideo = new String(request.getParameter("descVideo").getBytes("ISO-8859-1"), "UTF-8"); if (nombreVideo != "" && duracionVideo != "" && isInteger(duracionVideo) && urlVideo != "" && fechaVideo != "" && descVideo != "") { crearVideo(login, nombreVideo, duracionVideo, urlVideo, fechaVideo, categoria, 
descVideo); response.sendRedirect(request.getContextPath() + "/casa"); } else { response.getWriter().append("Error, verifique los campos nuevamente"); } } else { response.sendRedirect(request.getContextPath() + "/casa"); } break; case "modificarVideo": { // aca modificar video DtVideo dataVideo = (DtVideo) request.getAttribute("dtVideo"); response.getWriter().append("no esta hecho aun"); break; } default: System.out.println("Error"); break; } } } <file_sep>package uytube.admin.videos.alta; import java.awt.Container; import javax.swing.JInternalFrame; public final class AltaVideoInternalFrame { private final JInternalFrame internalFrame = new JInternalFrame(); private final Container container; public AltaVideoInternalFrame(final Container container) { this.container = container; this.container.add(internalFrame); initializeInternalFrame(); } private void initializeInternalFrame() { internalFrame.setVisible(false); internalFrame.setTitle("Alta Video"); internalFrame.setClosable(true); internalFrame.setResizable(true); internalFrame.setSize(330, 300); addContentToInternalFrame(); } private void addContentToInternalFrame() { final AltaVideoFormPanel altaVideoFormPanel = new AltaVideoFormPanel(internalFrame); internalFrame.add(altaVideoFormPanel.getPanel()); } public void show() { internalFrame.show(); } public void hide() { internalFrame.hide(); } } <file_sep><%@ page language="java" contentType="text/html; charset=UTF-8" pageEncoding="UTF-8"%> <!DOCTYPE html> <html> <head> <link rel="stylesheet" href="agregarVideoListaReproduccion.css"> <meta charset="UTF-8"> <title>UyTube</title> </head> <body> <br><br><br> <form> Video: <select style="width:25%" name="cmbVideo"> <option value="v1">v1</option> <option value="v2">v2</option> <option value="v3">v3</option> <option value="v4">v4</option> </select><br><br> Seleccione una lista: <select style="width:25%" name="cmbLista"> <option value="l1">l1</option> <option value="l2">l2</option> <option value="l3">l3</option> <option value="l4">l4</option> </select><br><br> <input type="submit" value="Agregar"> </form><br><br> </body> </html><file_sep>package uytubeLogic.logica; import org.hsqldb.lib.Iterator; import uytubeLogic.logica.SystemHandler.Privacidad; public class UsuarioCtrl implements IUsuarioCtrl { private static UsuarioCtrl instance = null; private UsuarioHandler usuarioh; private SystemHandler systemh; public Boolean memberListaReproduccionDefecto(String nombreLista) { return systemh.memberListaReproduccionDefecto(nombreLista); } public Boolean memberUsuario(String nickU) { return usuarioh.memberNickname(nickU); } private UsuarioCtrl() { usuarioh = UsuarioHandler.getInstance(); systemh = SystemHandler.getInstance(); } public Integer getTamanioArbol(DtComentario[] comments) { Integer tamanio = 0; for (DtComentario entry : comments) { tamanio = tamanio + entry.getTamanioArbol(); } return tamanio; }// Carmona: lo siento pero lo tengo que usar desde una JSP, entonces no me // conviene que esté en el controlador.
Att.: Marco public Boolean memberListaReproduccionPropia(String nickU, String nombreLista) { UsuarioHandler usuHandler = UsuarioHandler.getInstance(); Usuario usuarioParticular = usuHandler.find(nickU); return usuarioParticular.memberListaReproduccionPropia(nombreLista); } public static UsuarioCtrl getInstance() { if (instance == null) instance = new UsuarioCtrl(); return instance; } public String[] listarNicknamesUsuarios() { return usuarioh.listarNicknamesUsuarios(); } public String[] listarVideosCanal(String nickU) { Usuario usuarioParticular = usuarioh.find(nickU); return usuarioParticular.listarVideosCanal(); } public DtVideo[] infoVideosCanal(String filtro, String nickU, Privacidad priv) { Usuario usuarioParticular = usuarioh.find(nickU); return usuarioParticular.infoVideosCanal(filtro, priv); } public boolean nuevaListaPorDefecto(String nombreL) { boolean foundName = false; String[] nicks = usuarioh.listarNicknamesUsuarios(); int i = 0; while (!foundName && i < nicks.length) { Usuario usu = usuarioh.find(nicks[i]); if (usu.memberListaReproduccionPropia(nombreL)) { foundName = true; } i++; } if (!foundName) { DtListaReproduccion listaNueva = new DtListaReproduccion(nombreL); systemh.aniadirListaDefault(listaNueva); for (String nick : nicks) { Usuario usuarioParticular = usuarioh.find(nick); usuarioParticular.nuevaListaPorDefecto(nombreL); } } return !foundName; } public void nuevaListaParticular(String nickU, String nombreL, Privacidad privada) { Usuario usuarioParticular = usuarioh.find(nickU); usuarioParticular.nuevaListaParticular(nombreL, nickU, privada); } public String[] listarLDRdeUsuario(String nickU) { Usuario usuarioParticular = usuarioh.find(nickU); return usuarioParticular.listarListasReproduccion(); } public void agregarVideoLista(String nickU, Integer id_video, String nombreLDR) { Usuario usuarioParticular = usuarioh.find(nickU); usuarioParticular.agregarVideoLDR(id_video, nombreLDR); }// comentarle a Carmona sobre el cambio con la id. 
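	// Illustrative use of this facade from client code (the nickname, id and list name below
	// are made-up example values, not ones defined elsewhere in the project):
	//   IUsuarioCtrl ctrl = Fabrica.getInstance().getIUsuarioCtrl();
	//   ctrl.agregarVideoLista("unNick", 42, "Ver después");
	// Note that the second argument is the video's internal id, not its name.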
public void eliminarVideoLista(String nickU, Integer id_video, String nombreLDR) { Usuario usuarioParticular = usuarioh.find(nickU); usuarioParticular.eliminarVideoLista(id_video, nombreLDR); } public void cambiarPrivLDR(String nickU, String nombreL, Privacidad privE) { Usuario usuarioParticular = usuarioh.find(nickU); usuarioParticular.cambiarPrivLDR(nombreL, privE); } public void editarDatosUsuario(String nickU, String nuevoNombreU, String nuevoApellidoU, DtFecha nuevaFechaNacimientoU, byte[] nuevaFotoU) { Usuario usuarioParticular = usuarioh.find(nickU); usuarioParticular.editarDatosUsuario(nuevoNombreU, nuevoApellidoU, nuevaFechaNacimientoU, nuevaFotoU); } public void seguirUsuario(String Usu1, String Usu2) { Usuario uraiz = usuarioh.find(Usu1); Usuario udestino = usuarioh.find(Usu2); uraiz.aniadirUsuarioASeguir(udestino); udestino.aniadirUsuarioQueLeSigue(uraiz); } public void dejarUsuario(String Usu1, String Usu2) { Usuario uraiz = usuarioh.find(Usu1); Usuario udestino = usuarioh.find(Usu2); uraiz.removerUsuarioASeguir(udestino); udestino.removerUsuarioQueLeSigue(uraiz); } public void aniadirVideo(String nickU, String nombreV, String descripcionV, Integer duracionV, DtFecha fechaPublicacion, String url, DtCategoria catE, Privacidad privacidadV) { Usuario usuarioParticular = usuarioh.find(nickU); System.out.println("la url es " + url); if (url.contains("https://www.youtube.com/watch?v=")) { System.out.println("contiene el watch"); url = url.replace("https://www.youtube.com/watch?v=", "https://www.youtube.com/embed/"); System.out.println("ahora es " + url); } else if (url.contains("https://youtu.be/")) { System.out.println("contiene el punto"); url = url.replace("https://youtu.be/", "https://www.youtube.com/embed/"); System.out.println("ahora es " + url); } usuarioParticular.aniadirVideo(nombreV, nickU, descripcionV, duracionV, fechaPublicacion, url, catE, privacidadV); } public void ingresarNuevosDatosVideo(String nickU, String nuevoNombre, String nuevaDescripcion, int nuevaDuracion, DtFecha nuevaFechaPublicacion, String nuevaUrl, DtCategoria nuevaCat, Privacidad nuevaPrivacidad) { Usuario usuarioParticular = usuarioh.find(nickU); usuarioParticular.ingresarNuevosDatosVideo(nuevoNombre, nuevaDescripcion, nuevaDuracion, nuevaFechaPublicacion, nuevaUrl, nuevaCat, nuevaPrivacidad); } public Boolean verificarDispUsuario(String nickU, String email) { Boolean flag = usuarioh.memberNickname(nickU) || usuarioh.memberEmail(email); return !flag; }// true si está disponible, false si ya está ocupado public void nuevoUsuario(String nickU, String pass, String nombreUsuario, String apellidoU, String emailU, DtFecha fechaNacimientoU, byte[] fotoU, String nombreCanal, String descripcionCanal, Privacidad privacidadCanal, String categoriaCanal) { Usuario usuarioParticular = new Usuario(nickU, pass, nombreUsuario, apellidoU, emailU, fechaNacimientoU, fotoU, nombreCanal, descripcionCanal, privacidadCanal, categoriaCanal); usuarioh.aniadirUsuario(usuarioParticular); } public DtListaReproduccion infoAdicLDR(String nickU, String nombreL) { Usuario usuarioParticular = usuarioh.find(nickU); return usuarioParticular.verDetallesListareproduccion(nombreL); } public DtUsuario listarDatosUsuario(String nickU) { Usuario usuarioParticular = usuarioh.find(nickU); return usuarioParticular.listarDatosUsuario(); } public DtCanal mostrarInfoCanal(String nickU) { Usuario usuarioParticular = usuarioh.find(nickU); return usuarioParticular.mostrarInfoCanal(); } public DtVideo obtenerInfoAdicVideo(String nickname, String 
nombreVideo) { Usuario usuarioParticular = usuarioh.find(nickname); return usuarioParticular.obtenerInfoAdicVideo(nombreVideo); } public Boolean memberVideoEnUsuario(String nickname, String nombreVideo) { Usuario usuarioParticular = usuarioh.find(nickname); return usuarioParticular.memberVideoEnUsuario(nombreVideo); } public String[] listarUsuariosQueSigue(String nickname) { Usuario usuarioParticular = usuarioh.find(nickname); return usuarioParticular.listarUsuariosQueSigue(); } public String[] listarUsuariosQueLeSigue(String nickname) { Usuario usuarioParticular = usuarioh.find(nickname); return usuarioParticular.listarUsuariosQueLeSigue(); } public String[] listarVideosListaReproduccionUsuario(String nickname, String nombreLista) { Usuario usuarioParticular = usuarioh.find(nickname); return usuarioParticular.listarVideosListaReproduccionUsuario(nombreLista); } public DtVideo[] obtenerDtsVideosListaReproduccionUsuario(String nickname, String nombreLista) { Usuario usuarioParticular = usuarioh.find(nickname); return usuarioParticular.obtenerDtsVideosListaReproduccionUsuario(nombreLista); } public boolean memberVideoLista(String nicknameUsuario, int idVideo, String nombreListaReproduccion) { Usuario usuarioParticular = usuarioh.find(nicknameUsuario); return usuarioParticular.memberVideoLista(idVideo, nombreListaReproduccion); } public void modificarDatosCanal(String nickname, String nombreCanal, String descripcion, Privacidad privacidad, String catE) { Usuario usuarioParticular = usuarioh.find(nickname); usuarioParticular.modificarDatosCanal(nombreCanal, descripcion, privacidad, catE); } public String[] listarLDRParticularesdeUsuario(String nickname) { Usuario usuarioParticular = usuarioh.find(nickname); return usuarioParticular.listarLDRParticularesdeUsuario(); } public DtListaReproduccion[] listarLDRPublicasPorNombre(String nombre) { return usuarioh.listarLDRPublicasPorNombre(nombre); } @Override public DtCanal[] listarCanalesPublicosPorNombre(String nombre) { return usuarioh.listarCanalesPublicosPorNombre(nombre); } @Override public DtListaReproduccion[] infoLDRdeUsuario(String filtro, String nickU, Privacidad priv) { Usuario usuarioParticular = usuarioh.find(nickU); return usuarioParticular.infoLDRdeUsuario(filtro, priv); } public boolean verificarLogin(String nick, String pass) { Usuario usuarioParticular = usuarioh.find(nick); return usuarioParticular != null && usuarioParticular.getPassword().equals(pass); } @Override public void bajaUsuario(String nick) { Usuario usrEliminar = usuarioh.find(nick); //dejar de seguir a usr String [] seguidos = listarUsuariosQueSigue(nick); for(String entry: seguidos){ dejarUsuario(nick, entry); } String [] seguidores = listarUsuariosQueLeSigue(nick); for(String entry: seguidores){ dejarUsuario(entry,nick); } //quitar videos listas de rep String[] listas = listarLDRdeUsuario(nick); for(String nomLista: listas){ usrEliminar.eliminarTodosVideoLista(nomLista); //eliminar lista de rep?? } //borrar comentarios en otros videos y valoraciones VideoHandler videoH = VideoHandler.getInstance(); videoH.eliminarComentariosYValoraciones(usrEliminar); //quitar videos canal String[] videosCanal = listarVideosCanal(nick); for(String nomVideo:videosCanal){ // borrar comentarios de sus videos? 
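			// The user's own comments and ratings on other people's videos were already removed
			// above via eliminarComentariosYValoraciones; deleting each video here should also
			// take its comment tree with it, so no separate cleanup is attempted for those.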
System.out.println("eliminando video.."+nomVideo); usrEliminar.eliminarVideo(nomVideo); //borra video del canal y handler } usuarioh.removerUsuario(usrEliminar); } public DtVideoHistorial[] listarVideoHistorial(String nick) { Usuario usuarioParticular = usuarioh.find(nick); return usuarioParticular.listarVideoHistorial(); } public void agregarVisita(Integer id_video, String nick) { Usuario usuarioParticular = usuarioh.find(nick); usuarioParticular.agregarVisita(id_video); } public uyTubePersistencia.Usuario persistirUsuario(String nickname) { Usuario usuarioParticular = usuarioh.find(nickname); return usuarioParticular.persistir(); } } <file_sep>package uytube.admin.usuarios; import java.awt.EventQueue; import javax.swing.DefaultListModel; import javax.swing.JInternalFrame; import java.awt.FlowLayout; import java.util.Map; import java.util.Map.Entry; import javax.swing.JLabel; import javax.swing.JList; import javax.swing.ListSelectionModel; import uyTubePersistencia.Canal; import uyTubePersistencia.ListaReproduccion; import uyTubePersistencia.PersistenciaCtrl; import uyTubePersistencia.Usuario; import uyTubePersistencia.Video; import uytube.admin.adminPrincipal; import uytube.admin.listas.ConsultaListaInternalFrame; import uytubeLogic.logica.Puntuacion; import uytubeLogic.logica.SystemHandler.Privacidad; import javax.swing.JTable; import javax.swing.table.DefaultTableModel; import javax.swing.border.TitledBorder; import java.awt.event.MouseAdapter; import java.awt.event.MouseEvent; import java.awt.GridLayout; import javax.swing.JButton; import java.awt.event.ActionListener; import java.awt.event.ActionEvent; import javax.swing.JPanel; import javax.swing.JTextField; import javax.swing.JTextPane; import javax.swing.JScrollPane; import javax.swing.ScrollPaneConstants; import javax.swing.JComboBox; import com.toedter.calendar.JDateChooser; public class verUsuariosEliminados extends JInternalFrame { private JTable tableUsuarios; private JLabel lblListas; private JTable tableListas; private JTable tableVideos; private JPanel panel_2; private JButton btnVerInfoLista; private JPanel panel_3; private JPanel panel_4; private JButton btnVerInfoVideo; private Map<String, ListaReproduccion> userL; private Map<String, Video> userV; private JPanel panel_5; private JLabel label_8; private JTextField textFieldNombreC; private JLabel label_9; private JTextField textFieldPrivacidad; private JLabel label_10; private JScrollPane scrollPane; private JPanel panel_1; private JLabel label_5; private JLabel label_6; private JLabel label_7; private JTextField textFieldNombre; private JLabel label_11; private JTextField textFieldApellido; private JLabel label_12; private JTextField textFieldNick; private JTextField textFieldEmail; private JTextField textFieldFechanac; private JLabel lblId; private JTextField textFieldIdCanal; private JTextPane textPane; /** * Launch the application. */ public static void main(String[] args) { EventQueue.invokeLater(new Runnable() { public void run() { try { verUsuariosEliminados frame = new verUsuariosEliminados(); frame.setVisible(true); } catch (Exception e) { e.printStackTrace(); } } }); } /** * Create the frame. 
*/ public void limpiarTabla(JTable tabla){ DefaultTableModel tb = (DefaultTableModel) tabla.getModel(); int a = tabla.getRowCount()-1; for (int i = a; i>= 0; i--) { tb.removeRow(tb.getRowCount()-1); } } DefaultTableModel ModeloUsuario() { DefaultTableModel model = new DefaultTableModel(); model.addColumn("ID"); model.addColumn("Nick"); return model; }; DefaultTableModel ModeloListas() { DefaultTableModel model = new DefaultTableModel(); model.addColumn("ID"); model.addColumn("Nombre"); return model; }; DefaultTableModel ModeloVideos() { DefaultTableModel model = new DefaultTableModel(); model.addColumn("ID"); model.addColumn("Nombre"); return model; }; DefaultTableModel ModeloUsuarioDatos() { DefaultTableModel model = new DefaultTableModel(); model.addColumn("Nick"); model.addColumn("Email"); model.addColumn("Nombre"); model.addColumn("APellido"); model.addColumn("Fecha Nacimiento"); return model; }; DefaultTableModel ModeloCanal() { DefaultTableModel model = new DefaultTableModel(); model.addColumn("ID"); model.addColumn("Nombre"); model.addColumn("Descripcion"); model.addColumn("Privacidad"); return model; }; public verUsuariosEliminados() { setTitle("Usuarios Eliminados"); setClosable(true); setResizable(true); setIconifiable(true); setMaximizable(true); setBounds(100, 100, 529, 640); getContentPane().setLayout(new GridLayout(0, 1, 5, 5)); panel_2 = new JPanel(); getContentPane().add(panel_2); panel_2.setLayout(new FlowLayout(FlowLayout.CENTER, 5, 5)); JLabel lblUsuarios = new JLabel("Usuarios"); panel_2.add(lblUsuarios); tableUsuarios = new JTable(ModeloUsuario()); panel_2.add(tableUsuarios); tableUsuarios.setSelectionMode(ListSelectionModel.SINGLE_SELECTION); tableUsuarios.addMouseListener(new MouseAdapter() { @Override public void mouseClicked(MouseEvent arg0) { cargarDatosUsuario(); } }); tableUsuarios.setAutoscrolls(true); panel_1 = new JPanel(); panel_1.setBorder(new TitledBorder(null, "Datos usuario", TitledBorder.LEADING, TitledBorder.TOP, null, null)); getContentPane().add(panel_1); panel_1.setLayout(new GridLayout(0, 2, 2, 1)); label_5 = new JLabel("Nickname"); panel_1.add(label_5); textFieldNick = new JTextField(); textFieldNick.setEditable(false); textFieldNick.setColumns(10); panel_1.add(textFieldNick); label_6 = new JLabel("Email"); panel_1.add(label_6); textFieldEmail = new JTextField(); textFieldEmail.setText(""); textFieldEmail.setEditable(false); textFieldEmail.setColumns(10); panel_1.add(textFieldEmail); label_7 = new JLabel("Nombre"); panel_1.add(label_7); textFieldNombre = new JTextField(); textFieldNombre.setText(""); textFieldNombre.setEditable(false); textFieldNombre.setColumns(10); panel_1.add(textFieldNombre); label_11 = new JLabel("Apellido"); panel_1.add(label_11); textFieldApellido = new JTextField(); textFieldApellido.setText(""); textFieldApellido.setEditable(false); textFieldApellido.setColumns(10); panel_1.add(textFieldApellido); label_12 = new JLabel("Fecha Nac."); panel_1.add(label_12); textFieldFechanac = new JTextField(); textFieldFechanac.setEditable(false); panel_1.add(textFieldFechanac); textFieldFechanac.setColumns(10); panel_5 = new JPanel(); panel_5.setBorder(new TitledBorder(null, "Datos canal", TitledBorder.LEADING, TitledBorder.TOP, null, null)); getContentPane().add(panel_5); panel_5.setLayout(new GridLayout(0, 2, 2, 5)); lblId = new JLabel("ID"); panel_5.add(lblId); textFieldIdCanal = new JTextField(); textFieldIdCanal.setEditable(false); panel_5.add(textFieldIdCanal); textFieldIdCanal.setColumns(10); label_8 = new JLabel("Nombre"); 
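		// Every field in this "Datos canal" panel is read-only; the values are filled in by
		// cargarDatosUsuario() when a row of the users table above is selected.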
panel_5.add(label_8); textFieldNombreC = new JTextField(); textFieldNombreC.setText(""); textFieldNombreC.setEditable(false); textFieldNombreC.setColumns(10); panel_5.add(textFieldNombreC); label_9 = new JLabel("Privacidad"); panel_5.add(label_9); textFieldPrivacidad = new JTextField(); textFieldPrivacidad.setText((String) null); textFieldPrivacidad.setEditable(false); textFieldPrivacidad.setColumns(10); panel_5.add(textFieldPrivacidad); label_10 = new JLabel("Descripción"); panel_5.add(label_10); scrollPane = new JScrollPane(); scrollPane.setVerticalScrollBarPolicy(ScrollPaneConstants.VERTICAL_SCROLLBAR_ALWAYS); panel_5.add(scrollPane); textPane = new JTextPane(); textPane.setText(""); textPane.setEditable(false); scrollPane.setViewportView(textPane); panel_3 = new JPanel(); getContentPane().add(panel_3); panel_3.setLayout(new FlowLayout(FlowLayout.CENTER, 5, 5)); lblListas = new JLabel("Listas"); panel_3.add(lblListas); tableListas = new JTable(ModeloListas()); panel_3.add(tableListas); tableListas.setSelectionMode(ListSelectionModel.SINGLE_SELECTION); btnVerInfoLista = new JButton("Ver Info Lista"); btnVerInfoLista.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent arg0) { if(tableListas.getSelectedRow()>=0){ DefaultTableModel tm = (DefaultTableModel) tableListas.getModel(); String idLista= (String) tm.getValueAt(tableListas.getSelectedRow(),1); ListaReproduccion listaR = userL.get(idLista); VerInfoListaEliminada listaIFrame = new VerInfoListaEliminada(listaR); adminPrincipal.getFrames()[0].setLayout(null); adminPrincipal.getFrames()[0].add(listaIFrame); listaIFrame.show(); } } }); panel_3.add(btnVerInfoLista); panel_4 = new JPanel(); getContentPane().add(panel_4); panel_4.setLayout(new FlowLayout(FlowLayout.CENTER, 5, 5)); JLabel lblVideos = new JLabel("Videos"); panel_4.add(lblVideos); tableVideos = new JTable(ModeloVideos()); panel_4.add(tableVideos); tableVideos.setSelectionMode(ListSelectionModel.SINGLE_SELECTION); btnVerInfoVideo = new JButton("Ver Info Video"); btnVerInfoVideo.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent arg0) { if(tableVideos.getSelectedRow()>=0){ DefaultTableModel tm = (DefaultTableModel) tableVideos.getModel(); String idVideo= (String) tm.getValueAt(tableVideos.getSelectedRow(),1); Video videoE = userV.get(idVideo); // VerInfoVideoEliminado videoIFrame = new VerInfoVideoEliminado(videoE); // adminPrincipal.getFrames()[0].setLayout(null); // adminPrincipal.getFrames()[0].add(videoIFrame); // videoIFrame.show(); } } }); panel_4.add(btnVerInfoVideo); cargarUsuarios(); } public void cargarUsuarios(){ PersistenciaCtrl p = new PersistenciaCtrl(); Map<Integer, String> usrPersistidos= p.listarUsuariosPersistidos(); DefaultTableModel modelo_usuarios= (DefaultTableModel) tableUsuarios.getModel(); modelo_usuarios.setRowCount(0); if(!usrPersistidos.isEmpty()){ for(Map.Entry<Integer,String> entry : usrPersistidos.entrySet()){ modelo_usuarios.addRow(new Object[]{entry.getKey(),entry.getValue()}); } }else{ modelo_usuarios.addRow(new Object[]{0,"No hay usuarios"}); } } public void cargarDatosUsuario(){ System.out.println("Quiero mostrar datos user"); if(tableUsuarios.getSelectedRow()>=0){ PersistenciaCtrl p = new PersistenciaCtrl(); DefaultTableModel tm = (DefaultTableModel) tableUsuarios.getModel(); Integer dato= (Integer) tm.getValueAt(tableUsuarios.getSelectedRow(),0); Usuario user = p.getInfoUsuario(dato); if(user!= null){ Canal userC = user.getCanalPropio(); //cargar datos usr
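				// "user" is a persisted snapshot of a deleted account, loaded through
				// PersistenciaCtrl.getInfoUsuario(id); the form fields are read-only because the
				// data is shown for consultation only.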
textFieldApellido.setText(user.getApellido()); textFieldEmail.setText(user.getEmail()); textFieldFechanac.setText(user.getFechaNacimiento()); textFieldNick.setText(user.getNickname()); textFieldNombre.setText(user.getNombre()); //cargar datos canal textFieldNombreC.setText(userC.getNombre()); textFieldIdCanal.setText(Integer.toString(userC.getIdCanal())); if(userC.getPrivacidadCanal() == Privacidad.PRIVADO){ textFieldPrivacidad.setText("Privado"); }else{ textFieldPrivacidad.setText("Publico"); } textPane.setText(userC.getDescripcion()); //cargar listas y videos userL = userC.getListasReproduccion(); userV = userC.getVideos(); DefaultTableModel modeloListas= (DefaultTableModel) tableListas.getModel(); modeloListas.setRowCount(0); DefaultTableModel modeloVideos= (DefaultTableModel) tableVideos.getModel(); modeloVideos.setRowCount(0); for (Entry<String, ListaReproduccion> entry : userL.entrySet()) { modeloListas.addRow(new Object[]{entry.getValue().getIdListaRep(),entry.getValue().getNombre()}); /* for (Entry<Integer, Video> entryV : entry.getValue().getVideos().entrySet()) { modeloVideos.addRow(new Object[]{entryV.getValue().getIdVideo(),entryV.getValue().getNombre()}); }*/ } for (Entry<String, Video> entryV : userV.entrySet()) { modeloVideos.addRow(new Object[]{entryV.getValue().getIdVideo(),entryV.getValue().getNombre()}); } } } } } <file_sep>package uytubeLogic.logica; import uytubeLogic.logica.SystemHandler.Privacidad; public interface IUsuarioCtrl { public abstract String[] listarNicknamesUsuarios(); public abstract String[] listarVideosCanal(String nickU); public abstract boolean nuevaListaPorDefecto(String nombreL); public abstract void nuevaListaParticular(String nickU, String nombreL, Privacidad privada); public abstract String[] listarLDRdeUsuario(String nickU); public abstract void agregarVideoLista(String nickU, Integer id_video, String nombreLDR); public abstract Integer getTamanioArbol(DtComentario[] comments); public abstract void cambiarPrivLDR(String nickU, String nombreL, Privacidad privE); public abstract void seguirUsuario(String Usu1, String Usu2); public abstract void dejarUsuario(String Usu1, String Usu2); public abstract void aniadirVideo(String nickU, String nombreV, String descripcionV, Integer duracionV, DtFecha fechaPublicacionV, String urlV, DtCategoria catV, Privacidad privacidadV); public abstract void ingresarNuevosDatosVideo(String nickU, String nuevoNombreV, String nuevaDescripcionV, int nuevaDuracionV, DtFecha nuevaFechaPublicacionV, String nuevaURL, DtCategoria nuevaCatV, Privacidad nuevaPrivacidadV); public abstract Boolean verificarDispUsuario(String nickU, String email); public abstract void nuevoUsuario(String nickU, String pass, String nombreU, String apellidoU, String emailU, DtFecha fechaNacimientoU, byte[] fotoU, String nombreCanal, String descripcionCanal, Privacidad privacidadCanal, String categoriaCanal); public abstract void editarDatosUsuario(String nickU, String nuevoNombreU, String nuevoApeliidoU, DtFecha nuevaFechaNacU, byte[] nuevaFotoU); public abstract DtListaReproduccion infoAdicLDR(String nickU, String nombreL); public abstract DtUsuario listarDatosUsuario(String nickU); public abstract DtCanal mostrarInfoCanal(String nickU); public abstract Boolean memberListaReproduccionDefecto(String nombreLista); public abstract Boolean memberListaReproduccionPropia(String nickU, String nombreLista); public abstract Boolean memberUsuario(String nickU); public abstract DtVideo obtenerInfoAdicVideo(String nickname, String nombreVideo); public 
abstract Boolean memberVideoEnUsuario(String nickname, String nombreVideo); public abstract String[] listarUsuariosQueSigue(String nickname); public abstract String[] listarUsuariosQueLeSigue(String nickname); public abstract String[] listarVideosListaReproduccionUsuario(String nickname, String nombreLista); public abstract void eliminarVideoLista(String nickU, Integer id_video, String nombreLDR); public abstract DtVideo[] obtenerDtsVideosListaReproduccionUsuario(String nickname, String nombreLista); public abstract boolean memberVideoLista(String nicknameUsuario, int idVideo, String nombreListaReproduccion); public abstract void modificarDatosCanal(String nickname, String nombreCanal, String descripcion, Privacidad privacidad, String catE); public abstract String[] listarLDRParticularesdeUsuario(String nickname); public abstract DtCanal[] listarCanalesPublicosPorNombre(String nombre); public abstract DtVideo[] infoVideosCanal(String filtro, String nickU, Privacidad priv); public abstract DtListaReproduccion[] infoLDRdeUsuario(String filtro, String nickU, Privacidad priv); public abstract DtListaReproduccion[] listarLDRPublicasPorNombre(String nombre); public abstract boolean verificarLogin(String nick, String pass); public abstract void bajaUsuario(String nick); public abstract void agregarVisita(Integer id_video, String nick); public abstract DtVideoHistorial[] listarVideoHistorial(String nick); public abstract uyTubePersistencia.Usuario persistirUsuario(String nickname); } <file_sep> package uytubeLogica.publicar; /** * Please modify this class to meet your needs * This class is not complete */ import java.io.File; import java.net.MalformedURLException; import java.net.URL; import javax.xml.namespace.QName; import javax.jws.WebMethod; import javax.jws.WebParam; import javax.jws.WebResult; import javax.jws.WebService; import javax.jws.soap.SOAPBinding; import javax.xml.bind.annotation.XmlSeeAlso; import javax.xml.ws.Action; /** * This class was generated by Apache CXF 3.2.6 * 2018-11-14T13:58:41.679-03:00 * Generated source version: 3.2.6 * */ public final class WebServices_WebServicesPort_Client { private static final QName SERVICE_NAME = new QName("http://publicar.uytubeLogica/", "WebServicesService"); private WebServices_WebServicesPort_Client() { } public static void main(String args[]) throws java.lang.Exception { URL wsdlURL = WebServicesService.WSDL_LOCATION; if (args.length > 0 && args[0] != null && !"".equals(args[0])) { File wsdlFile = new File(args[0]); try { if (wsdlFile.exists()) { wsdlURL = wsdlFile.toURI().toURL(); } else { wsdlURL = new URL(args[0]); } } catch (MalformedURLException e) { e.printStackTrace(); } } WebServicesService ss = new WebServicesService(wsdlURL, SERVICE_NAME); WebServices port = ss.getWebServicesPort(); { System.out.println("Invoking cambiarPrivLDR..."); java.lang.String _cambiarPrivLDR_arg0 = "_cambiarPrivLDR_arg0-1072235729"; java.lang.String _cambiarPrivLDR_arg1 = "_cambiarPrivLDR_arg11227159394"; uytubeLogica.publicar.Privacidad _cambiarPrivLDR_arg2 = uytubeLogica.publicar.Privacidad.PRIVADO; port.cambiarPrivLDR(_cambiarPrivLDR_arg0, _cambiarPrivLDR_arg1, _cambiarPrivLDR_arg2); } { System.out.println("Invoking operacionPrueba..."); port.operacionPrueba(); } { System.out.println("Invoking listarLDRPublicasPorNombre..."); java.lang.String _listarLDRPublicasPorNombre_arg0 = "_listarLDRPublicasPorNombre_arg0-643517274"; uytubeLogica.publicar.DtListaReproduccionArray _listarLDRPublicasPorNombre__return = 
port.listarLDRPublicasPorNombre(_listarLDRPublicasPorNombre_arg0); System.out.println("listarLDRPublicasPorNombre.result=" + _listarLDRPublicasPorNombre__return); } { System.out.println("Invoking listarLDRPorCategoria..."); java.lang.String _listarLDRPorCategoria_arg0 = "_listarLDRPorCategoria_arg0739741563"; uytubeLogica.publicar.Privacidad _listarLDRPorCategoria_arg1 = uytubeLogica.publicar.Privacidad.PUBLICO; java.lang.String _listarLDRPorCategoria_arg2 = "_listarLDRPorCategoria_arg2-843542554"; uytubeLogica.publicar.DtListaReproduccionArray _listarLDRPorCategoria__return = port.listarLDRPorCategoria(_listarLDRPorCategoria_arg0, _listarLDRPorCategoria_arg1, _listarLDRPorCategoria_arg2); System.out.println("listarLDRPorCategoria.result=" + _listarLDRPorCategoria__return); } { System.out.println("Invoking verificarDispUsuario..."); java.lang.String _verificarDispUsuario_arg0 = "_verificarDispUsuario_arg0-492884628"; java.lang.String _verificarDispUsuario_arg1 = "_verificarDispUsuario_arg1490620539"; boolean _verificarDispUsuario__return = port.verificarDispUsuario(_verificarDispUsuario_arg0, _verificarDispUsuario_arg1); System.out.println("verificarDispUsuario.result=" + _verificarDispUsuario__return); } { System.out.println("Invoking listarUsuariosQueSigue..."); java.lang.String _listarUsuariosQueSigue_arg0 = "_listarUsuariosQueSigue_arg01819410053"; net.java.dev.jaxb.array.StringArray _listarUsuariosQueSigue__return = port.listarUsuariosQueSigue(_listarUsuariosQueSigue_arg0); System.out.println("listarUsuariosQueSigue.result=" + _listarUsuariosQueSigue__return); } { System.out.println("Invoking listarCanalesPublicosPorNombre..."); java.lang.String _listarCanalesPublicosPorNombre_arg0 = "_listarCanalesPublicosPorNombre_arg0190104812"; uytubeLogica.publicar.DtCanalArray _listarCanalesPublicosPorNombre__return = port.listarCanalesPublicosPorNombre(_listarCanalesPublicosPorNombre_arg0); System.out.println("listarCanalesPublicosPorNombre.result=" + _listarCanalesPublicosPorNombre__return); } { System.out.println("Invoking listarDatosUsuario..."); java.lang.String _listarDatosUsuario_arg0 = "_listarDatosUsuario_arg0-1375768773"; uytubeLogica.publicar.DtUsuario _listarDatosUsuario__return = port.listarDatosUsuario(_listarDatosUsuario_arg0); System.out.println("listarDatosUsuario.result=" + _listarDatosUsuario__return); } { System.out.println("Invoking seguirUsuario..."); java.lang.String _seguirUsuario_arg0 = "_seguirUsuario_arg01443210613"; java.lang.String _seguirUsuario_arg1 = "_seguirUsuario_arg1-274162441"; port.seguirUsuario(_seguirUsuario_arg0, _seguirUsuario_arg1); } { System.out.println("Invoking valorarVideo..."); int _valorarVideo_arg0 = 1419933030; java.lang.String _valorarVideo_arg1 = "_valorarVideo_arg1-406794180"; boolean _valorarVideo_arg2 = true; port.valorarVideo(_valorarVideo_arg0, _valorarVideo_arg1, _valorarVideo_arg2); } { System.out.println("Invoking infoLDRdeUsuario..."); java.lang.String _infoLDRdeUsuario_arg0 = "_infoLDRdeUsuario_arg01750182343"; java.lang.String _infoLDRdeUsuario_arg1 = "_infoLDRdeUsuario_arg1242894855"; uytubeLogica.publicar.Privacidad _infoLDRdeUsuario_arg2 = uytubeLogica.publicar.Privacidad.PUBLICO; uytubeLogica.publicar.DtListaReproduccionArray _infoLDRdeUsuario__return = port.infoLDRdeUsuario(_infoLDRdeUsuario_arg0, _infoLDRdeUsuario_arg1, _infoLDRdeUsuario_arg2); System.out.println("infoLDRdeUsuario.result=" + _infoLDRdeUsuario__return); } { System.out.println("Invoking mostrarInfoCanal..."); java.lang.String _mostrarInfoCanal_arg0 = 
"_mostrarInfoCanal_arg0-1918009144"; uytubeLogica.publicar.DtCanal _mostrarInfoCanal__return = port.mostrarInfoCanal(_mostrarInfoCanal_arg0); System.out.println("mostrarInfoCanal.result=" + _mostrarInfoCanal__return); } { System.out.println("Invoking agregarVideoLista..."); java.lang.String _agregarVideoLista_arg0 = "_agregarVideoLista_arg0-1975270910"; int _agregarVideoLista_arg1 = -921359534; java.lang.String _agregarVideoLista_arg2 = "_agregarVideoLista_arg22015835635"; port.agregarVideoLista(_agregarVideoLista_arg0, _agregarVideoLista_arg1, _agregarVideoLista_arg2); } { System.out.println("Invoking verDetallesVideoExt..."); int _verDetallesVideoExt_arg0 = -543994059; uytubeLogica.publicar.DtInfoVideo _verDetallesVideoExt__return = port.verDetallesVideoExt(_verDetallesVideoExt_arg0); System.out.println("verDetallesVideoExt.result=" + _verDetallesVideoExt__return); } { System.out.println("Invoking responderComentario..."); int _responderComentario_arg0 = 77276956; int _responderComentario_arg1 = -1181704681; java.lang.String _responderComentario_arg2 = "_responderComentario_arg2-191789415"; uytubeLogica.publicar.DtFecha _responderComentario_arg3 = new uytubeLogica.publicar.DtFecha(); _responderComentario_arg3.setFecha(javax.xml.datatype.DatatypeFactory.newInstance().newXMLGregorianCalendar("2018-10-28T15:58:54.617-03:00")); java.lang.String _responderComentario_arg4 = "_responderComentario_arg4200268746"; port.responderComentario(_responderComentario_arg0, _responderComentario_arg1, _responderComentario_arg2, _responderComentario_arg3, _responderComentario_arg4); } { System.out.println("Invoking listarVideosPorCategoria..."); java.lang.String _listarVideosPorCategoria_arg0 = "_listarVideosPorCategoria_arg0-51506105"; uytubeLogica.publicar.Privacidad _listarVideosPorCategoria_arg1 = uytubeLogica.publicar.Privacidad.PUBLICO; java.lang.String _listarVideosPorCategoria_arg2 = "_listarVideosPorCategoria_arg2-1235399179"; uytubeLogica.publicar.DtVideoArray _listarVideosPorCategoria__return = port.listarVideosPorCategoria(_listarVideosPorCategoria_arg0, _listarVideosPorCategoria_arg1, _listarVideosPorCategoria_arg2); System.out.println("listarVideosPorCategoria.result=" + _listarVideosPorCategoria__return); } { System.out.println("Invoking nuevoUsuario..."); java.lang.String _nuevoUsuario_arg0 = "_nuevoUsuario_arg055103854"; java.lang.String _nuevoUsuario_arg1 = "_nuevoUsuario_arg11608553735"; java.lang.String _nuevoUsuario_arg2 = "_nuevoUsuario_arg22083148738"; java.lang.String _nuevoUsuario_arg3 = "_nuevoUsuario_arg3-1968314167"; java.lang.String _nuevoUsuario_arg4 = "_nuevoUsuario_arg4634575223"; uytubeLogica.publicar.DtFecha _nuevoUsuario_arg5 = new uytubeLogica.publicar.DtFecha(); _nuevoUsuario_arg5.setFecha(javax.xml.datatype.DatatypeFactory.newInstance().newXMLGregorianCalendar("2018-10-28T15:58:54.620-03:00")); byte[] _nuevoUsuario_arg6 = new byte[] {}; java.lang.String _nuevoUsuario_arg7 = "_nuevoUsuario_arg7-989417210"; java.lang.String _nuevoUsuario_arg8 = "_nuevoUsuario_arg8847414784"; uytubeLogica.publicar.Privacidad _nuevoUsuario_arg9 = uytubeLogica.publicar.Privacidad.PRIVADO; java.lang.String _nuevoUsuario_arg10 = "_nuevoUsuario_arg10-969873261"; port.nuevoUsuario(_nuevoUsuario_arg0, _nuevoUsuario_arg1, _nuevoUsuario_arg2, _nuevoUsuario_arg3, _nuevoUsuario_arg4, _nuevoUsuario_arg5, _nuevoUsuario_arg6, _nuevoUsuario_arg7, _nuevoUsuario_arg8, _nuevoUsuario_arg9, _nuevoUsuario_arg10); } { System.out.println("Invoking verificarLogin..."); java.lang.String _verificarLogin_arg0 = 
"_verificarLogin_arg0-1818427100"; java.lang.String _verificarLogin_arg1 = "_verificarLogin_arg1-1606174415"; boolean _verificarLogin__return = port.verificarLogin(_verificarLogin_arg0, _verificarLogin_arg1); System.out.println("verificarLogin.result=" + _verificarLogin__return); } { System.out.println("Invoking nuevaListaParticular..."); java.lang.String _nuevaListaParticular_arg0 = "_nuevaListaParticular_arg078796782"; java.lang.String _nuevaListaParticular_arg1 = "_nuevaListaParticular_arg1148960747"; uytubeLogica.publicar.Privacidad _nuevaListaParticular_arg2 = uytubeLogica.publicar.Privacidad.PUBLICO; port.nuevaListaParticular(_nuevaListaParticular_arg0, _nuevaListaParticular_arg1, _nuevaListaParticular_arg2); } { System.out.println("Invoking nuevoComentario..."); int _nuevoComentario_arg0 = 287137333; java.lang.String _nuevoComentario_arg1 = "_nuevoComentario_arg1327227570"; uytubeLogica.publicar.DtFecha _nuevoComentario_arg2 = new uytubeLogica.publicar.DtFecha(); _nuevoComentario_arg2.setFecha(javax.xml.datatype.DatatypeFactory.newInstance().newXMLGregorianCalendar("2018-10-28T15:58:54.622-03:00")); java.lang.String _nuevoComentario_arg3 = "_nuevoComentario_arg31218084997"; port.nuevoComentario(_nuevoComentario_arg0, _nuevoComentario_arg1, _nuevoComentario_arg2, _nuevoComentario_arg3); } { System.out.println("Invoking eliminarVideoLista..."); java.lang.String _eliminarVideoLista_arg0 = "_eliminarVideoLista_arg0-1212959072"; int _eliminarVideoLista_arg1 = -2046931310; java.lang.String _eliminarVideoLista_arg2 = "_eliminarVideoLista_arg21432646140"; port.eliminarVideoLista(_eliminarVideoLista_arg0, _eliminarVideoLista_arg1, _eliminarVideoLista_arg2); } { System.out.println("Invoking listarVideoListaReproduccion..."); java.lang.String _listarVideoListaReproduccion_arg0 = "_listarVideoListaReproduccion_arg01027966697"; java.lang.String _listarVideoListaReproduccion_arg1 = "_listarVideoListaReproduccion_arg1917926675"; uytubeLogica.publicar.DtVideoArray _listarVideoListaReproduccion__return = port.listarVideoListaReproduccion(_listarVideoListaReproduccion_arg0, _listarVideoListaReproduccion_arg1); System.out.println("listarVideoListaReproduccion.result=" + _listarVideoListaReproduccion__return); } { System.out.println("Invoking getEstadoValoracion..."); int _getEstadoValoracion_arg0 = -2087926085; java.lang.String _getEstadoValoracion_arg1 = "_getEstadoValoracion_arg1-1565630471"; java.lang.String _getEstadoValoracion__return = port.getEstadoValoracion(_getEstadoValoracion_arg0, _getEstadoValoracion_arg1); System.out.println("getEstadoValoracion.result=" + _getEstadoValoracion__return); } { System.out.println("Invoking listarCategorias..."); uytubeLogica.publicar.DtCategoriaArray _listarCategorias__return = port.listarCategorias(); System.out.println("listarCategorias.result=" + _listarCategorias__return); } { System.out.println("Invoking memberListaReproduccionPropia..."); java.lang.String _memberListaReproduccionPropia_arg0 = "_memberListaReproduccionPropia_arg074896647"; java.lang.String _memberListaReproduccionPropia_arg1 = "_memberListaReproduccionPropia_arg11075502267"; boolean _memberListaReproduccionPropia__return = port.memberListaReproduccionPropia(_memberListaReproduccionPropia_arg0, _memberListaReproduccionPropia_arg1); System.out.println("memberListaReproduccionPropia.result=" + _memberListaReproduccionPropia__return); } { System.out.println("Invoking listarComentarios..."); int _listarComentarios_arg0 = -307412844; uytubeLogica.publicar.DtComentarioArray 
_listarComentarios__return = port.listarComentarios(_listarComentarios_arg0); System.out.println("listarComentarios.result=" + _listarComentarios__return); } { System.out.println("Invoking obtenerDtsVideosListaReproduccionUsuario..."); java.lang.String _obtenerDtsVideosListaReproduccionUsuario_arg0 = "_obtenerDtsVideosListaReproduccionUsuario_arg0-2023825120"; java.lang.String _obtenerDtsVideosListaReproduccionUsuario_arg1 = "_obtenerDtsVideosListaReproduccionUsuario_arg1835243095"; uytubeLogica.publicar.DtVideoArray _obtenerDtsVideosListaReproduccionUsuario__return = port.obtenerDtsVideosListaReproduccionUsuario(_obtenerDtsVideosListaReproduccionUsuario_arg0, _obtenerDtsVideosListaReproduccionUsuario_arg1); System.out.println("obtenerDtsVideosListaReproduccionUsuario.result=" + _obtenerDtsVideosListaReproduccionUsuario__return); } { System.out.println("Invoking infoListaReproduccion..."); java.lang.String _infoListaReproduccion_arg0 = "_infoListaReproduccion_arg0-48989563"; java.lang.String _infoListaReproduccion_arg1 = "_infoListaReproduccion_arg1-1858211275"; uytubeLogica.publicar.DtListaReproduccion _infoListaReproduccion__return = port.infoListaReproduccion(_infoListaReproduccion_arg0, _infoListaReproduccion_arg1); System.out.println("infoListaReproduccion.result=" + _infoListaReproduccion__return); } { System.out.println("Invoking aniadirVideo..."); java.lang.String _aniadirVideo_arg0 = "_aniadirVideo_arg0814548902"; java.lang.String _aniadirVideo_arg1 = "_aniadirVideo_arg1-113898942"; java.lang.String _aniadirVideo_arg2 = "_aniadirVideo_arg21892377933"; int _aniadirVideo_arg3 = -1018891026; uytubeLogica.publicar.DtFecha _aniadirVideo_arg4 = new uytubeLogica.publicar.DtFecha(); _aniadirVideo_arg4.setFecha(javax.xml.datatype.DatatypeFactory.newInstance().newXMLGregorianCalendar("2018-10-28T15:58:54.625-03:00")); java.lang.String _aniadirVideo_arg5 = "_aniadirVideo_arg5432746039"; uytubeLogica.publicar.DtCategoria _aniadirVideo_arg6 = new uytubeLogica.publicar.DtCategoria(); _aniadirVideo_arg6.setNombre("Nombre-1207070528"); uytubeLogica.publicar.Privacidad _aniadirVideo_arg7 = uytubeLogica.publicar.Privacidad.PUBLICO; port.aniadirVideo(_aniadirVideo_arg0, _aniadirVideo_arg1, _aniadirVideo_arg2, _aniadirVideo_arg3, _aniadirVideo_arg4, _aniadirVideo_arg5, _aniadirVideo_arg6, _aniadirVideo_arg7); } { System.out.println("Invoking listarUsuariosQueLeSigue..."); java.lang.String _listarUsuariosQueLeSigue_arg0 = "_listarUsuariosQueLeSigue_arg0-1540663158"; net.java.dev.jaxb.array.StringArray _listarUsuariosQueLeSigue__return = port.listarUsuariosQueLeSigue(_listarUsuariosQueLeSigue_arg0); System.out.println("listarUsuariosQueLeSigue.result=" + _listarUsuariosQueLeSigue__return); } { System.out.println("Invoking infoVideosCanal..."); java.lang.String _infoVideosCanal_arg0 = "_infoVideosCanal_arg02068661218"; java.lang.String _infoVideosCanal_arg1 = "_infoVideosCanal_arg1-529640127"; uytubeLogica.publicar.Privacidad _infoVideosCanal_arg2 = uytubeLogica.publicar.Privacidad.PRIVADO; uytubeLogica.publicar.DtVideoArray _infoVideosCanal__return = port.infoVideosCanal(_infoVideosCanal_arg0, _infoVideosCanal_arg1, _infoVideosCanal_arg2); System.out.println("infoVideosCanal.result=" + _infoVideosCanal__return); } { System.out.println("Invoking listarLDRdeUsuario..."); java.lang.String _listarLDRdeUsuario_arg0 = "_listarLDRdeUsuario_arg01853859544"; net.java.dev.jaxb.array.StringArray _listarLDRdeUsuario__return = port.listarLDRdeUsuario(_listarLDRdeUsuario_arg0); 
System.out.println("listarLDRdeUsuario.result=" + _listarLDRdeUsuario__return); } { System.out.println("Invoking infoAddVideo..."); int _infoAddVideo_arg0 = 1251173204; uytubeLogica.publicar.DtVideo _infoAddVideo__return = port.infoAddVideo(_infoAddVideo_arg0); System.out.println("infoAddVideo.result=" + _infoAddVideo__return); } { System.out.println("Invoking cargarDatos..."); port.cargarDatos(); } { System.out.println("Invoking listarVideosPublicosPorNombre..."); java.lang.String _listarVideosPublicosPorNombre_arg0 = "_listarVideosPublicosPorNombre_arg028465617"; uytubeLogica.publicar.DtVideoArray _listarVideosPublicosPorNombre__return = port.listarVideosPublicosPorNombre(_listarVideosPublicosPorNombre_arg0); System.out.println("listarVideosPublicosPorNombre.result=" + _listarVideosPublicosPorNombre__return); } { System.out.println("Invoking dejarUsuario..."); java.lang.String _dejarUsuario_arg0 = "_dejarUsuario_arg0610707597"; java.lang.String _dejarUsuario_arg1 = "_dejarUsuario_arg1-681665690"; port.dejarUsuario(_dejarUsuario_arg0, _dejarUsuario_arg1); } System.exit(0); } } <file_sep>package uytube.admin.usuarios; import java.awt.EventQueue; import javax.swing.JInternalFrame; import java.awt.GridLayout; import java.util.Map; import java.util.Map.Entry; import javax.swing.JPanel; import javax.swing.JLabel; import javax.swing.JTable; import javax.swing.table.DefaultTableModel; import uyTubePersistencia.ListaReproduccion; import uyTubePersistencia.Video; import uytube.admin.adminPrincipal; import uytubeLogic.logica.ListaReproduccion.TipoLista; import uytubeLogic.logica.SystemHandler.Privacidad; import javax.swing.JButton; import java.awt.event.ActionListener; import java.awt.event.ActionEvent; import javax.swing.JTextField; public class VerInfoListaEliminada extends JInternalFrame { private JTable tableVideos; private Map<Integer, Video> userV; private JTextField textFieldNombre; private JTextField textFieldTipo; private JTextField textFieldprivacidad; private JTextField textFieldId; DefaultTableModel ModeloLista() { DefaultTableModel model = new DefaultTableModel(); model.addColumn("ID"); model.addColumn("Nombre"); model.addColumn("Tipo de Lista"); model.addColumn("Privacidad"); return model; }; DefaultTableModel ModeloVideos() { DefaultTableModel model = new DefaultTableModel(); model.addColumn("ID"); model.addColumn("Nombre"); return model; }; /** * Create the frame. 
*/ public VerInfoListaEliminada(ListaReproduccion lista) { setResizable(true); setMaximizable(true); setIconifiable(true); setClosable(true); setBounds(100, 100, 450, 300); getContentPane().setLayout(new GridLayout(1, 0, 5, 5)); JPanel panel = new JPanel(); getContentPane().add(panel); panel.setLayout(new GridLayout(0, 1, 0, 0)); JLabel lblDatosLista = new JLabel("Datos Lista"); panel.add(lblDatosLista); JLabel Id = new JLabel("Id:"); panel.add(Id); textFieldId = new JTextField(); textFieldId.setEditable(false); panel.add(textFieldId); JLabel label = new JLabel("Nombre:"); panel.add(label); textFieldNombre = new JTextField(); textFieldNombre.setEditable(false); panel.add(textFieldNombre); JLabel lblTipo = new JLabel("Tipo:"); panel.add(lblTipo); textFieldTipo = new JTextField(); textFieldTipo.setEditable(false); panel.add(textFieldTipo); JLabel lblPrivacidad = new JLabel("Privacidad:"); panel.add(lblPrivacidad); textFieldprivacidad = new JTextField(); textFieldprivacidad.setEditable(false); panel.add(textFieldprivacidad); JPanel panel_1 = new JPanel(); getContentPane().add(panel_1); JLabel lblVideosDeLa = new JLabel("Videos de la Lista"); panel_1.add(lblVideosDeLa); JButton btnVerInfoVideo = new JButton("Ver Info Video"); btnVerInfoVideo.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent arg0) { if(tableVideos.getSelectedRow()>=0){ DefaultTableModel tm = (DefaultTableModel) tableVideos.getModel(); Integer idVideo= (Integer) tm.getValueAt(tableVideos.getSelectedRow(),0); Video videoE = userV.get(idVideo); // VerInfoVideoEliminado videoIFrame = new VerInfoVideoEliminado(videoE); // adminPrincipal.getFrames()[0].setLayout(null); // adminPrincipal.getFrames()[0].add(videoIFrame); // videoIFrame.show(); } } }); tableVideos = new JTable(ModeloVideos()); panel_1.add(tableVideos); panel_1.add(btnVerInfoVideo); cargarDatos(lista); } public void cargarDatos(ListaReproduccion lista){ textFieldId.setText(Integer.toString(lista.getIdListaRep())); textFieldNombre.setText(lista.getNombre()); if(lista.getTipo() == TipoLista.PARTICULAR){ textFieldTipo.setText("Particular"); }else{ textFieldTipo.setText("Por defecto"); } if(lista.getPrivado() == Privacidad.PRIVADO){ textFieldprivacidad.setText("Privado"); }else{ textFieldprivacidad.setText("Publico"); } DefaultTableModel modeloVideos= (DefaultTableModel) tableVideos.getModel(); userV = lista.getVideos(); for (Entry<Integer, Video> entryV : userV.entrySet()) { modeloVideos.addRow(new Object[]{entryV.getValue().getIdVideo(),entryV.getValue().getNombre()}); } } } <file_sep>package uytube.admin.videos.modificar; import java.awt.FlowLayout; import java.awt.event.ActionEvent; import java.awt.event.ActionListener; import javax.swing.BoxLayout; import javax.swing.DefaultListModel; import javax.swing.JButton; import javax.swing.JInternalFrame; import javax.swing.JLabel; import javax.swing.JList; import javax.swing.JPanel; import javax.swing.JScrollPane; import javax.swing.JTextField; import uytubeLogic.logica.DtVideo; public final class ElegirAutorPanel { private final JPanel mainPanel = new JPanel(); private final BoxLayout panelLayout = new BoxLayout(mainPanel, BoxLayout.Y_AXIS); private final JTextField authorNicknameTextField = new JTextField(); private Integer selectedVideoId; private DtVideo[] videos = {}; private DefaultListModel<DtVideo> videoListModel = new DefaultListModel<DtVideo>(); private final JButton searchAuthorVideosButton = new JButton("Buscar"); private final JButton editButton = new JButton("Aceptar"); private final 
JButton cancelButton = new JButton("Cancelar"); private final JInternalFrame internalFrameContainer; public ElegirAutorPanel(final JInternalFrame internalFrameContainer) { this.internalFrameContainer = internalFrameContainer; initializePanel(); } private void initializePanel() { initializeSearchAuthorVideosButton(); initializeCancelButton(); initializePanelLayout(); mainPanel.setLayout(panelLayout); } private void initializeSearchAuthorVideosButton() { searchAuthorVideosButton.addActionListener(new ActionListener() { @Override public void actionPerformed(ActionEvent e) { final String authorNickname = authorNicknameTextField.getText(); // final DtVideo[] videos = controller.getVideos() // this.videos = videos; updateVideoList(); } }); } private void updateVideoList() { videoListModel.clear(); for (DtVideo video : videos) { videoListModel.addElement(video); } } private void initializeCancelButton() { cancelButton.addActionListener(new ActionListener() { @Override public void actionPerformed(ActionEvent e) { closeInternalFrameContainer(); } }); } private void closeInternalFrameContainer() { internalFrameContainer.dispose(); } public Integer getSelectedVideoId() { return selectedVideoId; } private void initializePanelLayout() { final JLabel autorNicknameLabel = new JLabel("Nickname del autor"); mainPanel.add(autorNicknameLabel); mainPanel.add(authorNicknameTextField); mainPanel.add(searchAuthorVideosButton); final JList<DtVideo> videoList = new JList<DtVideo>(videoListModel); mainPanel.add(new JScrollPane(videoList)); final JPanel buttonsPanel = new JPanel(); buttonsPanel.setLayout(new FlowLayout()); buttonsPanel.add(editButton); buttonsPanel.add(cancelButton); mainPanel.add(buttonsPanel); } public void setEditButtonActionListener(final ActionListener actionListener) { editButton.addActionListener(actionListener); } public JPanel getPanel() { return mainPanel; } } <file_sep>package uytube.admin.videos; import java.awt.EventQueue; import javax.swing.JInternalFrame; import java.awt.GridLayout; import javax.swing.JLabel; import javax.swing.JOptionPane; import javax.swing.SwingConstants; import com.toedter.calendar.JDateChooser; import uytubeLogic.logica.DtCategoria; import uytubeLogic.logica.DtFecha; import uytubeLogic.logica.IUsuarioCtrl; import uytubeLogic.logica.IVideoCtrl; import uytubeLogic.logica.SystemHandler.Privacidad; import javax.swing.JComboBox; import javax.swing.JTextField; import javax.swing.JTextArea; import javax.swing.DefaultComboBoxModel; import javax.swing.JButton; import java.awt.event.ActionListener; import java.awt.event.ActionEvent; import javax.swing.JSpinner; import javax.swing.SpinnerNumberModel; import java.util.Date; import org.eclipse.wb.swing.FocusTraversalOnArray; import java.awt.Component; public class AltaVideo extends JInternalFrame { private JTextField textFieldNombreVideo; private JTextField textFieldURL; private JDateChooser dateChooserFecha; private JTextArea textAreaDesc; private JComboBox comboBoxNicknames; private JComboBox comboBoxCategoria; private static void infoBox(String infoMessage, String titleBar){ JOptionPane.showMessageDialog(null, infoMessage, "" + titleBar, JOptionPane.INFORMATION_MESSAGE); } private void clear() { comboBoxNicknames.setSelectedIndex(-1); textFieldNombreVideo.setText(""); textFieldURL.setText(""); textAreaDesc.setText(""); comboBoxCategoria.setSelectedItem(-1); } /** * Launch the application. 
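	 * Note: this standalone main() constructs the frame with null controllers, so it
	 * will fail with a NullPointerException as soon as the constructor asks for the
	 * user nicknames; the frame is meant to be opened from the admin window with real
	 * IUsuarioCtrl / IVideoCtrl instances.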
*/ public static void main(String[] args) { EventQueue.invokeLater(new Runnable() { public void run() { try { AltaVideo frame = new AltaVideo(null,null); frame.setVisible(true); } catch (Exception e) { e.printStackTrace(); } } }); } /** * Create the frame. * @param iCU * @param iCV */ public AltaVideo(IUsuarioCtrl iCU, IVideoCtrl iCV) { setTitle("Alta Video"); setMaximizable(true); setIconifiable(true); setClosable(true); setBounds(100, 100, 450, 300); getContentPane().setLayout(new GridLayout(9, 2, 5, 5)); JLabel lblNicknameAutor = new JLabel("Nickname Autor:"); lblNicknameAutor.setHorizontalAlignment(SwingConstants.CENTER); lblNicknameAutor.setVerticalAlignment(SwingConstants.CENTER); getContentPane().add(lblNicknameAutor); comboBoxNicknames = new JComboBox(); comboBoxNicknames.setEditable(false); String[] nicknamesArray = iCU.listarNicknamesUsuarios(); comboBoxNicknames.setModel(new DefaultComboBoxModel(nicknamesArray)); getContentPane().add(comboBoxNicknames); JLabel lblNombreVideo = new JLabel("Nombre Video:"); lblNombreVideo.setHorizontalAlignment(SwingConstants.CENTER); getContentPane().add(lblNombreVideo); textFieldNombreVideo = new JTextField(); getContentPane().add(textFieldNombreVideo); textFieldNombreVideo.setColumns(10); JLabel lblNewLabel = new JLabel("URL Video:"); lblNewLabel.setHorizontalAlignment(SwingConstants.CENTER); getContentPane().add(lblNewLabel); textFieldURL = new JTextField(); getContentPane().add(textFieldURL); textFieldURL.setColumns(10); JLabel lblDescripcion = new JLabel("Descripcion:"); lblDescripcion.setHorizontalAlignment(SwingConstants.CENTER); getContentPane().add(lblDescripcion); textAreaDesc = new JTextArea(); textAreaDesc.setEditable(true); getContentPane().add(textAreaDesc); JLabel lblDuracion = new JLabel("Duracion:"); lblDuracion.setHorizontalAlignment(SwingConstants.CENTER); getContentPane().add(lblDuracion); JSpinner spinnerDuracion = new JSpinner(); spinnerDuracion.setModel(new SpinnerNumberModel(new Integer(0), new Integer(0), null, new Integer(1))); getContentPane().add(spinnerDuracion); JLabel lblFecha = new JLabel("Fecha:"); lblFecha.setHorizontalAlignment(SwingConstants.CENTER); getContentPane().add(lblFecha); dateChooserFecha = new JDateChooser(); Date fechaHoy = new Date(); dateChooserFecha.setDate(fechaHoy); getContentPane().add(dateChooserFecha); JLabel lblCategoria = new JLabel("Categoria:"); lblCategoria.setHorizontalAlignment(SwingConstants.CENTER); getContentPane().add(lblCategoria); comboBoxCategoria = new JComboBox(); //empiezo a cargar las categorias. 
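		// The category combo is populated with the names returned by listarCategorias();
		// the matching DtCategoria object is looked up again by name in the "Aceptar"
		// handler below.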
DtCategoria[] categoriasDts = iCV.listarCategorias(); String[] nombresCategoriasArray = new String[categoriasDts.length]; for(int i = 0; i < categoriasDts.length; i++) { nombresCategoriasArray[i] = categoriasDts[i].getNombre(); } comboBoxCategoria.setModel(new DefaultComboBoxModel(nombresCategoriasArray)); //termino de cargar las categorias comboBoxCategoria.setSelectedIndex(-1); getContentPane().add(comboBoxCategoria); JLabel lblPrivacidad = new JLabel("Privacidad:"); lblPrivacidad.setHorizontalAlignment(SwingConstants.CENTER); getContentPane().add(lblPrivacidad); JComboBox comboBoxPrivacidad = new JComboBox(); comboBoxPrivacidad.setEnabled(false); comboBoxPrivacidad.setModel(new DefaultComboBoxModel(new String[] {"Privado", "Publico"})); getContentPane().add(comboBoxPrivacidad); JButton btnNewButtonCancelar = new JButton("Cancelar"); btnNewButtonCancelar.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent e) { setVisible(false); dispose(); } }); getContentPane().add(btnNewButtonCancelar); JButton btnNewButtonAceptar = new JButton("Aceptar"); btnNewButtonAceptar.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent e) { String nickU = (String) comboBoxNicknames.getSelectedItem(); String nom = textFieldNombreVideo.getText(); if(iCU.memberUsuario(nickU) == true) { if(iCU.memberVideoEnUsuario(nickU, nom) != true) { if(!textFieldNombreVideo.getText().equals("")) { if(!textFieldURL.getText().equals("")) { String desc = textAreaDesc.getText(); Integer dur = (Integer) spinnerDuracion.getValue(); DtFecha fp = new DtFecha(dateChooserFecha.getDate()); String url = textFieldURL.getText(); //asigno categoria. DtCategoria catE = null; String nombreCategoria =(String) comboBoxCategoria.getSelectedItem(); //Puede haber un problema aca if(comboBoxCategoria.getSelectedIndex() == -1) { catE = null; }else { Boolean flag = false; int i = 0; while (( i < categoriasDts.length) && (flag == false)){ if(nombreCategoria == categoriasDts[i].getNombre()) { catE = categoriasDts[i]; flag = true; } i++; } } //termino de asignar categoria //Asigno privado Privacidad p = Privacidad.PUBLICO; if(comboBoxPrivacidad.getSelectedIndex()==0){ p = Privacidad.PRIVADO; }else p = Privacidad.PUBLICO; //termino de asignar privado. 
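						// Caveat: the category lookup above compares names with == instead of equals().
						// It happens to work because the combo box items are the very same String
						// instances returned by categoriasDts[i].getNombre(), but equals() would be
						// the safer choice.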
iCU.aniadirVideo(nickU, nom, desc, dur, fp, url, catE, p); infoBox("Video creado exitosamente","Exito"); clear(); int freshIndex = 0; spinnerDuracion.setValue(Integer.valueOf(freshIndex)); setVisible(false); dispose(); }else infoBox("La URL no puede estar vacia","Error"); }else infoBox("El titulo no puede estar vacio.","Error"); }else infoBox("Ya existe el nombre del video en el canal del usuario seleccionado.","Error"); }else infoBox("No existe el usuario en el sistema.","Error"); } }); getContentPane().add(btnNewButtonAceptar); getContentPane().setFocusTraversalPolicy(new FocusTraversalOnArray(new Component[]{lblNicknameAutor, comboBoxPrivacidad, lblNombreVideo, lblNewLabel, lblDescripcion, lblDuracion, lblFecha, dateChooserFecha.getCalendarButton(), lblCategoria, lblPrivacidad, comboBoxNicknames, textFieldNombreVideo, textFieldURL, textAreaDesc, spinnerDuracion, dateChooserFecha, comboBoxCategoria, btnNewButtonAceptar, btnNewButtonCancelar})); } } <file_sep>package uyTubePersistencia; import java.io.Serializable; import java.util.Map; import javax.persistence.CascadeType; import javax.persistence.CollectionTable; import javax.persistence.Column; import javax.persistence.Entity; import javax.persistence.GeneratedValue; import javax.persistence.GenerationType; import javax.persistence.Id; import javax.persistence.JoinColumn; import javax.persistence.JoinTable; import javax.persistence.MapKeyColumn; import javax.persistence.OneToMany; import uytubeLogic.logica.SystemHandler.Privacidad; @Entity public class Canal implements Serializable{ /** * */ private static final long serialVersionUID = 1L; @Id @GeneratedValue(strategy = GenerationType.AUTO) private Integer IdCanal; private String nombre; private String descripcion; private Privacidad privacidadCanal; @OneToMany(cascade=CascadeType.PERSIST) @MapKeyColumn(name="ID_VIDEOS_CANAL", table="Canal_video") @JoinTable(name="Canal_video") private Map<String, Video> videos; @OneToMany(cascade=CascadeType.PERSIST) @MapKeyColumn(name="ID_LISTAS_REP_CANAL",table="Canal_lista") @JoinTable(name="Canal_lista") private Map<String, ListaReproduccion> listasReproduccion; public Canal() { // TODO Auto-generated constructor stub } public Canal(uytubeLogic.logica.Canal canal) { this.setNombre(canal.getNombre()); this.setDescripcion(canal.getDescripcion()); this.setPrivacidadCanal(canal.getPrivacidad()); } public static long getSerialversionuid() { return serialVersionUID; } public Integer getIdCanal() { return IdCanal; } public void setIdCanal(Integer idCanal) { IdCanal = idCanal; } public String getNombre() { return nombre; } public void setNombre(String nombre) { this.nombre = nombre; } public String getDescripcion() { return descripcion; } public void setDescripcion(String descripcion) { this.descripcion = descripcion; } public Privacidad getPrivacidadCanal() { return privacidadCanal; } public void setPrivacidadCanal(Privacidad privacidadCanal) { this.privacidadCanal = privacidadCanal; } public Map<String, Video> getVideos() { return videos; } public void setVideos(Map<String, Video> videos) { this.videos = videos; } public Map<String, ListaReproduccion> getListasReproduccion() { return listasReproduccion; } public void setListasReproduccion(Map<String, ListaReproduccion> listasReproduccion) { this.listasReproduccion = listasReproduccion; } } <file_sep><%@ page language="java" contentType="text/html; charset=UTF-8" pageEncoding="UTF-8"%> <%@ page import = "uytubeLogica.publicar.DtVideo"%> <%@ page import = "uytubeLogica.publicar.DtCategoria"%> <%@ page 
import = "uytubeLogica.publicar.DtFecha"%> <%@ page import = "uytubeLogica.publicar.DtCanal"%> <%@page import="uytubeLogica.publicar.DtListaReproduccion"%> <%@ page import = "uytubeLogica.publicar.DtUsuario"%> <%@page import="java.text.SimpleDateFormat"%> <%@page import="java.text.DateFormat"%> <%@page import="java.util.Base64"%> <%@ page import = "java.util.Date"%> <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> <html> <head> <%@include file = "../cosasComunesDelHead.jsp" %> <link rel="stylesheet" type="text/css" href="media/styles/ConsultaUsuario.css"/> <script type="text/javascript" src="scriptPestanias.js">activarTab()</script> <title>Consulta Usuario</title> </head> <body> <% DtUsuario dataUsuario = (DtUsuario) request.getAttribute("dataUsuario"); String nick = dataUsuario.getNickname(); String nombre = dataUsuario.getNombre(); String apellido = dataUsuario.getApellido(); Date fechaNac = dataUsuario.getFechaNacimiento().getFecha().toGregorianCalendar().getTime(); int dia = fechaNac.getDate(); int mes = fechaNac.getMonth()+1; int anio = fechaNac.getYear() + 1900; byte[] foto = dataUsuario.getFoto(); DtCanal dataCanal = (DtCanal) request.getAttribute("dataCanal"); String nombre_canal = dataCanal.getNombre(); String descCanal= dataCanal.getDescripcion(); String categoriaCanal=dataCanal.getCategoria().getNombre(); //estos son los datos que tienen que ver con el usuario propietario y el video en sí. byte[] fotoByte = dataUsuario.getFoto(); String urlFoto = ""; if(fotoByte != null){ Base64.Encoder encoder = Base64.getEncoder(); String fotoString = encoder.encodeToString(fotoByte); urlFoto = "data:image/png;base64,"+ fotoString; } String [] seguidores = (String[])request.getAttribute("dataSeguidores"); String [] seguidos = (String[])request.getAttribute("dataSeguidos"); String[] listasReproduccion = (String[])request.getAttribute("dataListasReproduccion"); %> <%@include file="../buscador.jsp" %> <div class="main-container"> <%@include file="../sidebar.jsp" %> <div class="main-content"> <div id="contenedor"> <div id="usrDatos"> <table width="80%" id="usrDatosTable"> <tr> <td> <img class="logo" src=<%=urlFoto%> width="100px" height="70px"></img> </td> <td> <h3> <%=nick%> </h3> <br> <%=descCanal%> </td> </tr> <tr> <td>Seguidores: <%=seguidores.length %></td> <td>Seguidos: <%=seguidos.length %></td> </tr> </table> </div> <table class="tabs" data-min="0" data-max="3" width="80%"> <tr> <th class="tabcks">&nbsp;</th> <th class="tabck" id="tabck-0" onclick="activarTab(this)">Datos Personales</th> <th class="tabcks">&nbsp;</th> <th class="tabck" id="tabck-1" onclick="activarTab(this)">Videos</th> <th class="tabcks">&nbsp;</th> <th class="tabck" id="tabck-2" onclick="activarTab(this)">Listas de Reproduccion</th> <th class="tabck" id="tabck-3" onclick="activarTab(this)">Seguidores y Seguidos</th> </tr> <tr class="filadiv"> <td colspan="6" id="tab-0"> <div class="tabdiv" id="tabdiv-0"> <h3>Datos Usuario</h3><br> Nombre: <%=nombre%><br> Apellido:<%=apellido%><br> Fecha Nacimiento:<%=dia %>/<%=mes%>/<%=anio%><br> <h3>Datos Canal</h3><br> Nombre:<%=nombre_canal %> <br> Descripcion:<%=descCanal %><br> Categoria:<%=categoriaCanal %><br> </div> <div class="tabdiv" id="tabdiv-3"> <h3>Seguidores</h3> <% for(String seguidoresUsr: seguidores){ %> <ul> <li><a href="profile?opcion=Perfil&nickname=<%=seguidoresUsr%>"><%=seguidoresUsr%> </a></li> </ul> <% } %> <h3>Seguidos</h3> <%for(String seguidosUsr: seguidos){ %> <ul> <li><a 
href="profile?opcion=Perfil&nickname=<%=seguidosUsr%>"><%=seguidosUsr%> </a></li> </ul> <% } %> </div> <div class="tabdiv" id="tabdiv-1"> <table class="TablaContenidos"> <tr> <th valign="top"> Nombre </th> <th valign="top"> Descripcion </th> <th valign="top"> </th> </tr> </tr> <% DtVideo[] vid = (DtVideo[]) request.getAttribute("videos"); for (DtVideo entry : vid) { String nombreV=entry.getNombre(); String descV=entry.getDescripcion(); String propietarioV = entry.getPropietario(); request.setAttribute("IDVideo", entry.getIDVideo().toString()); request.setAttribute(nombreV, nombreV); %> <tr class="videoRow"> <td id="NombreTD"><%=nombreV%></td> <td id="DescripcionTD"><%=descV %></td> <td> <form action="watch" method="get"> <input type="hidden" name="opcion" value="ver"> <input type="hidden" name="ID" value="<%=entry.getIDVideo()%>"> <input type="submit" value="Ver Ahora"> </form> </td> </tr> <% } %> </table> </div> <div class="tabdiv" id="tabdiv-2"> <table class="TablaContenidos"> <tr> <th valign="top"> Nombre </th> <th valign="top"> </th> </tr> <% if(request.getAttribute("listas")!=null){ DtListaReproduccion[] listas=(DtListaReproduccion[]) request.getAttribute("listas"); for(DtListaReproduccion entry: listas){%> <tr class="listaRow"> <td id="NombreTD"><%=entry.getNombre()%></td> <td> <form action="playlist" method="get"> <input type="hidden" name="action" value="details"> <input type="hidden" name="nameList" value="<%=entry.getNombre() %>"> <input type="hidden" name="ownerList" value="<%=entry.getPropietario() %>"> <input type="submit" value="Ver Info"> </form> </td> </tr> <% } } %> </table> </div> </td> </tr> </table> </div> </div> </div> </body> </html><file_sep> package uytubeLogica.publicar; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlSchemaType; import javax.xml.bind.annotation.XmlType; import javax.xml.datatype.XMLGregorianCalendar; /** * <p>Clase Java para dtCanal complex type. * * <p>El siguiente fragmento de esquema especifica el contenido que se espera que haya en esta clase. * * <pre> * &lt;complexType name="dtCanal"&gt; * &lt;complexContent&gt; * &lt;restriction base="{http://www.w3.org/2001/XMLSchema}anyType"&gt; * &lt;sequence&gt; * &lt;element name="categoria" type="{http://publicar.uytubeLogica/}dtCategoria" minOccurs="0"/&gt; * &lt;element name="descripcion" type="{http://www.w3.org/2001/XMLSchema}string" minOccurs="0"/&gt; * &lt;element name="nombre" type="{http://www.w3.org/2001/XMLSchema}string" minOccurs="0"/&gt; * &lt;element name="privado" type="{http://publicar.uytubeLogica/}privacidad" minOccurs="0"/&gt; * &lt;element name="propietario" type="{http://www.w3.org/2001/XMLSchema}string" minOccurs="0"/&gt; * &lt;element name="ultimoVideo" type="{http://www.w3.org/2001/XMLSchema}dateTime" minOccurs="0"/&gt; * &lt;/sequence&gt; * &lt;/restriction&gt; * &lt;/complexContent&gt; * &lt;/complexType&gt; * </pre> * * */ @XmlAccessorType(XmlAccessType.FIELD) @XmlType(name = "dtCanal", propOrder = { "categoria", "descripcion", "nombre", "privado", "propietario", "ultimoVideo" }) public class DtCanal { protected DtCategoria categoria; protected String descripcion; protected String nombre; @XmlSchemaType(name = "string") protected Privacidad privado; protected String propietario; @XmlSchemaType(name = "dateTime") protected XMLGregorianCalendar ultimoVideo; /** * Obtiene el valor de la propiedad categoria. 
* * @return * possible object is * {@link DtCategoria } * */ public DtCategoria getCategoria() { return categoria; } /** * Define el valor de la propiedad categoria. * * @param value * allowed object is * {@link DtCategoria } * */ public void setCategoria(DtCategoria value) { this.categoria = value; } /** * Obtiene el valor de la propiedad descripcion. * * @return * possible object is * {@link String } * */ public String getDescripcion() { return descripcion; } /** * Define el valor de la propiedad descripcion. * * @param value * allowed object is * {@link String } * */ public void setDescripcion(String value) { this.descripcion = value; } /** * Obtiene el valor de la propiedad nombre. * * @return * possible object is * {@link String } * */ public String getNombre() { return nombre; } /** * Define el valor de la propiedad nombre. * * @param value * allowed object is * {@link String } * */ public void setNombre(String value) { this.nombre = value; } /** * Obtiene el valor de la propiedad privado. * * @return * possible object is * {@link Privacidad } * */ public Privacidad getPrivado() { return privado; } /** * Define el valor de la propiedad privado. * * @param value * allowed object is * {@link Privacidad } * */ public void setPrivado(Privacidad value) { this.privado = value; } /** * Obtiene el valor de la propiedad propietario. * * @return * possible object is * {@link String } * */ public String getPropietario() { return propietario; } /** * Define el valor de la propiedad propietario. * * @param value * allowed object is * {@link String } * */ public void setPropietario(String value) { this.propietario = value; } /** * Obtiene el valor de la propiedad ultimoVideo. * * @return * possible object is * {@link XMLGregorianCalendar } * */ public XMLGregorianCalendar getUltimoVideo() { return ultimoVideo; } /** * Define el valor de la propiedad ultimoVideo. 
* * @param value * allowed object is * {@link XMLGregorianCalendar } * */ public void setUltimoVideo(XMLGregorianCalendar value) { this.ultimoVideo = value; } } <file_sep><%@page import="java.text.SimpleDateFormat"%> <%@page import="java.text.DateFormat"%> <%@page import="uytubeLogica.publicar.DtListaReproduccion"%> <%@page import="uytubeLogica.publicar.TipoLista"%> <%@ page import = "uytubeLogica.publicar.DtVideo"%> <%@ page import = "uytubeLogica.publicar.Privacidad"%> <%@ page import = "uytubeLogica.publicar.DtCanal"%> <%@ page import = "uytubeLogica.publicar.DtCategoria"%> <%@ page language="java" contentType="text/html; charset=UTF-8" pageEncoding="UTF-8"%> <!DOCTYPE html> <html> <head> <meta charset="UTF-8"> <%@include file="../buscador.jsp" %> <%@include file="../cosasComunesDelHead.jsp" %> <style>table{ width: 100%; table-layout: fixed; } th { color: white; padding: 15px; text-align: left; border-bottom: 1px solid #ddd; background-color: #ff0000; color: white; vertical-align: text-top; } td { padding: 15px; text-align: left; border-bottom: 1px solid #ddd; } tr:nth-child(even) {background-color: #f2f2f2;}</style> <link rel="stylesheet" href="/media/styles/Busqueda.css"> <meta http-equiv="Content-Type" content="text/html; charset=UTF-8"> <title>Modificar Lista Reprodcuccion</title> </head> <body> <% DtListaReproduccion infoLista = (DtListaReproduccion) request.getAttribute("infoLista"); if(infoLista.getTipoL().equals(TipoLista.PARTICULAR)){ %> <div class="main-content"> <form action="modifyPlaylist" method="get"> Privacidad<br> <input type="radio" name="grupoPrivacidad" value="Publico" checked>Publico<br> <input type="radio" name="grupoPrivacidad" value="Privado">Privado<br> <input type="hidden" name="List" value="<%= infoLista.getNombre()%>"> <input type="hidden" name="action" value="Privacy"> <button type="submit">Modificar</button> </form> </div> <%} %> <br> <table id="TablaContenidos"> <tr> <th valign="top"> Tipo </th> <th valign="top"> Nombre </th> <th valign="top"> Propietario </th> </tr> <% DtVideo[] vid = (DtVideo[]) request.getAttribute("videosLista"); if((String) request.getAttribute("nicknameLogin")!=null){ String nickname=(String) request.getAttribute("nicknameLogin"); for (DtVideo entry : vid) { String nombreV=entry.getNombre(); String descV=entry.getDescripcion(); String propietarioV = entry.getPropietario(); request.setAttribute("IDVideo", entry.getIDVideo().toString()); request.setAttribute(nombreV, nombreV); System.out.println(nickname); System.out.println(entry.getPropietario()); //comentario %> <tr> <td>Videos Lista <form action="removeVidPlaylist" method="get"> <input type="hidden" name="action" value="removeVideo"> <input type="hidden" name="ID" value="<%= entry.getIDVideo()%>"> <input type="hidden" name="List" value="<%= infoLista.getNombre()%>"> <input type="submit" value="Quitar Video"> </form> </td> <td id="NombreTD"><%=nombreV%></td> <td id="PropietarioTD"><%=propietarioV %></td> </tr> <% } } %> </table> </body> </html><file_sep> package uytubeLogica.publicar; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlSchemaType; import javax.xml.bind.annotation.XmlType; /** * <p>Clase Java para dtVideo complex type. * * <p>El siguiente fragmento de esquema especifica el contenido que se espera que haya en esta clase. 
* * <pre> * &lt;complexType name="dtVideo"&gt; * &lt;complexContent&gt; * &lt;restriction base="{http://www.w3.org/2001/XMLSchema}anyType"&gt; * &lt;sequence&gt; * &lt;element name="categoria" type="{http://publicar.uytubeLogica/}dtCategoria" minOccurs="0"/&gt; * &lt;element name="descripcion" type="{http://www.w3.org/2001/XMLSchema}string" minOccurs="0"/&gt; * &lt;element name="duracionSS" type="{http://www.w3.org/2001/XMLSchema}int"/&gt; * &lt;element name="fechaPublicacion" type="{http://publicar.uytubeLogica/}dtFecha" minOccurs="0"/&gt; * &lt;element name="nombre" type="{http://www.w3.org/2001/XMLSchema}string" minOccurs="0"/&gt; * &lt;element name="privacidad" type="{http://publicar.uytubeLogica/}privacidad" minOccurs="0"/&gt; * &lt;element name="propietario" type="{http://www.w3.org/2001/XMLSchema}string" minOccurs="0"/&gt; * &lt;element name="url" type="{http://www.w3.org/2001/XMLSchema}string" minOccurs="0"/&gt; * &lt;element name="iDVideo" type="{http://www.w3.org/2001/XMLSchema}int" minOccurs="0"/&gt; * &lt;/sequence&gt; * &lt;/restriction&gt; * &lt;/complexContent&gt; * &lt;/complexType&gt; * </pre> * * */ @XmlAccessorType(XmlAccessType.FIELD) @XmlType(name = "dtVideo", propOrder = { "categoria", "descripcion", "duracionSS", "fechaPublicacion", "nombre", "privacidad", "propietario", "url", "idVideo" }) public class DtVideo { protected DtCategoria categoria; protected String descripcion; protected int duracionSS; protected DtFecha fechaPublicacion; protected String nombre; @XmlSchemaType(name = "string") protected Privacidad privacidad; protected String propietario; protected String url; @XmlElement(name = "iDVideo") protected Integer idVideo; /** * Obtiene el valor de la propiedad categoria. * * @return * possible object is * {@link DtCategoria } * */ public DtCategoria getCategoria() { return categoria; } /** * Define el valor de la propiedad categoria. * * @param value * allowed object is * {@link DtCategoria } * */ public void setCategoria(DtCategoria value) { this.categoria = value; } /** * Obtiene el valor de la propiedad descripcion. * * @return * possible object is * {@link String } * */ public String getDescripcion() { return descripcion; } /** * Define el valor de la propiedad descripcion. * * @param value * allowed object is * {@link String } * */ public void setDescripcion(String value) { this.descripcion = value; } /** * Obtiene el valor de la propiedad duracionSS. * */ public int getDuracionSS() { return duracionSS; } /** * Define el valor de la propiedad duracionSS. * */ public void setDuracionSS(int value) { this.duracionSS = value; } /** * Obtiene el valor de la propiedad fechaPublicacion. * * @return * possible object is * {@link DtFecha } * */ public DtFecha getFechaPublicacion() { return fechaPublicacion; } /** * Define el valor de la propiedad fechaPublicacion. * * @param value * allowed object is * {@link DtFecha } * */ public void setFechaPublicacion(DtFecha value) { this.fechaPublicacion = value; } /** * Obtiene el valor de la propiedad nombre. * * @return * possible object is * {@link String } * */ public String getNombre() { return nombre; } /** * Define el valor de la propiedad nombre. * * @param value * allowed object is * {@link String } * */ public void setNombre(String value) { this.nombre = value; } /** * Obtiene el valor de la propiedad privacidad. * * @return * possible object is * {@link Privacidad } * */ public Privacidad getPrivacidad() { return privacidad; } /** * Define el valor de la propiedad privacidad. 
* * @param value * allowed object is * {@link Privacidad } * */ public void setPrivacidad(Privacidad value) { this.privacidad = value; } /** * Obtiene el valor de la propiedad propietario. * * @return * possible object is * {@link String } * */ public String getPropietario() { return propietario; } /** * Define el valor de la propiedad propietario. * * @param value * allowed object is * {@link String } * */ public void setPropietario(String value) { this.propietario = value; } /** * Obtiene el valor de la propiedad url. * * @return * possible object is * {@link String } * */ public String getUrl() { return url; } /** * Define el valor de la propiedad url. * * @param value * allowed object is * {@link String } * */ public void setUrl(String value) { this.url = value; } /** * Obtiene el valor de la propiedad idVideo. * * @return * possible object is * {@link Integer } * */ public Integer getIDVideo() { return idVideo; } /** * Define el valor de la propiedad idVideo. * * @param value * allowed object is * {@link Integer } * */ public void setIDVideo(Integer value) { this.idVideo = value; } } <file_sep>package uytubeLogic.logica; import java.util.Date; public class DtFecha { private Date fecha; public DtFecha(Date fechaBase) { fecha = fechaBase; } public void setFecha(Date fecha) { this.fecha = fecha; } public Date getFecha() { return fecha; } public DtFecha() { // TODO Auto-generated constructor stub } } <file_sep>package uytubeLogica.publicar; import java.io.IOException; import javax.jws.WebMethod; import javax.jws.WebParam; import javax.jws.WebService; import javax.jws.soap.SOAPBinding; import javax.jws.soap.SOAPBinding.ParameterStyle; import javax.jws.soap.SOAPBinding.Style; import javax.persistence.EntityManager; import javax.persistence.EntityManagerFactory; import javax.persistence.Persistence; import javax.xml.ws.Endpoint; import uytube.datosPrueba.DatosDePrueba; import uytubeLogic.logica.DtCanal; import uytubeLogic.logica.DtCategoria; import uytubeLogic.logica.DtComentario; import uytubeLogic.logica.DtFecha; import uytubeLogic.logica.DtInfoVideo; import uytubeLogic.logica.DtListaReproduccion; import uytubeLogic.logica.DtUsuario; import uytubeLogic.logica.DtVideo; import uytubeLogic.logica.DtVideoHistorial; import uytubeLogic.logica.Fabrica; import uytubeLogic.logica.IUsuarioCtrl;//is this necessary? import uytubeLogic.logica.IVideoCtrl;//is this necessary? 
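// Answer to the questions above: yes, both controller imports are needed -- every
// @WebMethod below fetches IUsuarioCtrl / IVideoCtrl from the Fabrica singleton and
// simply delegates the call to the logic layer. PropertiesCtrl (imported next)
// supplies the hostWSDL property used as the endpoint URL in publicar().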
import uytubeLogic.logica.PropertiesCtrl;//wtf is this import uytubeLogic.logica.SystemHandler.Privacidad; @WebService @SOAPBinding(style = Style.RPC, parameterStyle = ParameterStyle.WRAPPED) public class WebServices { private Endpoint endpoint = null; public WebServices() { } @WebMethod(exclude = true) public void publicar() { PropertiesCtrl prop = PropertiesCtrl.getInstance(); try { System.out.println("la property hostwsdl es: "+prop.getProperty("hostWSDL")); endpoint = Endpoint.publish(prop.getProperty("hostWSDL"), this); } catch (IOException e) { // TODO Auto-generated catch block e.printStackTrace(); } } // VERIFICAR PUERTO @WebMethod(exclude = true) public Endpoint getEndpoint() { return endpoint; } @WebMethod public void operacionPrueba() { System.out.println("LLAMADO A FUNC DEL WEB SERVICE"); } @WebMethod public void cargarDatos() { DatosDePrueba dp = new DatosDePrueba(); try { dp.cargarDatosDePrueba(); } catch (IOException e) { // TODO Auto-generated catch block e.printStackTrace(); } } @WebMethod public DtCategoria[] listarCategorias() { Fabrica fab = Fabrica.getInstance(); IVideoCtrl IVI = fab.getIVideoCtrl(); return IVI.listarCategorias(); } @WebMethod public DtVideo[] listarVideoListaReproduccion(String propietario, String nombreLista) { Fabrica fabrica = Fabrica.getInstance(); IUsuarioCtrl usuarioCtrl = fabrica.getIUsuarioCtrl(); DtVideo[] videosLista = usuarioCtrl.obtenerDtsVideosListaReproduccionUsuario(propietario, nombreLista); return videosLista; } @WebMethod public DtListaReproduccion infoListaReproduccion(String propietario, String nombreLista) { Fabrica fabrica = Fabrica.getInstance(); IUsuarioCtrl usuarioCtrl = fabrica.getIUsuarioCtrl(); DtListaReproduccion infoLista = usuarioCtrl.infoAdicLDR(propietario, nombreLista); return infoLista; } @WebMethod public DtVideo[] listarVideosPorCategoria(String nomCategoria, Privacidad priv, String login) { Fabrica fab = Fabrica.getInstance(); IVideoCtrl IVI = fab.getIVideoCtrl(); return IVI.listarVideosPorCategoria(nomCategoria, priv, login); } @WebMethod public DtListaReproduccion[] listarLDRPorCategoria(String nomCategoria, Privacidad priv, String login) { Fabrica fab = Fabrica.getInstance(); IVideoCtrl IVI = fab.getIVideoCtrl(); return IVI.listarLDRPorCategoria(nomCategoria, priv, login); } @WebMethod public DtVideo[] listarVideosPublicosPorNombre(String nombre) { Fabrica fab = Fabrica.getInstance(); IVideoCtrl IVI = fab.getIVideoCtrl(); return IVI.listarVideosPublicosPorNombre(nombre); } @WebMethod public DtCanal[] listarCanalesPublicosPorNombre(String nombre) { Fabrica fab = Fabrica.getInstance(); IUsuarioCtrl IUI = fab.getIUsuarioCtrl(); return IUI.listarCanalesPublicosPorNombre(nombre); } @WebMethod public DtListaReproduccion[] listarLDRPublicasPorNombre(String nombre) { Fabrica fab = Fabrica.getInstance(); IUsuarioCtrl IUI = fab.getIUsuarioCtrl(); return IUI.listarLDRPublicasPorNombre(nombre); } @WebMethod public DtVideo[] infoVideosCanal(String filtro, String login, Privacidad priv) { Fabrica fab = Fabrica.getInstance(); IUsuarioCtrl IUI = fab.getIUsuarioCtrl(); return IUI.infoVideosCanal(filtro, login, priv); } @WebMethod public DtListaReproduccion[] infoLDRdeUsuario(String filtro, String login, Privacidad priv) { Fabrica fab = Fabrica.getInstance(); IUsuarioCtrl IUI = fab.getIUsuarioCtrl(); return IUI.infoLDRdeUsuario(filtro, login, priv); } @WebMethod public DtCanal mostrarInfoCanal(String nickname) { Fabrica fab = Fabrica.getInstance(); IUsuarioCtrl IUI = fab.getIUsuarioCtrl(); return IUI.mostrarInfoCanal(nickname); 
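	// A minimal client-side sketch of calling this operation (assuming the usual
	// wsimport-generated wrapper, here hypothetically named WebServicesService, and
	// the endpoint configured via the hostWSDL property):
	//   WebServicesService service = new WebServicesService();
	//   WebServices port = service.getWebServicesPort();
	//   DtCanal canal = port.mostrarInfoCanal("someNickname");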
} @WebMethod public DtUsuario listarDatosUsuario(String nickname) { Fabrica fab = Fabrica.getInstance(); IUsuarioCtrl IUI = fab.getIUsuarioCtrl(); return IUI.listarDatosUsuario(nickname); } @WebMethod public String[] listarUsuariosQueLeSigue(String nickname) { Fabrica fab = Fabrica.getInstance(); IUsuarioCtrl IUI = fab.getIUsuarioCtrl(); return IUI.listarUsuariosQueLeSigue(nickname); } @WebMethod public String[] listarUsuariosQueSigue(String nickname) { Fabrica fab = Fabrica.getInstance(); IUsuarioCtrl IUI = fab.getIUsuarioCtrl(); return IUI.listarUsuariosQueSigue(nickname); } @WebMethod public String[] listarLDRdeUsuario(String nickname) { Fabrica fab = Fabrica.getInstance(); IUsuarioCtrl IUI = fab.getIUsuarioCtrl(); return IUI.listarLDRdeUsuario(nickname); } @WebMethod public void responderComentario(Integer id_video, Integer id_comentario, String comentador, DtFecha fechaHoy, String contenido) { Fabrica fab = Fabrica.getInstance(); IVideoCtrl IVI = fab.getIVideoCtrl(); IVI.responderComentario(id_video, id_comentario, comentador, fechaHoy, contenido); } @WebMethod public boolean verificarLogin(String nick, String pass) { Fabrica fab = Fabrica.getInstance(); IUsuarioCtrl IUI = fab.getIUsuarioCtrl(); return IUI.verificarLogin(nick, pass); } @WebMethod public void seguirUsuario(String nombre_usuario, String usuario_a_seguir) { Fabrica fab = Fabrica.getInstance(); IUsuarioCtrl IUI = fab.getIUsuarioCtrl(); IUI.seguirUsuario(nombre_usuario, usuario_a_seguir); } @WebMethod public void dejarUsuario(String nombre_usuario, String usuario_a_no_seguir) { Fabrica fab = Fabrica.getInstance(); IUsuarioCtrl IUI = fab.getIUsuarioCtrl(); IUI.dejarUsuario(nombre_usuario, usuario_a_no_seguir); } @WebMethod public boolean verificarDispUsuario(String nick, String email) { Fabrica fab = Fabrica.getInstance(); IUsuarioCtrl IUI = fab.getIUsuarioCtrl(); return IUI.verificarDispUsuario(nick, email); } @WebMethod public void nuevoUsuario(String nickname, String contrasenia, String nombre, String apellido, String email, DtFecha dtFechaNac, byte[] foto, String nomCanal, String descripcion, Privacidad priv, String categoria) { Fabrica fab = Fabrica.getInstance(); IUsuarioCtrl IUI = fab.getIUsuarioCtrl(); IUI.nuevoUsuario(nickname, contrasenia, nombre, apellido, email, dtFechaNac, foto, nomCanal, descripcion, priv, categoria); } @WebMethod public void aniadirVideo(String login, String nomVideo, String descripcionV, Integer duracion, DtFecha fechaPublicacionV, String url, DtCategoria catV, Privacidad priv) { Fabrica fab = Fabrica.getInstance(); IUsuarioCtrl IUI = fab.getIUsuarioCtrl(); IUI.aniadirVideo(login, nomVideo, descripcionV, duracion, fechaPublicacionV, url, catV, priv); } @WebMethod public void nuevoComentario(Integer id_video, String comentador, DtFecha fecha, String contenido) { Fabrica fab = Fabrica.getInstance(); IVideoCtrl IVI = fab.getIVideoCtrl(); IVI.nuevoComentario(id_video, comentador, fecha, contenido); } @WebMethod public void valorarVideo(Integer id_video, String nombre_usuario, boolean val) { Fabrica fab = Fabrica.getInstance(); IVideoCtrl IVI = fab.getIVideoCtrl(); IVI.valorarVideo(id_video, nombre_usuario, val); } @WebMethod public DtVideo infoAddVideo(Integer idVid) { Fabrica fab = Fabrica.getInstance(); IVideoCtrl IVI = fab.getIVideoCtrl(); return IVI.infoAddVideo(idVid); } @WebMethod public DtComentario[] listarComentarios(Integer idVid) { Fabrica fab = Fabrica.getInstance(); IVideoCtrl IVI = fab.getIVideoCtrl(); return IVI.listarComentarios(idVid); } @WebMethod public DtInfoVideo 
verDetallesVideoExt(Integer idVid) { Fabrica fab = Fabrica.getInstance(); IVideoCtrl IVI = fab.getIVideoCtrl(); return IVI.verDetallesVideoExt(idVid); } @WebMethod public String getEstadoValoracion(Integer IDVideo, String nickU) { Fabrica fab = Fabrica.getInstance(); IVideoCtrl IVI = fab.getIVideoCtrl(); return IVI.getEstadoValoracion(IDVideo, nickU); } @WebMethod public void eliminarVideoLista(String login, Integer id_video, String nombreLista) { Fabrica fab = Fabrica.getInstance(); IUsuarioCtrl IUI = fab.getIUsuarioCtrl(); IUI.eliminarVideoLista(login, id_video, nombreLista); } @WebMethod public void cambiarPrivLDR(String login, String nombreLista, Privacidad privacidad_lista) { Fabrica fab = Fabrica.getInstance(); IUsuarioCtrl IUI = fab.getIUsuarioCtrl(); IUI.cambiarPrivLDR(login, nombreLista, privacidad_lista); } @WebMethod public DtVideo[] obtenerDtsVideosListaReproduccionUsuario(String propietarioLista, String nombreLista) { Fabrica fab = Fabrica.getInstance(); IUsuarioCtrl IUI = fab.getIUsuarioCtrl(); return IUI.obtenerDtsVideosListaReproduccionUsuario(propietarioLista, nombreLista); } @WebMethod public void agregarVideoLista(String login, Integer id_video, String nombreLista) { Fabrica fab = Fabrica.getInstance(); IUsuarioCtrl IUI = fab.getIUsuarioCtrl(); IUI.agregarVideoLista(login, id_video, nombreLista); } @WebMethod public boolean memberListaReproduccionPropia(String nombreUsuario, String nombreLista) { Fabrica fab = Fabrica.getInstance(); IUsuarioCtrl IUI = fab.getIUsuarioCtrl(); return IUI.memberListaReproduccionPropia(nombreUsuario, nombreLista); } @WebMethod public void nuevaListaParticular(String login, String nombreLista, Privacidad priv) { Fabrica fab = Fabrica.getInstance(); IUsuarioCtrl IUI = fab.getIUsuarioCtrl(); IUI.nuevaListaParticular(login, nombreLista, priv); } @WebMethod public void bajaUsuario(String nick){ Fabrica fab = Fabrica.getInstance(); IUsuarioCtrl IUI = fab.getIUsuarioCtrl(); EntityManagerFactory emf = Persistence.createEntityManagerFactory("UyTubeJPA"); EntityManager em = emf.createEntityManager(); em.getTransaction().begin(); uyTubePersistencia.Usuario usuarioPersistir= IUI.persistirUsuario(nick); em.persist(usuarioPersistir); em.flush(); em.getTransaction().commit(); IUI.bajaUsuario(nick); } @WebMethod public void agregarVisita(Integer id_video, String nick){ Fabrica fab = Fabrica.getInstance(); IUsuarioCtrl IUI = fab.getIUsuarioCtrl(); IUI.agregarVisita(id_video, nick); } @WebMethod public DtVideoHistorial[] listarVideoHistorial(String nick) { Fabrica fab = Fabrica.getInstance(); IUsuarioCtrl IUI = fab.getIUsuarioCtrl(); return IUI.listarVideoHistorial(nick); } @WebMethod public boolean memberVideo(Integer idVid) { Fabrica fab = Fabrica.getInstance(); IVideoCtrl IVI = fab.getIVideoCtrl(); return IVI.memberVideo(idVid); } } <file_sep><%@page import="java.util.Locale"%> <%@page import="java.text.SimpleDateFormat"%> <%@page import="java.text.DateFormat"%> <%@page import="uytubeLogica.publicar.DtListaReproduccion"%> <%@ page import = "uytubeLogica.publicar.DtVideo"%> <%@ page import = "uytubeLogica.publicar.DtCanal"%> <%@ page language="java" contentType="text/html; charset=UTF-8" pageEncoding="UTF-8"%> <%@page errorPage="../error/error404.jsp" %> <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> <html> <head> <%@include file="cosasComunesDelHead.jsp" %> <style>table{ width: 100%; table-layout: fixed; } th { color: white; padding: 15px; text-align: left; border-bottom: 1px solid #ddd; 
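/* Shared styling for the results table; note that `color: white` is declared twice
   in this rule -- harmless, but redundant. */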
background-color: #ff0000; color: white; vertical-align: text-top; } td { padding: 15px; text-align: left; border-bottom: 1px solid #ddd; } .verAhora{ background-color: #ee0000; border: none; color: white; padding: 15px 32px; text-align: center; text-decoration: none; display: inline-block; font-size: 16px; } tr:nth-child(even) {background-color: #f2f2f2;}</style> <%@include file="buscador.jsp" %> <meta http-equiv="Content-Type" content="text/html; charset=UTF-8"> <title><%=request.getAttribute("titulo") %></title> </head> <body> <%if (request.getAttribute("videos") != null) {%> <button onclick="toggleVideos()">Ver/Ocultar Videos</button> <%} if (request.getAttribute("listas") != null) { %> <button onclick="toggleListas()">Ver/Ocultar Listas</button> <%} if (request.getAttribute("canales") != null) { %> <button onclick="toggleCanales()">Ver/Ocultar Canales</button> <%} %> <div class="main"> Ordenar por: <select id="Ordenar" class="icon-menu" onchange="sortTable()"> <option value=1>Nombre</option> <option value=5 selected>Fecha Publicacion</option> </select> <% DateFormat df = new SimpleDateFormat("yyyy/MM/dd HH:mm:ss"); DateFormat goodDf = new SimpleDateFormat("EEEEE dd 'de' MMMMM 'del' yyyy",Locale.forLanguageTag("es-ES")); if (request.getAttribute("videos") != null) { %> <table id="TablaContenidos"> <tr> <th valign="top"> Tipo </th> <th valign="top"> Nombre </th> <th valign="top"> Descripcion </th> <th valign="top"> Propietario </th> <th valign="top"> Fecha publicación </th> </tr> <% DtVideo[] vid = (DtVideo[]) request.getAttribute("videos"); for (DtVideo entry : vid) { String nombreV=entry.getNombre(); String descV=entry.getDescripcion(); String propietarioV = entry.getPropietario(); request.setAttribute("IDVideo", entry.getIDVideo().toString()); request.setAttribute(nombreV, nombreV); %> <tr class="videoRow"> <td>Video <form action="watch" method="get"> <input type="hidden" name="opcion" value="ver"> <input type="hidden" name="ID" value="<%=entry.getIDVideo()%>"> <input class="verAhora" type="submit" value="Ver Ahora"> </form> </td> <td id="NombreTD"><%=nombreV%></td> <td id="DescripcionTD"><%=descV %></td> <td id="PropietarioTD"><%=propietarioV %></td> <td id="FechaTD"><%=goodDf.format(entry.getFechaPublicacion().getFecha().toGregorianCalendar().getTime()) %></td> <%String fechaHidden=df.format(entry.getFechaPublicacion().getFecha().toGregorianCalendar().getTime()); %> <td style="display:none;"><%=fechaHidden%></td> </tr> <% } } if(request.getAttribute("listas")!=null){ DtListaReproduccion[] listas=(DtListaReproduccion[]) request.getAttribute("listas"); for(DtListaReproduccion entry: listas){%> <tr class="listaRow"> <td>Lista de Reproduccion <form action="playlist" method="get"> <input type="hidden" name="action" value="details"> <input type="hidden" name="nameList" value="<%=entry.getNombre() %>"> <input type="hidden" name="ownerList" value="<%=entry.getPropietario() %>"> <input class="verAhora" type="submit" value="Ver Info"> </form> </td> <td id="NombreTD"><%=entry.getNombre()%></td> <td id="DescripcionTD"></td> <td id="PropietarioTD"><%=entry.getPropietario() %></td> <td id="FechaTD"><%=goodDf.format(entry.getUltimoVideo().toGregorianCalendar().getTime()) %></td> <%String fechaHidden=df.format(entry.getUltimoVideo().toGregorianCalendar().getTime()); %> <td style="display:none;"><%=fechaHidden%></td> </tr> <% } } if(request.getAttribute("canales")!=null){ for(DtCanal entry:(DtCanal[])request.getAttribute("canales")){ %> <tr class="canalRow"> <td>Canal <form action="profile" 
method="get"> <input type="hidden" name="opcion" value="Perfil"> <input type="hidden" name="nickname" value="<%=entry.getPropietario()%>"> <input class="verAhora" type="submit" value="Ver Canal"> </form> </td> <td id="NombreTD"><%=entry.getNombre()%></td> <td id="DescripcionTD"><%=entry.getDescripcion() %></td> <td id="PropietarioTD"><%=entry.getPropietario() %></td> <td id="FechaTD"><%=goodDf.format(entry.getUltimoVideo().toGregorianCalendar().getTime()) %></td> <%String fechaHidden=df.format(entry.getUltimoVideo().toGregorianCalendar().getTime()); %> <td style="display:none;"><%=fechaHidden%></td> <% } } %> </tr> </table> </div> <script> function toggleVideos(){ var x = document.getElementsByClassName("videoRow"); if(x[0].style.display != "none"){ for(i=0 ; i < x.length ; i++){ x[i].style.display = "none"; } }else{ for(i=0 ; i < x.length ; i++){ x[i].style.display = "table-row"; } } } function toggleListas(){ var x = document.getElementsByClassName("listaRow"); if(x[0].style.display != "none"){ for(i=0 ; i < x.length ; i++){ x[i].style.display = "none"; } }else{ for(i=0 ; i < x.length ; i++){ x[i].style.display = "table-row"; } } } function toggleCanales(){ var x = document.getElementsByClassName("canalRow"); if(x[0].style.display != "none"){ for(i=0 ; i < x.length ; i++){ x[i].style.display = "none"; } }else{ for(i=0 ; i < x.length ; i++){ x[i].style.display = "table-row"; } } } function sortTable() { var selected = document.getElementById("Ordenar").value; var table, rows, switching, i, x, y, shouldSwitch; table = document.getElementById("TablaContenidos"); switching = true; /* Make a loop that will continue until no switching has been done: */ while (switching) { // Start by saying: no switching is done: switching = false; rows = table.rows; /* Loop through all table rows (except the first, which contains table headers): */ for (i = 1; i < (rows.length - 1); i++) { // Start by saying there should be no switching: shouldSwitch = false; /* Get the two elements you want to compare, one from current row and one from the next: */ x = rows[i].getElementsByTagName("TD")[selected]; y = rows[i + 1].getElementsByTagName("TD")[selected]; // Check if the two rows should switch place: if (x.innerHTML.toLowerCase() > y.innerHTML.toLowerCase()) { // If so, mark as a switch and break the loop: shouldSwitch = true; break; } } if (shouldSwitch) { /* If a switch has been marked, make the switch and mark that a switch has been done: */ rows[i].parentNode.insertBefore(rows[i + 1], rows[i]); switching = true; } } } </script> </body> </html><file_sep>package uytubeLogic.logica; public class DtInfoVideo { private DtVideo infoVideo; private DtComentario[] comentarios; private DtUsuario[] usuariosGusta; private DtUsuario[] usuariosNoGusta; public DtVideo getInfoVideo() { return infoVideo; } public DtInfoVideo() { // TODO Auto-generated constructor stub } public void setInfoVideo(DtVideo infoVideo) { this.infoVideo = infoVideo; } public DtComentario[] getComentarios() { return comentarios; } public void setComentarios(DtComentario[] comentarios) { this.comentarios = comentarios; } public DtUsuario[] getUsuariosGusta() { return usuariosGusta; } public void setUsuariosGusta(DtUsuario[] usuariosGusta) { this.usuariosGusta = usuariosGusta; } public DtUsuario[] getUsuariosNoGusta() { return usuariosNoGusta; } public void setUsuariosNoGusta(DtUsuario[] usuariosNoGusta) { this.usuariosNoGusta = usuariosNoGusta; } public DtInfoVideo(Video videoBase) { infoVideo = new DtVideo(videoBase); comentarios = 
videoBase.getComentarios(); usuariosGusta = videoBase.getUsuariosPuntuadores(true); usuariosNoGusta = videoBase.getUsuariosPuntuadores(false); } } <file_sep>package uytube.admin.usuarios; import uytube.Imagen; import uytube.admin.adminPrincipal; import uytube.admin.videos.ModificarVideo; import uytubeLogic.logica.DtCanal; import uytubeLogic.logica.DtCategoria; import uytubeLogic.logica.DtFecha; import uytubeLogic.logica.DtListaReproduccion; import uytubeLogic.logica.DtUsuario; import uytubeLogic.logica.Fabrica; import uytubeLogic.logica.IUsuarioCtrl; import uytubeLogic.logica.IVideoCtrl; import uytubeLogic.logica.SystemHandler.Privacidad; import javax.swing.JInternalFrame; import java.awt.FlowLayout; import java.awt.GridBagLayout; import javax.swing.JLabel; import java.awt.GridBagConstraints; import javax.swing.JPanel; import java.awt.GridLayout; import java.awt.Image; import javax.swing.JComboBox; import javax.swing.border.TitledBorder; import javax.swing.JButton; import javax.swing.JTextField; import javax.swing.JOptionPane; import javax.swing.ImageIcon; import javax.swing.UIManager; import java.awt.Color; import com.toedter.calendar.JDateChooser; import java.awt.event.ActionListener; import java.awt.image.BufferedImage; import java.io.File; import java.awt.event.ActionEvent; import javax.swing.JRadioButton; import javax.swing.ButtonGroup; import javax.swing.JScrollPane; import javax.swing.JTextArea; public class modificarUsuario extends JInternalFrame { private JTextField textFieldNombre; private JTextField textFieldApellido; private JTextField textFieldNomCanal; private JTextField textFieldEmail; private final ButtonGroup buttonGroup = new ButtonGroup(); private JDateChooser dateChooser; private JRadioButton rdbtnPrivadoCanal; private JRadioButton rdbtnPublicoCanal; private JComboBox comboBoxVideos; private IUsuarioCtrl controlUsr; private JComboBox comboBoxListas; private final ButtonGroup buttonGroup_1 = new ButtonGroup(); private JButton btnModificar ; private JButton btnCancelar_2; private JTextArea textAreaDesc; private JRadioButton rdbtnPrivadoLista; private JRadioButton rdbtnPublicoLista; private DtUsuario usr; private DtCanal usrCanal; private File archivo; private JLabel lblFoto; private byte[] fotoUsr; private JButton btnElegir; private JComboBox comboBoxCatCanal; private JComboBox comboBoxNick; public static void infoBox(String infoMessage, String titleBar){ JOptionPane.showMessageDialog(null, infoMessage, "" + titleBar, JOptionPane.INFORMATION_MESSAGE); } /** * Create the frame. 
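	 * Admin frame for editing an existing user: pick a nickname and then modify the
	 * personal data, channel data, video data and playlist privacy from one window.
	 *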
* @param iCU */ public modificarUsuario(IUsuarioCtrl iCU) { setIconifiable(true); controlUsr = iCU; setTitle("Modificar Usuario"); setResizable(true); setMaximizable(true); setClosable(true); setBounds(100, 100, 495, 410); getContentPane().setLayout(new FlowLayout(FlowLayout.CENTER, 5, 5)); JPanel panelDatosUsuario = new JPanel(); panelDatosUsuario.setBorder(new TitledBorder(null, "Datos usuario", TitledBorder.LEADING, TitledBorder.TOP, null, null)); getContentPane().add(panelDatosUsuario); panelDatosUsuario.setLayout(new GridLayout(0, 2, 2, 1)); JLabel lblNickname = new JLabel("Nickname"); panelDatosUsuario.add(lblNickname); comboBoxNick = new JComboBox(); comboBoxNick.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent e) { String nickU = (String) comboBoxNick.getSelectedItem(); if ((String) comboBoxNick.getSelectedItem() != " " && comboBoxNick.getSelectedIndex()!=-1){ //pedir Dt usr= controlUsr.listarDatosUsuario(nickU); usrCanal = controlUsr.mostrarInfoCanal(nickU); limpiar(); cargarDatos(usr, usrCanal, nickU); } } }); comboBoxNick.setSelectedIndex(-1); panelDatosUsuario.add(comboBoxNick); JLabel lblEmail = new JLabel("Email"); panelDatosUsuario.add(lblEmail); textFieldEmail = new JTextField(); textFieldEmail.setEditable(false); panelDatosUsuario.add(textFieldEmail); textFieldEmail.setColumns(10); JLabel lblNombre = new JLabel("Nombre"); panelDatosUsuario.add(lblNombre); textFieldNombre = new JTextField(); textFieldNombre.setEditable(false); panelDatosUsuario.add(textFieldNombre); textFieldNombre.setColumns(10); JLabel lblApellido = new JLabel("Apellido"); panelDatosUsuario.add(lblApellido); textFieldApellido = new JTextField(); textFieldApellido.setEditable(false); panelDatosUsuario.add(textFieldApellido); textFieldApellido.setColumns(10); JLabel lblFechaNac = new JLabel("Fecha Nac."); panelDatosUsuario.add(lblFechaNac); dateChooser = new JDateChooser(); dateChooser.setEnabled(false); panelDatosUsuario.add(dateChooser); JButton btnCancelar_1 = new JButton("Cancelar"); btnCancelar_1.setEnabled(false); btnCancelar_1.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent e) { textFieldNombre.setEditable(false); textFieldApellido.setEditable(false); dateChooser.setEnabled(false); btnModificar.setText("Modificar"); String nickU = (String)comboBoxNick.getSelectedItem(); if((String)comboBoxNick.getSelectedItem() != " " && comboBoxNick.getSelectedIndex()!=-1){ //pedir Dt usr= controlUsr.listarDatosUsuario(nickU); usrCanal = controlUsr.mostrarInfoCanal(nickU); limpiar(); cargarDatos(usr, usrCanal, nickU); } btnCancelar_1.setEnabled(false); btnElegir.setEnabled(false); } }); btnModificar = new JButton("Modificar"); btnModificar.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent e) { String nickU, nom, ape; DtFecha nac; nickU = (String) comboBoxNick.getSelectedItem(); nom = textFieldNombre.getText(); ape = textFieldApellido.getText(); nac = new DtFecha(dateChooser.getDate()); //verificar campos!! 
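				// Field validation is handled by verificarCamposDatosUsu() inside the "Guardar"
				// branch. Note the button label is compared with == ("Guardar"/"Modificar"); this
				// works only because setText() is always called with the same String literals --
				// equals() would be the more robust comparison.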
if(btnModificar.getText()=="Guardar"){ if(verificarCamposDatosUsu()){ controlUsr.editarDatosUsuario(nickU,nom,ape, nac, Imagen.imagenToByte(archivo)); infoBox("Usuario modificado","Modificar usuario"); textFieldNombre.setEditable(false); textFieldApellido.setEditable(false); dateChooser.setEnabled(false); btnModificar.setText("Modificar"); } btnCancelar_1.setEnabled(false); btnElegir.setEnabled(false); }else if(btnModificar.getText()=="Modificar"){ textFieldNombre.setEditable(true); textFieldApellido.setEditable(true); dateChooser.setEnabled(true); btnCancelar_1.setEnabled(true); btnElegir.setEnabled(true); //((JTextField) dateChooser.getDateEditor()).setEditable(true); btnModificar.setText("Guardar"); } } }); lblFoto = new JLabel("Foto"); panelDatosUsuario.add(lblFoto); btnElegir = new JButton("Elegir"); btnElegir.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent e) { archivo = Imagen.elegirImagen(); if(archivo!=null){ Image imagen = new ImageIcon(archivo.getAbsolutePath()).getImage(); lblFoto.setSize(30,30); ImageIcon icono = new ImageIcon(imagen.getScaledInstance(lblFoto.getWidth(), lblFoto.getHeight(), Image.SCALE_DEFAULT)); lblFoto.setIcon(icono); } } }); btnElegir.setEnabled(false); panelDatosUsuario.add(btnElegir); panelDatosUsuario.add(btnModificar); panelDatosUsuario.add(btnCancelar_1); JPanel panelDatosCanal = new JPanel(); panelDatosCanal.setBorder(new TitledBorder(null, "Datos canal", TitledBorder.LEADING, TitledBorder.TOP, null, null)); getContentPane().add(panelDatosCanal); panelDatosCanal.setLayout(new GridLayout(0, 2, 2, 1)); JLabel lblNombre_1 = new JLabel("Nombre"); panelDatosCanal.add(lblNombre_1); textFieldNomCanal = new JTextField(); textFieldNomCanal.setEditable(false); panelDatosCanal.add(textFieldNomCanal); textFieldNomCanal.setColumns(10); JLabel lblPrivacidad = new JLabel("Privacidad"); panelDatosCanal.add(lblPrivacidad); rdbtnPrivadoCanal = new JRadioButton("Privado"); rdbtnPrivadoCanal.setEnabled(false); rdbtnPrivadoCanal.setSelected(true); buttonGroup.add(rdbtnPrivadoCanal); panelDatosCanal.add(rdbtnPrivadoCanal); JLabel label_1 = new JLabel(""); panelDatosCanal.add(label_1); rdbtnPublicoCanal = new JRadioButton("Publico"); rdbtnPublicoCanal.setEnabled(false); buttonGroup.add(rdbtnPublicoCanal); panelDatosCanal.add(rdbtnPublicoCanal); JLabel lblDescripicin = new JLabel("Descripici\u00F3n"); panelDatosCanal.add(lblDescripicin); JScrollPane scrollPane = new JScrollPane(); panelDatosCanal.add(scrollPane); textAreaDesc = new JTextArea(); textAreaDesc.setEditable(false); scrollPane.setViewportView(textAreaDesc); JLabel label_5 = new JLabel("Categoria"); panelDatosCanal.add(label_5); comboBoxCatCanal = new JComboBox(); panelDatosCanal.add(comboBoxCatCanal); JButton btnModificar_3 = new JButton("Modificar"); btnModificar_3.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent e) { String nickU, nomC,des,catE; Privacidad priv; nickU = (String) comboBoxNick.getSelectedItem(); nomC = textFieldNomCanal.getText(); des=textAreaDesc.getText(); catE= (String)comboBoxCatCanal.getSelectedItem(); if(rdbtnPrivadoCanal.isSelected()){ priv=Privacidad.PRIVADO; }else{ priv=Privacidad.PUBLICO; } if(btnModificar_3.getText()=="Guardar"){ if(comboBoxNick.getSelectedIndex()!=-1 && !nomC.isEmpty() && comboBoxCatCanal.getSelectedIndex()!=-1){ controlUsr.modificarDatosCanal(nickU,nomC,des,priv,catE); infoBox("Canal modificado","Modificar usuario"); textFieldNomCanal.setEditable(false); rdbtnPrivadoCanal.setEnabled(false); 
rdbtnPublicoCanal.setEnabled(false); comboBoxCatCanal.setEnabled(false); textAreaDesc.setEditable(false); btnModificar_3.setText("Modificar"); }else{ infoBox("Faltan datos completar","Modificar Canal de Usuario"); } btnCancelar_2.setEnabled(false); }else if(btnModificar.getText()=="Modificar"){ rdbtnPrivadoCanal.setEnabled(true); rdbtnPublicoCanal.setEnabled(true); comboBoxCatCanal.setEnabled(true); textAreaDesc.setEditable(true); textFieldNomCanal.setEditable(true); btnModificar_3.setText("Guardar"); btnCancelar_2.setEnabled(true); } } }); panelDatosCanal.add(btnModificar_3); btnCancelar_2 = new JButton("Cancelar"); btnCancelar_2.setEnabled(true); btnCancelar_2.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent e) { rdbtnPrivadoCanal.setEnabled(false); rdbtnPublicoCanal.setEnabled(false); textFieldNomCanal.setEditable(false); textAreaDesc.setEditable(false); comboBoxCatCanal.setEnabled(false); btnModificar_3.setText("Modificar"); String nickU = (String)comboBoxNick.getSelectedItem(); if((String)comboBoxNick.getSelectedItem() != " " && comboBoxNick.getSelectedIndex()!=-1){ //pedir Dt usr= controlUsr.listarDatosUsuario(nickU); usrCanal = controlUsr.mostrarInfoCanal(nickU); limpiar(); cargarDatos(usr, usrCanal, nickU); } btnCancelar_2.setEnabled(false); } }); btnCancelar_2.setEnabled(false); panelDatosCanal.add(btnCancelar_2); JPanel panel = new JPanel(); panel.setBorder(new TitledBorder(null, "Datos video", TitledBorder.LEADING, TitledBorder.TOP, null, null)); getContentPane().add(panel); panel.setLayout(new GridLayout(0, 1, 5, 5)); JLabel label = new JLabel("Nombre"); panel.add(label); comboBoxVideos = new JComboBox(); panel.add(comboBoxVideos); JButton button = new JButton("Modificar"); button.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent arg0) { //abrir videos.modificar openModificarVideo(); } }); panel.add(button); JPanel panel_1 = new JPanel(); panel_1.setBorder(new TitledBorder(UIManager.getBorder("TitledBorder.border"), "Datos listas de reproduccion", TitledBorder.LEADING, TitledBorder.TOP, null, new Color(0, 0, 0))); getContentPane().add(panel_1); panel_1.setLayout(new GridLayout(0, 2, 2, 1)); JLabel label_2 = new JLabel("Nombre Lista"); panel_1.add(label_2); comboBoxListas = new JComboBox(); comboBoxListas.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent arg0) { if(comboBoxNick.getSelectedIndex()!=-1 && comboBoxListas.getSelectedIndex()!=-1){ DtListaReproduccion dtLista = controlUsr.infoAdicLDR((String)comboBoxNick.getSelectedItem(), (String)comboBoxListas.getSelectedItem()); if (dtLista.getPrivado()==Privacidad.PRIVADO){ rdbtnPrivadoLista.setSelected(true); }else{ rdbtnPublicoLista.setSelected(true); // System.out.println("publico"); } } } }); panel_1.add(comboBoxListas); JLabel lblPrivacidad_1 = new JLabel("Privacidad"); panel_1.add(lblPrivacidad_1); rdbtnPrivadoLista = new JRadioButton("Privado"); rdbtnPrivadoLista.setEnabled(false); rdbtnPrivadoLista.setSelected(true); buttonGroup_1.add(rdbtnPrivadoLista); panel_1.add(rdbtnPrivadoLista); JLabel label_3 = new JLabel(""); panel_1.add(label_3); rdbtnPublicoLista = new JRadioButton("Publico"); rdbtnPublicoLista.setEnabled(false); buttonGroup_1.add(rdbtnPublicoLista); panel_1.add(rdbtnPublicoLista); JButton button_1 = new JButton("Modificar"); button_1.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent arg0) { if(button_1.getText()=="Guardar"){ String nickU = (String)comboBoxNick.getSelectedItem(); String 
nombreL = (String)comboBoxListas.getSelectedItem(); Privacidad privE=Privacidad.PUBLICO; if(rdbtnPrivadoLista.isSelected()){privE=Privacidad.PRIVADO;} controlUsr.cambiarPrivLDR(nickU,nombreL,privE); infoBox("Lista de reproducion modificada","Modificar usuario"); button_1.setText("Modificar"); rdbtnPrivadoLista.setEnabled(false); rdbtnPublicoLista.setEnabled(false); }else if(button_1.getText()=="Modificar"){ button_1.setText("Guardar"); rdbtnPrivadoLista.setEnabled(true); rdbtnPublicoLista.setEnabled(true); } } }); panel_1.add(button_1); JPanel panel_3 = new JPanel(); getContentPane().add(panel_3); GridBagLayout gbl_panel_3 = new GridBagLayout(); gbl_panel_3.columnWidths = new int[]{74, 75, 0}; gbl_panel_3.rowHeights = new int[]{23, 0, 0, 0, 0, 0}; gbl_panel_3.columnWeights = new double[]{0.0, 0.0, Double.MIN_VALUE}; gbl_panel_3.rowWeights = new double[]{0.0, 0.0, 0.0, 0.0, 0.0, Double.MIN_VALUE}; panel_3.setLayout(gbl_panel_3); JButton btnCancelar = new JButton("Cancelar"); GridBagConstraints gbc_btnCancelar = new GridBagConstraints(); gbc_btnCancelar.anchor = GridBagConstraints.NORTHWEST; gbc_btnCancelar.gridx = 1; gbc_btnCancelar.gridy = 4; panel_3.add(btnCancelar, gbc_btnCancelar); btnCancelar.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent e) { //setVisible(false); dispose(); } }); //CARGAR NICK String[] nickUsuario = controlUsr.listarNicknamesUsuarios(); for(int i=0; i<nickUsuario.length;i++){ comboBoxNick.addItem(nickUsuario[i]); } comboBoxNick.setSelectedIndex(-1); limpiar(); } private void openModificarVideo(){ // int idVideo; Fabrica fabrica = Fabrica.getInstance(); IVideoCtrl ICV = fabrica.getIVideoCtrl(); ModificarVideo modVideoIFrame = new ModificarVideo(controlUsr, ICV); String nick = (String)comboBoxNick.getSelectedItem(); String nomVideo = (String)comboBoxVideos.getSelectedItem(); if(comboBoxVideos.getSelectedIndex()!=-1 && (String)comboBoxNick.getSelectedItem() != " " && comboBoxNick.getSelectedIndex()!=-1 ){ modVideoIFrame.llamadaParticular(nick, nomVideo); adminPrincipal.getFrames()[0].setLayout(null); adminPrincipal.getFrames()[0].add(modVideoIFrame); modVideoIFrame.show(); }else{ infoBox("Falta seleccionar usuario y/o video","Modificar Usuario"); } } private void limpiar(){ textFieldEmail.setText(""); textFieldNombre.setText(""); textFieldApellido.setText(""); dateChooser.setDate(null); textFieldNomCanal.setText(""); comboBoxVideos.setSelectedIndex(-1); comboBoxVideos.removeAllItems(); comboBoxCatCanal.setSelectedIndex(-1); comboBoxCatCanal.removeAllItems(); comboBoxListas.setSelectedIndex(-1); comboBoxListas.removeAllItems(); lblFoto.setIcon(null); textAreaDesc.setText(null); } private void cargarDatos(DtUsuario usr, DtCanal usrCanal, String nickU){ textFieldEmail.setText(usr.getEmail()); textFieldNombre.setText(usr.getNombre()); textFieldApellido.setText(usr.getApellido()); dateChooser.setDate(usr.getFechaNacimiento().getFecha()); textFieldNomCanal.setText(usrCanal.getNombre()); if (usrCanal.getPrivado()==Privacidad.PRIVADO){ rdbtnPrivadoCanal.setSelected(true); }else{ rdbtnPublicoCanal.setSelected(true); } textAreaDesc.setText(usrCanal.getDescripcion()); //CARGAR CATEGORIAS Fabrica fabrica = Fabrica.getInstance(); IVideoCtrl iCV = fabrica.getIVideoCtrl(); DtCategoria[] set_cat=iCV.listarCategorias(); for(int i=0; i<set_cat.length;i++){ comboBoxCatCanal.addItem(set_cat[i].getNombre()); } comboBoxCatCanal.setSelectedItem(usrCanal.getCategoria().getNombre()); //CARGAR VIDEOS String[] nomVideos = controlUsr.listarVideosCanal(nickU); 
for(int i=0; i<nomVideos.length;i++){ comboBoxVideos.addItem(nomVideos[i]); } comboBoxVideos.setSelectedIndex(-1); //CARGAR LISTAS String[] nomListas = controlUsr.listarLDRdeUsuario(nickU); for(int e=0; e<nomListas.length;e++){ comboBoxListas.addItem(nomListas[e]); } comboBoxListas.setSelectedIndex(-1); //Cargar Imagen fotoUsr=usr.getFoto(); if(fotoUsr!=null){ BufferedImage image = Imagen.byteToImagen(fotoUsr); lblFoto.setSize(30,30); ImageIcon icono = new ImageIcon(image.getScaledInstance(lblFoto.getWidth(), lblFoto.getHeight(), Image.SCALE_DEFAULT)); lblFoto.setIcon(icono); }else{ ImageIcon icono = new ImageIcon(); lblFoto.setIcon(icono); } } private Boolean verificarCamposDatosUsu(){ if( textFieldEmail.getText().isEmpty()|| textFieldNombre.getText().isEmpty() || textFieldApellido.getText().isEmpty() || dateChooser.getDate()==null){ infoBox("Campos sin completar", "Aviso"); return false; }else{return true;} } } <file_sep> package uytubeLogica.publicar; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlSchemaType; import javax.xml.bind.annotation.XmlType; import javax.xml.datatype.XMLGregorianCalendar; /** * <p>Clase Java para dtFecha complex type. * * <p>El siguiente fragmento de esquema especifica el contenido que se espera que haya en esta clase. * * <pre> * &lt;complexType name="dtFecha"&gt; * &lt;complexContent&gt; * &lt;restriction base="{http://www.w3.org/2001/XMLSchema}anyType"&gt; * &lt;sequence&gt; * &lt;element name="fecha" type="{http://www.w3.org/2001/XMLSchema}dateTime" minOccurs="0"/&gt; * &lt;/sequence&gt; * &lt;/restriction&gt; * &lt;/complexContent&gt; * &lt;/complexType&gt; * </pre> * * */ @XmlAccessorType(XmlAccessType.FIELD) @XmlType(name = "dtFecha", propOrder = { "fecha" }) public class DtFecha { @XmlSchemaType(name = "dateTime") protected XMLGregorianCalendar fecha; /** * Obtiene el valor de la propiedad fecha. * * @return * possible object is * {@link XMLGregorianCalendar } * */ public XMLGregorianCalendar getFecha() { return fecha; } /** * Define el valor de la propiedad fecha. * * @param value * allowed object is * {@link XMLGregorianCalendar } * */ public void setFecha(XMLGregorianCalendar value) { this.fecha = value; } } <file_sep>package uytubeLogic.logica; import uytubeLogic.logica.SystemHandler.Privacidad; public interface IVideoCtrl { public abstract DtListaReproduccion[] listarLDRPorCategoria(String cat, Privacidad priv, String nomU); public abstract DtVideo[] listarVideosPorCategoria(String cat, Privacidad priv, String nomU); public abstract DtComentario[] listarComentarios(Integer IDVideo); public abstract void nuevoComentario(Integer IDVideo, String nickU, DtFecha fecha, String contenido); public abstract void responderComentario(Integer IDVideo, Integer IDCR, String nickU, DtFecha fecha, String contenido); public abstract void valorarVideo(Integer IDVideo, String nickU, boolean valoracion); public abstract DtInfoVideo verDetallesVideoExt(Integer IDVideo); public abstract DtVideo infoAddVideo(Integer IDVideo); public abstract DtVideo[] listarVideos(); public abstract DtCategoria[] listarCategorias(); public abstract void crearCategoria(String nombreCat); public abstract boolean existeCategoria(String nombreCat); public abstract boolean memberVideo(Integer idVideo); public abstract DtVideo[] listarVideosPublicosPorNombre(String nombre); public abstract String getEstadoValoracion(Integer IDVideo, String nickUsuario); }
21ea191329dda6115aa2850d1fc71d20a17bcf81
[ "Java", "JavaScript", "Java Server Pages" ]
58
Java
marcobaldi97/tprogproyect
d110c5ce38ad5d286013654634e4b8622b602048
2fd9c896df8c38c179300ec2ffd088059308f823
refs/heads/master
<repo_name>yaohaif/github-service-103d709f-05f8-488f-8607-2fe829f2f1f1<file_sep>/README.md # github-service-103d709f-05f8-488f-8607-2fe829f2f1f1
e671c08383ae79483a5b3b9d882043479ec8c9aa
[ "Markdown" ]
1
Markdown
yaohaif/github-service-103d709f-05f8-488f-8607-2fe829f2f1f1
58809869168007466960cf7b385b4ae7946c540b
16b33fb58ab48076ee61a616132f73f529b0ac79
refs/heads/master
<repo_name>Malik056/Run<file_sep>/app/src/main/java/com/example/mobeen/run/GetDataService.java package com.example.mobeen.run; import com.example.mobeen.run.Models.Article; import com.example.mobeen.run.Models.Venue; import java.util.List; import retrofit2.Call; import retrofit2.http.GET; public interface GetDataService { @GET("hg2do") Call<List<Venue>> getAllVenues(); @GET("bsg4s") Call<List<Feed>> getAllArticles(); }<file_sep>/app/src/main/java/com/example/mobeen/run/BlogActivity.java package com.example.mobeen.run; import android.support.v7.app.AppCompatActivity; import android.os.Bundle; import android.text.method.ScrollingMovementMethod; import android.widget.TextView; public class BlogActivity extends AppCompatActivity { TextView contentTV; @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_blog); contentTV = findViewById(R.id.textView7); contentTV.setMovementMethod(new ScrollingMovementMethod()); } } <file_sep>/app/src/main/java/com/example/mobeen/run/FeedActivity.java package com.example.mobeen.run; import android.support.v7.app.AppCompatActivity; import android.os.Bundle; import android.support.v7.widget.DefaultItemAnimator; import android.support.v7.widget.LinearLayoutManager; import android.support.v7.widget.RecyclerView; import android.widget.Toast; import com.example.mobeen.run.Models.Article; import com.example.mobeen.run.Models.Venue; //import com.google.android.gms.maps.model.BitmapDescriptorFactory; //import com.google.android.gms.maps.model.LatLng; //import com.google.android.gms.maps.model.MarkerOptions; import java.util.ArrayList; import java.util.List; import retrofit2.Call; import retrofit2.Callback; import retrofit2.Response; public class FeedActivity extends AppCompatActivity { private List<Feed> feedList = new ArrayList<>(); private RecyclerView recyclerView; private FeedAdapter mAdapter; @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_feed); recyclerView = (RecyclerView) findViewById(R.id.recycler_view3); GetDataService service = RetrofitClientInstance.getRetrofitInstance().create(GetDataService.class); Call<List<Feed>> call = service.getAllArticles(); call.enqueue(new Callback<List<Feed>>() { @Override public void onResponse(Call<List<Feed>> call, Response<List<Feed>> response) { mAdapter = new FeedAdapter(response.body(),getApplicationContext()); RecyclerView.LayoutManager mLayoutManager = new LinearLayoutManager(getApplicationContext()); recyclerView.setLayoutManager(mLayoutManager); recyclerView.setItemAnimator(new DefaultItemAnimator()); recyclerView.setAdapter(mAdapter); } @Override public void onFailure(Call<List<Feed>> call, Throwable t) { Toast.makeText(getApplicationContext(), "Something went wrong...Please try later!", Toast.LENGTH_SHORT).show(); } }); } }<file_sep>/app/src/main/java/com/example/mobeen/run/Models/User.java package com.example.mobeen.run.Models; import com.google.firebase.database.IgnoreExtraProperties; @IgnoreExtraProperties public class User { String fullName; String teamName; public User() { } public User(String fullName, String teamName) { this.fullName = fullName; this.teamName = teamName; } public String getFullName() { return fullName; } public void setFullName(String fullName) { this.fullName = fullName; } public String getTeamName() { return teamName; } public void setTeamName(String teamName) { this.teamName = teamName; } } 
<file_sep>/app/src/main/java/com/example/mobeen/run/BookStadiumActivity.java package com.example.mobeen.run; import android.content.Context; import android.graphics.Color; import android.support.annotation.NonNull; import android.support.v7.app.AppCompatActivity; import android.os.Bundle; import android.support.v7.widget.LinearLayoutManager; import android.support.v7.widget.RecyclerView; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import android.widget.Button; import android.widget.TextView; import com.example.mobeen.run.Models.Ground; import java.util.ArrayList; import java.util.List; public class BookStadiumActivity extends AppCompatActivity { Button booked; TextView name; TextView price; @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_book_statdium); RecyclerView recyclerView = findViewById(R.id.bookingList); recyclerView.setLayoutManager(new LinearLayoutManager(getApplicationContext())); recyclerView.setAdapter(new BookingAdapter(getApplicationContext(), MapsActivity.grounds)); } } class BookingHolder extends RecyclerView.ViewHolder{ TextView name; TextView price; View background; Button booked; public BookingHolder(@NonNull View itemView) { super(itemView); background = itemView.findViewById(R.id.background); name = itemView.findViewById(R.id.s_name); price = itemView.findViewById(R.id.s_price); booked = itemView.findViewById(R.id.booked_btn); } } class BookingAdapter extends RecyclerView.Adapter<BookingHolder>{ Context context; List<Ground> grounds; BookingAdapter(Context context, List<Ground> groundList) { this.context = context; grounds = groundList; } @NonNull @Override public BookingHolder onCreateViewHolder(@NonNull ViewGroup viewGroup, int i) { View view = LayoutInflater.from(context).inflate(R.layout.book_stadium_item, viewGroup, false); return new BookingHolder(view); } @Override public void onBindViewHolder(@NonNull BookingHolder bookingHolder, final int i) { bookingHolder.name.setText(grounds.get(i).getName()); bookingHolder.price.setText(grounds.get(i).getPrice()); bookingHolder.booked.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { if(grounds.get(i).isBooked()) { grounds.get(i).setBooked(false); } else { grounds.get(i).setBooked(true); } } }); if(grounds.get(i).isBooked()) { bookingHolder.background.setBackgroundColor(Color.GREEN); } else { bookingHolder.background.setBackgroundColor(Color.WHITE); } } @Override public int getItemCount() { return grounds.size(); } } <file_sep>/app/src/main/java/com/example/mobeen/run/Models/Article.java package com.example.mobeen.run.Models; import com.google.gson.annotations.SerializedName; public class Article { @SerializedName("title") String titles; @SerializedName("author") String author; @SerializedName("date") String date; @SerializedName("content") String content; public Article(String titles, String author, String date, String content) { this.titles = titles; this.author = author; this.date = date; this.content = content; } public String getTitles() { return titles; } public void setTitles(String titles) { this.titles = titles; } public String getAuthor() { return author; } public void setAuthor(String author) { this.author = author; } public String getDate() { return date; } public void setDate(String date) { this.date = date; } public String getContent() { return content; } public void setContent(String content) { this.content = content; } } 
<file_sep>/app/src/main/java/com/example/mobeen/run/MapsActivity.java package com.example.mobeen.run; import android.content.Context; import android.support.v4.app.FragmentActivity; import android.os.Bundle; import android.widget.Toast; import com.example.mobeen.run.Models.Ground; import com.example.mobeen.run.Models.Venue; import com.google.android.gms.maps.CameraUpdateFactory; import com.google.android.gms.maps.GoogleMap; import com.google.android.gms.maps.OnMapReadyCallback; import com.google.android.gms.maps.SupportMapFragment; import com.google.android.gms.maps.model.BitmapDescriptorFactory; import com.google.android.gms.maps.model.LatLng; import com.google.android.gms.maps.model.MarkerOptions; import java.util.ArrayList; import java.util.List; import java.util.Random; import retrofit2.Call; import retrofit2.Callback; import retrofit2.Response; public class MapsActivity extends FragmentActivity implements OnMapReadyCallback { //implements OnMapReadyCallback private GoogleMap mMap; private Context context; public static List<Ground> grounds = new ArrayList<>(); @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_maps); context = this; // Obtain the SupportMapFragment and get notified when the map is ready to be used. SupportMapFragment mapFragment = (SupportMapFragment) getSupportFragmentManager() .findFragmentById(R.id.map); mapFragment.getMapAsync(this); } /** * Manipulates the map once available. * This callback is triggered when the map is ready to be used. * This is where we can add markers or lines, add listeners or move the camera. In this case, * we just add a marker near Sydney, Australia. * If Google Play services is not installed on the device, the user will be prompted to install * it inside the SupportMapFragment. This method will only be triggered once the user has * installed Google Play services and returned to the app. 
*/ @Override public void onMapReady(GoogleMap googleMap) { mMap = googleMap; GetDataService service = RetrofitClientInstance.getRetrofitInstance().create(GetDataService.class); Call<List<Venue>> call = service.getAllVenues(); call.enqueue(new Callback<List<Venue>>() { @Override public void onResponse(Call<List<Venue>> call, Response<List<Venue>> response) { Random random = new Random(); for(int i = 0; i < response.body().size(); i++){ LatLng venue1 = new LatLng(Double.parseDouble(response.body().get(i).getLatitude()),Double.parseDouble(response.body().get(i).getLongitude())); Ground g = new Ground(); g.setName(response.body().get(i).getName()); g.setPrice("" + (800 + random.nextInt(500))); g.setBooked(false); mMap.addMarker(new MarkerOptions().position(venue1).title(response.body().get(i).getName()).icon(BitmapDescriptorFactory.fromResource(R.drawable.stadium_icon)).snippet("venue")); } } @Override public void onFailure(Call<List<Venue>> call, Throwable t) { Toast.makeText(getApplicationContext(), "Something went wrong...Please try later!", Toast.LENGTH_SHORT).show(); } }); LatLng base = new LatLng(31.4812031, 74.3009524); mMap.moveCamera(CameraUpdateFactory.newLatLng(base)); mMap.setMinZoomPreference(12.0f); } } <file_sep>/app/src/main/java/com/example/mobeen/run/TeamsAdapter.java package com.example.mobeen.run; import android.support.v7.widget.RecyclerView; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import android.widget.TextView; import com.example.mobeen.run.Models.User; import java.util.List; public class TeamsAdapter extends RecyclerView.Adapter<TeamsAdapter.MyViewHolder> { private List<User> teamsList; public class MyViewHolder extends RecyclerView.ViewHolder { public TextView name, level, age; public MyViewHolder(View view) { super(view); name = (TextView) view.findViewById(R.id.tNameTV); level = (TextView) view.findViewById(R.id.tLevelTV); age = (TextView) view.findViewById(R.id.avgAgeTV); } } public TeamsAdapter(List<User> userList) { this.teamsList = userList; } @Override public MyViewHolder onCreateViewHolder(ViewGroup parent, int viewType) { View itemView = LayoutInflater.from(parent.getContext()) .inflate(R.layout.team_list_row, parent, false); return new MyViewHolder(itemView); } @Override public void onBindViewHolder(MyViewHolder holder, int position) { User user = teamsList.get(position); holder.name.setText((user.getTeamName())); holder.age.setText("Average Age: 23"); holder.level.setText("Level:0"); } @Override public int getItemCount() { return teamsList.size(); } }<file_sep>/app/src/main/java/com/example/mobeen/run/MessageActivity.java package com.example.mobeen.run; import android.graphics.Canvas; import android.support.v7.app.AppCompatActivity; import android.os.Bundle; import android.support.v7.widget.DefaultItemAnimator; import android.support.v7.widget.LinearLayoutManager; import android.support.v7.widget.RecyclerView; import android.support.v7.widget.helper.ItemTouchHelper; import com.example.mobeen.run.Models.User; import java.util.ArrayList; import java.util.List; public class MessageActivity extends AppCompatActivity { private List<User> movieList = new ArrayList<>(); private RecyclerView recyclerView; private TeamsAdapter mAdapter; SwipeController swipeController = null; @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_message); recyclerView = (RecyclerView) findViewById(R.id.recycler_view2); mAdapter = new 
TeamsAdapter(movieList); RecyclerView.LayoutManager mLayoutManager = new LinearLayoutManager(getApplicationContext()); recyclerView.setLayoutManager(mLayoutManager); recyclerView.setItemAnimator(new DefaultItemAnimator()); recyclerView.setAdapter(mAdapter); prepareTeamsData(); swipeController = new SwipeController(new SwipeControllerActions() { @Override public void onRightClicked(int position) { } }); ItemTouchHelper itemTouchhelper = new ItemTouchHelper(swipeController); itemTouchhelper.attachToRecyclerView(recyclerView); recyclerView.addItemDecoration(new RecyclerView.ItemDecoration() { @Override public void onDraw(Canvas c, RecyclerView parent, RecyclerView.State state) { swipeController.onDraw(c,"Let's Run"); } }); } private void prepareTeamsData() { User user = new User(" <NAME>","Real Madrid FC"); movieList.add(user); user = new User(" <NAME>","Real Madrid FC"); movieList.add(user); user = new User(" <NAME>","Real Madrid FC"); movieList.add(user); user = new User(" <NAME>","Real Madrid FC"); movieList.add(user); user = new User(" <NAME>","Real Madrid FC"); movieList.add(user); user = new User(" <NAME>","Real Madrid FC"); movieList.add(user); user = new User(" <NAME>","Real Madrid FC"); movieList.add(user); user = new User(" <NAME>","Real Madrid FC"); movieList.add(user); user = new User(" <NAME>","Real Madrid FC"); movieList.add(user); user = new User(" <NAME>","Real Madrid FC"); movieList.add(user); } }
5e50170a93ea5539a7298f1d9ade1abb7f62d0af
[ "Java" ]
9
Java
Malik056/Run
292429b8c2fd8493f6682cad606cda7e9c1bc42f
531e33fcdfdf4686b538863357428a93cc0659a0
refs/heads/master
<file_sep>/** * CS171 Final Project - What does SEX look like? * <NAME>, <NAME>, <NAME> */ var menu = d3.select("#menu"); // add caption var caption = menu.append("h2") .attr("id", "menu-caption"); // caption attributes var captionText = []; // add form var form = menu.append("form"); // tag categories var categories = [{id: "sh", name: "sexual health", children: ["cly", "syp", "hiv", "gon"]}, {id: "sb", name: "social behavior", children: ["teen", "gdp", "pop"]}, {id: "porn", name: "pornography usage", children: ["creampie", "teen-tag"]}]; // possible tags var tags = [{id: "cly", name: "chlamydia"}, {id: "syp", name: "syphilis"}, {id: "hiv", name: "HIV"}, {id: "gon", name: "gonorrhea"}, {id: "teen", name: "teen pregnancy"}, {id: "gdp", name: "GDP"}, {id: "pop", name: "population density"}, {id: "creampie", name: "teen tag"}, {id: "teen-tag", name: "creampie tag"}]; var currentCategory = null; // update form with form's children function updateForm(array) { // remove all (possible) nodes in form form.remove(); // add form form = menu.append("form"); // if already selected, add back button if(captionText.length > 0) { var backButton = form.append("i"); backButton .attr("class", "fa fa-arrow-left") .on("click", function() { // remove last tag captionText.pop(); updateForm(categories); }); } // generate buttons buttons = form.selectAll("input") .data(array) .enter() .append("input") .attr("type", "radio") .attr("id", function(d, i) { return "radio" + (i + 1); }) .attr("name", "radios") .attr("value", function(d) { return d.id; }); // add labels var labels = form.selectAll("input").each(function(d, i) { var label = document.createElement("label"); label.setAttribute("for", "radio" + (i + 1)); label.innerHTML = array[i].name; this.parentNode.insertBefore(label, this.nextSibling); }); // update caption text if(captionText.length > 0) { text = "compare"; captionText.forEach(function(d, i) { text += " " + d.name; // format punctuation and grammar if(i == captionText.length - 1 && captionText.length < 3) { text += " and..." } else if(i < captionText.length - 1 && !(i == 1 && captionText.length == 3)){ text += ", "; } else if(i == 1 && captionText.length == 3) { text += " and "; } }); caption.text(text); } else if(currentCategory){ caption.text("show me " + currentCategory + " data on..."); } else { caption.text("show me data from..."); } // set radio button toggle for first selection array.forEach(function(d, i) { d3.select("input[value=\"" + d.id + "\"]"). on("click", function() { // go into category if(d.children) { currentCategory = d.name; updateForm(tags.filter(function(el, j) { return (d.children.indexOf(el.id) > -1) && (captionText.indexOf(el) < 0); })); } // add element to caption and return to categories else { currentCategory = null; // add category to caption captionText.push(d); console.log(d.id); loadFactor(d.id); //updateForm(categories); } }); }); } updateForm(categories); /*<form> // set radio button toggle for first selection categories.forEach(function(d, i) { d3.select("input[value=\"" + d.id + "\"]"). 
on("click", function() { loadFactor(d.id); captionText.push(d.name); }); }); <p>show me data from...</p> <input type="radio" id="radio1" name="radios" value="cly"> <label for="radio1">Chlamydia</label> <input type="radio" id="radio2" name="radios" value="gon"> <label for="radio2">Gonorrhea</label> <input type="radio" id="radio3" name="radios" value="teen"> <label for="radio3">Teen Pregnancy Rates</label> <input type="radio" id="radio4" name="radios" value="creampie"> <label for="radio4">"Creampie" tag</label> <input type="radio" id="radio5" name="radios" value="teen-tag"> <label for="radio5">"Teen" tag"</label> <input type="radio" id="radio6" name="radios" value="syp"> <label for="radio6">Syphilis</label> </form>*/<file_sep>**CS171 Final Project <NAME> <NAME> <NAME>** What is the landscape of sex in the United States? Here we sought to illuminate this complexity by bringing together various aspects of sex and sexual health. Please find below a link to further information about our visualization! **IMPORTANT WHEN VIEWING VIS** If things are loading in weird places, zoom out and try reloading the page for the viz to pop into allignment. Finally, data can take a little while to load in at the start of the page. Please take a second to breathe in, enjoy being sentient, and then click away :) **Starting with the folders:** Data: Hosts all our .csv files. All data was downloaded and manually loaded. As also explained in the process book, the most comprehensive and publically released recent Center for Disease Control survey on STIs nationwide was conducted in 2012. Thus, all our data was collected from the CDC files and other sources with that specific year in mind. These were undynamic files because of their hard-coded historical values, so we did not fear our vis would suffer being "outdated" if not pulled directly from the web. Plus, downloading and converting on our own allowed us to set up the CSV files in more managable ways. js: these are our libraries: TOPOjson, d3, jquery, and radar-chart.js. TOPOjson was used to plot the map (used pset 4 as a springboard), d3 was used for dynamic graphs, jquery for css selectors, and radar for the radar chart. Lines 165-175 of the radar.js file were written additionally by Lucas. The rest is from the repo. **Scripts** can be found in scripts.js, **style** in styles.css, and the **homepage** in index.html. Screen Capture! https://www.youtube.com/watch?v=vT6egRltdv4&feature=youtu.be
3318cb301bfe7b3c696d63a7f2f36815a86c9d37
[ "Markdown", "JavaScript" ]
2
Markdown
vivianleung/cs171-final
027f674f9a462fcaeae1afac27bf5df180e343e4
74d7db236e212e373ca4be373d26d89ba65bbe82
refs/heads/main
<repo_name>steversonTong/Random-Password-Generator-<file_sep>/homework3/script.js // Assignment Code var generateBtn = document.querySelector("#generate"); var lengthInput = document.querySelector("#charLength"); // Write password to the #password input function writePassword() { var password = generatePassword(); var passwordText = document.querySelector("#password"); passwordText.value = password; } function charlength(){ var numberOfChar = lengthInput.value; if(numberOfChar < 8) { alert("The number of characters you asked for is less than 8! Please enter a character length between 8 and 128!"); return 0; } if (numberOfChar > 128) { alert("The number of characters you asked for is greater than 128! Please enter a character length between 8 and 128!"); return 0; } else{ return numberOfChar; } } function generatePassword() { var length = charlength(); var char = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890`=~!#$%^&*()+[]{};:,/<>?"; var returnValue = ""; for(var i = 0, n = char.length; i < length; i++) { returnValue += char.charAt(Math.floor(Math.random() * n)); } return returnValue; } // Add event listener to generate button generateBtn.addEventListener("click", writePassword); <file_sep>/README.md # Homework-3 This is my repo for hw3
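The script above looks up three elements by id, but the page markup itself is not included in this dump; a minimal, purely illustrative hookup would be something like the following (the element types and labels are assumptions — only the three ids come from the script):

    <input id="charLength" type="number" placeholder="Length (8-128)">
    <button id="generate">Generate Password</button>
    <textarea id="password" readonly></textarea>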
7febd61ee363f70fb06b14b9b6911a63bfafca7a
[ "Markdown", "JavaScript" ]
2
Markdown
steversonTong/Random-Password-Generator-
98886261c1a30bce6265a019912b9cb84ad8d5e5
5edaf5ddbcb929ed31e205e844d91052d39c33b9
refs/heads/main
<file_sep>import socket, sys, time from datetime import datetime, timedelta from enum import Enum from threading import Thread from requests import get from cryptography.fernet import Fernet key = Fernet.generate_key() f = Fernet(key) host, port = 0, 0 superUserPassword = '' # &e = message end, to stop 2 messages sent after one another to get combined without using async (it splits in client.py). users = list() # All users, active and inactive. connections = dict() # address: connection activeUsers = dict() # address: user bannedIPs = list() # address[0] class Restriction(Enum): # Might turn into flag later none = 0 ban = 1 timeout = 2 slowmode = 3 readonly = 4 nofiles = 5 # Can't share files class User: # Change attr(?)+, delete self? global users, connections global f lastAddress = 0 # Used for unbanning purposes currentAddress = 0 # Also used to check if account is active/logged in. flagType = Restriction.none # Flagged for timeout, slowmode, etc. restrictionTimer = 0 # Time (in seconds) that user is timed out, or between each message in the case of slow mode lastMessagedTime = 0 # For slowmode or timeout def __init__(self, name, password, address): self.name = name # Since all names must be unique, there's no need for a separate id. self._password = <PASSWORD> self.login(address, name, password) def login(self, address, username, password): # Logs in, if possible, and sends back whether successful. correctInfo = self.name == username and self._password == password alreadyActive = self.currentAddress != 0 if correctInfo and not alreadyActive: self.currentAddress = address self.send('&l') # Command to tell the client they're logged in. print(f"{self.name}: {self.currentAddress}.") # Server side time.sleep(0.05) # Sleeping might not be the best solution, but I can't be fucked to use async + threading (yet). self.send(f'Welcome, {self.name}.') self.sendToAll(message=f'{self.name} entered the chat.', noname=True) return correctInfo and not alreadyActive def logout(self, banned=False): self.sendToAll(message=f'{self.name} has been banned from the chat.' if banned else f'{self.name} left the chat.', noname=True) # Turn into switch-case when more logout options are added (e.g. timeout). self.send('&b' if banned else '&c') connections[self.currentAddress].close() activeUsers.pop(self.currentAddress) connections.pop(self.currentAddress) self.lastAddress, self.currentAddress = self.currentAddress, 0 print(f'{self.name} logged out.') # Server side def send(self, message): # Wrapping connection.sendall() so that all actions are done through the user, not some user and some connection. connections[self.currentAddress].sendall(f.encrypt(f"{message}&e".encode())) def whisper(self, username, message): if user := getUser(username): connections[user.currentAddress].sendall(f.encrypt(f"&w {self.name} (whispered): {message}&e".encode())) def sendToAll(self, message, noname=False): # Send message to all others in the chat. if len(connections) > 1: for toAddress in [x for x in activeUsers.keys() if x != self.currentAddress]: # So we don't send back to the sender data = f"{message}&e" if noname else f"{self.name}: {message}&e" connections[toAddress].sendall(f.encrypt(data.encode())) class SuperUser(User): global activeUsers global bannedIPs def ban(self, username): if user := getUser(username): bannedIPs.append(user.currentAddress[0]) # [0] To just take IP, not the port - which changes upon reconnection. 
user.logout(banned=True) user.flagType = Restriction.ban return self.send('No user was found with that name.') def unban(self, username): if user := getUser(username): bannedIPs.remove(user.lastAddress[0]) user.flagType = Restriction.none self.send(f'{user.name} has been unbanned. No one else can see this message.') def slowmode(self, username, timelength): if user := getUser(username): user.flagType = Restriction.slowmode user.restrictionTimer = int(timelength) def timeout(self, username, timelength): if user := getUser(username): user.flagType = Restriction.timeout user.restrictionTimer = int(timelength) def getInput(connection, message): # Networking version of input(message). FP and not OOP, because this function is only used by clients who haven't been assigned to users yet. global f connection.sendall(f.encrypt(f"{message}&e".encode())) return f.decrypt(connection.recv(4096)).decode() def getUser(username): for userCheck in users: if userCheck.name == username: return userCheck return False def userLogin(connection, address): # Logging in (or registering). global users, activeUsers global f, superUserPassword while True: option = getInput(connection, 'Select option: (L)ogin, (R)egister, (S)uperuser registration.') if option == 'R' or option == 'S': # Registering if option == 'S': suPassword = getInput(connection, 'Enter superuser password:') if suPassword != superUserPassword: connection.sendall(f.encrypt('Incorrect SUP.'.encode())) break username = getInput(connection, 'Set username:') password = getInput(connection, 'Set password:') if username == 'You' or username[0] == '!' or username[0] == '&': connection.sendall(f.encrypt("Don't make this more confusing for others.&e".encode())) time.sleep(0.05) continue elif len(username.split()) > 1: connection.sendall(f.encrypt("Sorry, but you can't have spaces in your username.&e".encode())) time.sleep(0.05) continue if getUser(username) is False: user = User(username, password, address) if option == 'R' else SuperUser(username, password, address) users.append(user) activeUsers[address] = user return user else: connection.sendall(f.encrypt('Username taken, try again.'.encode())) elif option == 'L': # Logging in if len(users) > 0: username = getInput(connection, 'Username:') password = getInput(connection, 'Password:') for checkUser in users: if checkUser.login(address, username, password): users.append(checkUser) activeUsers[address] = checkUser user = checkUser return user connection.sendall(f.encrypt('Login failed, please try again.'.encode())) else: connection.sendall(f.encrypt('No accounts have been registered on this server yet.'.encode())) time.sleep(0.05) def thread_recv(connection, address): # Receiving messages from clients, and caching them to send to others. global connections global f user = userLogin(connection, address) while True: data = f.decrypt(connection.recv(4096)).decode() if not data: # If it's empty continue '''try: if user.flagType is not Restriction.none: # Restrictions if datetime.now() - user.lastMessagedTime < timedelta(seconds=user.restrictionTimer): user.send("Sorry, but you can't send anything right now") # Make more clear later (type of restriction). continue except: print(user)''' # Continue on this later # Clean this all once switch cases are implemented if data == '&_b': # Ban confirmation from user end (also needed to close on user end). break elif data[0] == '&': user.send("Unable to send message (cannot start with '&', seeing as that is used for server commands).") elif data[0] == '!': if data == '!c': # Need...switch cases... 
user.logout() break elif data == '!h': user.send(' !h: help\n !c: close\n !w <username>: whisper to user\n !f <filepath>: share a file (TBI)' + ('' if not isinstance(user, SuperUser) else '\n !b <username>: ban user\n !B <username>: unban user' '\n !t <username> <seconds>: timeout user (TBI)\n !s <username> <seconds>: slowmode chat on user (TBI)')) elif data.split()[0] == '!w': user.whisper(data.split()[1], ' '.join(data.split()[2::])) elif data.split()[0] == '!b' or data.split()[0] == '!B': # If the first argument is '!b' or '!B' if isinstance(user, SuperUser): if len(data.split()) == 2: if data.split()[0] == '!b': user.ban(data.split()[1]) else: user.unban(data.split()[1]) else: user.send('Too many or too few arguments given.') else: user.send("You don't have permission to use this function.") elif data.split()[0] == '!f': if user.flagType != Restriction.nofiles: if len(data.split()) == 2: pass # Get file, store somehow else: user.send('Too many or too few arguments given.') else: user.send("You don't have permission to use this function.") else: user.send('Command not found. Type !h for help') else: user.sendToAll(data) def thread_accept(): # Accepting new connections. global s, connections global f while True: conn, addr = s.accept() connections[addr] = conn if addr[0] not in bannedIPs: Thread(target=thread_recv, args=(conn, addr)).start() else: conn.sendall(f.encrypt("&b".encode())) # Not tested/implemented completely yet. if __name__ == "__main__": if len(sys.argv) >= 4: # Rewrite to a nice, clean match case once available if sys.argv[1] == '-L': host = 'localhost' elif sys.argv[1] == '-W': host = socket.gethostbyname(socket.gethostname()) else: print('Invalid IP argument.') sys.exit() port = int(sys.argv[2]) # 12082 superUserPassword = sys.argv[3] fileSharing = '-F' in sys.argv if '-C' in sys.argv: # For later import pyperclip3 pyperclip3.copy(key.decode()) '''if fileSharing: filePath = sys.argv[5] if os.path.isabs(filePath) else '' # Finish this later.''' else: print('Too few arguments.') sys.exit() if sys.argv[1] == '-W': print(f"Public: {get('https://api.ipify.org').text}, Private: {socket.gethostbyname(socket.gethostname())}.") print(f"Key: {key.decode()}") s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind((host, port)) s.listen(32) Thread(target=thread_accept).start() <file_sep>import socket, sys, time from threading import Thread from cryptography.fernet import Fernet #import clientGUI # 0.4 Update: Change client.py to only logic and insert IO/UI stuff in clientGUI.py key = 0 f = 0 loggedIn = False def sendtoserver(s): while True: argument = input('You: ') # Password typing hiding later (when switching to GUI). argumentEncrypted = f.encrypt(argument.encode()) if argument == '!c': print('Closing Connection...') s.sendall(argumentEncrypted) raise SystemExit s.sendall(argumentEncrypted) def getfromserver(s): global loggedIn, f while True: rawdata = f.decrypt(s.recv(4096)).decode() for data in rawdata.split('&e'): # To stop messages combining. (See server.py) if not data: # If it's empty continue if len(data.split()) > 1 and data.split()[0] == '&w': print(f"\r{' '.join(data.split()[1::])}\nYou: ", end='') elif data == '&c': s.close() raise SystemExit elif data == '&l': loggedIn = True continue elif data == '&b': # Fix input threading stuff later. 
s.sendall(f.encrypt('&_b'.encode())) time.sleep(0.05) print("\rYou've been banned from this server.") s.close() raise SystemExit else: print(f"\r{data}") print("\rYou: " if loggedIn else f"\r> ", end='') if __name__ == "__main__": host, port = 0, 0 try: host = 'localhost' if sys.argv[1] == '-L' else sys.argv[1] port = int(sys.argv[2]) # 12082 f = Fernet(sys.argv[3].encode()) except Exception as error: print('Invalid credentials.') sys.exit() s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((host, port)) Thread(target=sendtoserver, args=(s, )).start() Thread(target=getfromserver, args=(s, )).start() <file_sep>class User: # Permissions active = False currentAddress = 0 lastMessagedTime = 0 def __init__(self, name, id, password, address): self.name = name self.id = id self._password = <PASSWORD> self.active = True self.currentAddress = address def checkLogin(self, username, password): return self.name == username and self._password == password def login(self, address): self.active = True self.currentAddress = address def logout(self): self.active = False self.currentAddress = 0 def changeName(self, name): self.name = name def changeName(self, password): self._password = <PASSWORD> class SuperUser(User): def ban(self, user): pass def kick(self, user): pass def promote(self, user): pass def demote(self, superuser): pass def restrict(self, user): pass
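A short usage sketch, inferred only from the argument parsing in server.py and client.py above (the port number and password are placeholders, not values required by the project):

    python server.py -L 12082 mySuperUserPassword       # serve on localhost; the Fernet key is printed on start
    python server.py -W 12082 mySuperUserPassword -C    # serve on the machine's network address and copy the key to the clipboard
    python client.py -L 12082 <key printed by the server>

Every message travels encrypted with that shared Fernet key, and the server appends the '&e' marker so the client can split messages that arrive back-to-back (see getfromserver in client.py).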
17748e6ec7a9a669655e75b2b27d0e8b2dbc35b6
[ "Python" ]
3
Python
RobertvdLeeuw/ChatProgram
136973a6d22d8200e23555f6f00e91c17498ee11
b411f610b02a09004629d1eae4a208e11ecf74e5
refs/heads/master
<file_sep>(ns reagent-boot.core (:require [reagent.dom :as dom])) (defn simple-component [] [:div [:p "I am a component!"] [:p.someclass "I have " [:strong "bold"] [:span {:style {:color "red"}} " and red "] "text."]]) (defn ^:export init [] (.log js/console "Hello from ClojureScript!") (dom/render [simple-component] (.getElementById js/document "app"))) <file_sep>A simple project to get started with Reagent and boot. Usage: 1. `boot dev` to start the tasks and set up a nREPL server 2. Open another terminal, `boot repl -c` to start a nREPL client, and type `(start-repl)` to start the bREPL References: - https://reagent-project.github.io/ - https://github.com/magomimmo/modern-cljs - https://github.com/boot-clj/boot
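As a small illustration of how the pattern above extends to components that take arguments (the `greeting` component and its "world" argument are made up for this sketch, not part of the template):

    (defn greeting [name]
      [:p "Hello, " name "!"])

    ;; mounted exactly like simple-component above:
    (dom/render [greeting "world"] (.getElementById js/document "app"))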
cf862a935459b3bfb2c0825e98c3ff4e3450cf66
[ "Clojure", "Markdown" ]
2
Clojure
whatacold/reagent-boot-template
d791a17d51f85b017749093578f5bdeebce050f0
a3ff0e9e4a5a2ae94cf21fd677123d342dde2893
refs/heads/master
<repo_name>andwaitforit/YelpSentimentAnalysis<file_sep>/README.md # YelpSentimentAnalysis A sentiment analysis of yelp reviews
eef6fd02fc74ad1846498e686d13b08a36ebb8da
[ "Markdown" ]
1
Markdown
andwaitforit/YelpSentimentAnalysis
bc0016ef6d83503e0107be4d093351e717d0797e
63f79141696c835452f460da6d4f6677d801294c
refs/heads/master
<file_sep>package com.nit.service; import java.util.Arrays; import org.springframework.beans.BeanUtils; import org.springframework.beans.factory.BeanFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Service; import com.nit.dto.CustomerDTO; import com.nit.entity.Customer; import com.nit.repo.CustomerRepo; @Service("custService") public class CustomerMnmtServiceImpl implements CustomerMnmtService { @Autowired private CustomerRepo custRepo; @Override public String registerCustomer(CustomerDTO dto) { System.out.println(custRepo.getClass()+"--->"+Arrays.toString(custRepo.getClass().getInterfaces())); Customer cust=null; // Convert dto to entity cust=new Customer(); BeanUtils.copyProperties(dto, cust); //use Repo cust=custRepo.save(cust); return cust!=null?"Object Saved with id"+cust.getCno():"Object Not save"; } } <file_sep>rootProject.name = 'SpringDataProj1-CRUDRepo-Directmethod-1'
f03b2194c6d1c24a13eaf13caf0643aa5c1fe18f
[ "Java", "Gradle" ]
2
Java
thevishalvairal/SpringBoot
ba56b963d031b5799f79ad4c7f293c687a6f4d78
4f4b87242cd0363073a3cff8e6fde0f936646a41
refs/heads/master
<repo_name>Codefeed-Org/kpm_module_premake<file_sep>/README.md # kpm_module_premake Krypton module to support premake. <file_sep>/run.lua function getopt( arg, options ) local tab = {} for k, v in ipairs(arg) do if string.sub( v, 1, 2) == "--" then local x = string.find( v, "=", 1, true ) if x then tab[ string.sub( v, 3, x-1 ) ] = string.sub( v, x+1 ) else tab[ string.sub( v, 3 ) ] = true end elseif string.sub( v, 1, 1 ) == "-" then local y = 2 local l = string.len(v) local jopt while ( y <= l ) do jopt = string.sub( v, y, y ) if string.find( options, jopt, 1, true ) then if y < l then tab[ jopt ] = string.sub( v, y+1 ) y = l else tab[ jopt ] = arg[ k + 1 ] end else tab[ jopt ] = true end y = y + 1 end end end return tab end function open_temp_script() local handle local fname while true do fname = "yourfile" .. tostring(math.random(11111111,99999999)) .. ".bat" handle = io.open(fname, "r") if not handle then handle = io.open(fname, "w") break end io.close(handle) end return handle, fname end function exists(file) local ok, err, code = os.rename(file, file) if not ok then if code == 13 then -- Permission denied, but it exists return true end end return ok, err end -- Generates and runs a temporary batch script that invokes premake5 and then builds the solution with devenv. function premakeBuild(options) script, name = open_temp_script() script:write("premake5 vs2019 ..\n") script:write("devenv " .. options["name"] .. " /Build " .. options["buildtype"] .. " \n") io.close(script) os.execute(name) os.remove(name) end options = getopt(arg, "") --options["builddir"] --options["buildtype"] --options["creates"] --options["name"] --options["parameters"] if options["buildtype"] == nil then options["buildtype"] = "Release" end if options["parameters"] == nil then options["parameters"] = "" end local build = true if options["creates"] ~= nil then if exists(options["creates"]) then build = false end end if build then premakeBuild(options) end print(options["builddir"]) local result = {path = options["builddir"] .. "\\" .. options["buildtype"]} return result
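Going by the option names read in the script above, an invocation would look roughly like the following (the paths and solution name are placeholders; only the flag names come from the code):

    lua run.lua --builddir=..\build --name=MySolution.sln --buildtype=Release --creates=..\build\Release\MyLib.lib

When the file named by --creates already exists, the Visual Studio build step is skipped, and the script returns a table of the form { path = "<builddir>\\<buildtype>" } for the caller (presumably kpm) to consume.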
5bcae9eeb6bf204e82840825903454e7d828836d
[ "Markdown", "Lua" ]
2
Markdown
Codefeed-Org/kpm_module_premake
0dac5f1ba8e5eb8dd8384c571391bdd69d5da06a
b72a26140d71fbc14a7dd2d3aa85297a3ff2fe2b
refs/heads/master
<repo_name>jaimev408/basicML<file_sep>/HW4/LAHW4.Rmd --- title: "HW 4 Linear Algebra" author: "<NAME>, <NAME>, <NAME>" date: "12/8/2019" output: html_document: number_sections: no toc: yes toc_float: collapsed: no pdf_document: toc: yes --- ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE) ``` # Assignment Intro Problem Statement Classification by computer of handwritten digits is a standard problem in pattern recognition. The typical application is automatic reading of zip codes on envelopes. In this assignment you’ll address the following problem: Given a set of manually classified digits (the training set), classify a set of unknown digits (the test set) using SVD method. Data Set We will be using the US postal Service database that contains 1707 training and 2007 test digits (uploaded to Canvas). Each image is a grayscale 16x16 image that is The training images are stored in trainInput.csv. (256x1707). • The correct digit corresponding to each column of trainInput is stored in trainOutput.csv.(1x1707). • The test images are stored in testInput.csv. (256x2007). • The correct digit corresponding to each column of testInput is stored in testOutput.csv.(1x2007). # Procedure We start the project by reading the data in and transposing the trainInput, trainOutput, and testInputDataframes. We also read the libraries we will use. ```{r, echo=TRUE} library(MASS) trainInput = read.csv("C:/Users/lalin90/Downloads/trainInput.csv", header= FALSE) trainOutput = read.csv("C:/Users/lalin90/Downloads/trainOutput.csv", header = FALSE) testInput = read.csv("C:/Users/lalin90/Downloads/testInput.csv", header = FALSE) testOutput = as.matrix(read.csv("C:/Users/lalin90/Downloads/testOutput.csv", header = FALSE)) trainInputTran = t(trainInput) trainOutputTran = t(trainOutput) testInputTran = t(testInput) trainInputOutput = cbind(trainInputTran, trainOutputTran) ``` Afterwards, we use a loop to do the following: - make 10 different matrices from the trainInput classified by the trainOutput to seperate the rows of digits by the number they represent - determine an SVD on each of these matrices - use the `v` part of the SVD model to represent singular digit images - Only use the first 20 singular images of each digit - Solve a least squares solution for the testInput data and the first 20 colums of the singular digit images - calculate the residual error ```{r, echo=TRUE} for (i in 0:9){ trainInput = trainInputOutput[trainInputOutput[,257] == i,c(1:256)] svdV = svd(trainInput) svdTop = svdV$v[,1:20] One = apply(testInputTran,1, function(x) ginv(svdTop)%*%x) Two = apply(One,2, function(y) svdTop%*%y) Three =testInputTran-t(Two) err = apply(Three, 1, function(z) sqrt(sum(z^2,0))) nam <- paste("err", i, sep = "") assign(nam, err) } ``` # Results Once we have the residual error, we use it to approximate digits based on the smallest error. We compare the results of our model to the actual results obtrained from the testOutput dataset. ```{r} errors <- cbind(err0,err1,err2,err3,err4,err5,err6,err7,err8,err9) trueO = as.matrix(apply( errors, 1, function(x) which.min(x) - 1)) table(testOutput,trueO) ``` Based on the table above, we can clearly see that the diagonals hold the biggest values by far, meaning that the models mostly matches the true output. <file_sep>/HW3/finalHW3LA.Rmd --- title: "Principal Component Analysis Assignment" author: "<NAME>,<NAME>,<NAME>" date: "11/16/2019" output: html_document: number_sections: no toc: yes toc_float: collapsed: no pdf_document: toc: yes --- # Function 1. 
<b>The function must have 2 inputs:</b> + <b>a data frame that contains the data set</b> + <b>a scaler that denotes the desired percentage of variance preserved in the transformed data set</b> 2. <b>The function must have 3 outputs returned in a list, called number, PCA, and transformed:</b> + <b>the minimum number of principal components that is necessary to retain the desired variance specified by the user</b> + <b>the principal components corresponding to previous part, in a matrix</b> + <b>the transformed data set in a matrix</b> This function takes two parameters, a dataframe data and a percentage varPercent. It calculates the minimum number of PCAs necesary to reach the variance percentage, the PCA components, and the new transformed matrix and returns all three in a list. ```{r } library(psych) MyPCA<-function(df,scaler){ newmatrix=data.matrix(df, rownames.force = NA) #transform dataframe to a matrix datamean=colMeans(newmatrix) #center the matrix (notice the vectors are row-distributed in a dataframe) datacentered=sweep(newmatrix,2,datamean) S=1/(nrow(newmatrix)-1)*(t(datacentered)%*%datacentered) #find the convariance matrix Eigenvalue=eigen(S)$value #find eigenvector and eigenvalue Eigenvector=eigen(S)$vectors var=Eigenvalue/tr(S) #find the fraction of variance each principle components contributes a=0 i=1 k=0 while (i<=length(var)) { a=a+var[i] k=i i=i+1 if (a>= scaler){ i=length(var)+1} } number=k #the minimum numbers of principle components required to meet desired variance P=Eigenvector[,1:number] #the principal components corresponding to k Y=t(P)%*%t(datacentered) PCA=P transformed=t(Y) #transformed dataset in a matrix finalresult<-list(number,PCA,transformed) finalresult } ``` # Output <b> Run your function on the first 5 features of R longley data set with the desired retained variance of 0.98. Longley data set is a data frame with 7 features related to economics. You will use the following features for this assignment: GNP.deflator, GNP, Unemployed, Armed.Forces, and Population. </b> We will test the function with the first 5 features of R's longley dataset and a desired variance of 0.98. ```{r } longleysub<-longley[,1:5] MyPCA(longleysub,0.98) ``` The list returned above shows us the minimum number needed to retain the submitted variance, the principal components, and the new transformed dataset.
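As an informal cross-check that is not part of the original assignment, the same retained-variance figures can be read off R's built-in `prcomp` (which, like `MyPCA`, centers but does not scale by default); the cumulative proportions below should agree with the eigenvalue ratios computed inside the function:

```{r}
# illustrative sanity check, reusing the longleysub data frame from above
pc <- prcomp(longleysub, center = TRUE, scale. = FALSE)
cumsum(pc$sdev^2) / sum(pc$sdev^2)  # cumulative fraction of variance per component
```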
9ec900d8e7c56bf8adc70e29d5bbd215f59ab1ef
[ "RMarkdown" ]
2
RMarkdown
jaimev408/basicML
7809dd0d8ecfa7ab4a9adc08b14a5e95376df941
399aefa849b0b8dfbd5211f81decfb90400bcbbb
refs/heads/master
<file_sep>// Random index picker function sorteio(min, max) { return min + Math.floor(Math.random() * (max - min)); } // Character sets var numeros = "0123456789"; var letramini = "abcdefghijklmnopqrstuvwxyz"; var letramaiusc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"; var caracteres = "`~!@#$%^&*()-_=+[{]}\\|;:'\",<.>/? "; function sorteiodeCaracter(number, lower, upper, other, extra) { var adicional = extra; if (number == true) { adicional += numeros; } if (lower == true) { adicional += letramini; } if (upper == true) { adicional += letramaiusc; } if (other == true) { adicional += caracteres; } return adicional.charAt(sorteio(0, adicional.length)); } function getSenha(length, extraC, primeiroNumero, mini, maiusc, outros, segundoNumero, miniSeg, maiuscSeg, outrosSeg) { var senhaTest = ""; if (length < 3 || length > 30) { alert("This number is invalid! Please enter a number between 3 and 30!"); } if (length > 0) { senhaTest = senhaTest + sorteiodeCaracter(primeiroNumero, mini, maiusc, outros, extraC); for (var i = 1; i < length; ++i) { senhaTest = senhaTest + sorteiodeCaracter(segundoNumero, miniSeg, maiuscSeg, outrosSeg, extraC); } } return senhaTest; }<file_sep># Nucleo-de-desenvolvimento
5e9f17d2d796edf31157c6f91786d181953604bf
[ "Markdown", "JavaScript" ]
2
Markdown
Jessica-Silvaj/Nucleo-de-desenvolvimento
9727605fad68032ce87dd9a93a3d558d70d5a8b0
34c99e296056eb2d1eea8eb315368fc09700898a
refs/heads/master
<file_sep># Password-Manager It's not the best, but it's mine. I would be very happy if you could improve it. <file_sep>import hashlib global MainHash MainHash = None def Hashing(): global MainHash UserName = input("Enter your username:\n") UserPassword = input("Enter your password:\n") TheHash = hashlib.new('whirlpool') TheHash.update(bytes(UserPassword, 'utf-8')) PasswordHash = TheHash.hexdigest() TheHash.update(bytes(UserName, 'utf-8')) UserHash = TheHash.hexdigest() MainHash = (PasswordHash + UserHash)*300 Hashing() while True: action = input("Encrypt|1| Decrypt|2| Change user|3| Exit|4|\n") def otp_encrypt(message: str, key:str): assert(len(message) <= len(key)) m = message.encode() k = key.encode() l = [] for counter, i in enumerate(m): l.append(i ^ k[counter]) return bytes(l) if action == "1": import string zahl1 = string.ascii_letters + string.digits + string.punctuation from random import randint def pick(words): num_words = len(words) num_picked = randint(0, num_words -1) word_picked = words[num_picked] return word_picked def pick_new(long): for _ in range(int(long)): global Password Password += pick(zahl1) print(Password) GoodPassw(long) def GoodPassw(long): askME = input("Is the password good? (y/no) ") if askME == "no": global Password Password = "" print("ok") pick_new(long) def newPassword(): long = input("How long should the password be? ") global Password Password = "" pick_new(long) print('Type "skip" to skip a field') NameOfProgramm = input("What is the program called? ") if NameOfProgramm == "skip" or NameOfProgramm == "": NameOfProgramm = "" else: NameOfProgramm = NameOfProgramm + ": " Name = input("What is your name? ") if Name == "skip" or Name == "": Name = "" else: Name = "|Name:" + Name + "|" Username = input("What is your username? ") if Username == "skip" or Username == "": Username = "" else: Username = "|Username:" + Username + "|" Mail = input("What is the e-mail address?") if Mail == "skip" or Mail == "": Mail = "" else: Mail = "|E-Mail : " + Mail + "|" Password = input("What is the password? Enter 'neu' to generate a new one: ") if Password == "<PASSWORD>" or Password == "": Password = "" elif Password == "neu": newPassword() Password = "|Password:" + Password + "|" else: Password = "|Password:" + Password + "|" Date = input("What is your birthday? ") if Date == "skip" or Date == "": Date = "" else: Date = "|Data:" + Date + "|" with open("Test_Password", "a") as f: passw = NameOfProgramm +Password + Name + Username + Mail + Date cipher = str(otp_encrypt(passw, MainHash).decode()) f.write(cipher.replace("\n","")+ "\n") elif action == "2": def opt_decrypt(ciphertext:bytes, key:str): k = key.encode() l = [] for counter, i in enumerate(ciphertext): l.append(i ^ k[counter]) return bytes(l).decode('utf-8') with open("Test_Password", "rb") as f: f = f.readlines() for i in range(len(f)): cipher = f[i] print(opt_decrypt(cipher, MainHash)) elif action == "3": Hashing() elif action == "4": break
e16f9c174f277c2bad4ce228cb61d86f473e7907
[ "Markdown", "Python" ]
2
Markdown
slin100/Password-Manager
847927f43889c97fafbcc747c285db47cb674740
ce7b34846080e6d4f522e28c995d18073a54acd5
refs/heads/master
<file_sep>from django.shortcuts import render from django.http import HttpResponseRedirect from django.core.urlresolvers import reverse from django.shortcuts import get_object_or_404 from django.views import View from django.views.generic import ListView, CreateView, DetailView # from ipware.ip import get_real_ip from .models import File, DLink, RequestInfo, Directory from .forms import FileForm class FileCreate(View): # Rewrite to forms? create view? form_class = FileForm template_name = 'upload_form.html' def get(self, request, *args, **kwargs): form = self.form_class() return render(request, self.template_name, {'form': form}) def post(self, request, *args, **kwargs): form = self.form_class(request.POST, request.FILES) if form.is_valid(): new_file = File(file=request.FILES['file']) new_file.save() return HttpResponseRedirect(reverse('file-list')) else: print("nie walid? :(") return render(request, self.template_name, {'form': form}) class FileList(View): template_name = 'list.html' def get(self, request, *args, **kwargs): all_files = File.objects.all() return render(request, self.template_name, {'files': all_files}) def file_details(request, id): # TODO: Authorize return HttpResponseRedirect(reverse('stats-detail', args=[id])) class StatsDetail(View): template_name = 'f_stats.html' def get(self, request, *args, **kwargs): file_object = get_object_or_404(File, id=self.kwargs['id']) visited = RequestInfo.objects.filter(file=file_object).count() context = {"file": file_object, "visited": visited} return render(request, self.template_name, context) class DLinkList(ListView): model = DLink template_name = 'f_links.html' def get_queryset(self): return DLink.objects.filter(file=self.kwargs['id']) def get_context_data(self, **kwargs): context = super(DLinkList, self).get_context_data(**kwargs) context['file'] = get_object_or_404(File, id=self.kwargs['id']) return context class DLinkCreate(View): def get(self, request, *args, **kwargs): file_object = get_object_or_404(File, id=self.kwargs['id']) dlink_new = DLink(file=file_object) dlink_new.save() return HttpResponseRedirect( reverse('dlink-list', args=[file_object.id]) ) class DLinkOpen(View): def get(self, request, *args, **kwargs): key = self.kwargs['key'] dlink = get_object_or_404(DLink, id=key) file = dlink.file # visitor_ip = get_real_ip(request) visitor_ip = '192.168.3.11' request_stats = RequestInfo(file=file, ip=visitor_ip, url=key) request_stats.save() return render(request, 'link_open.html', {'file': file}) class DirectoryList(ListView): model = Directory template_name = 'list_dirs.html' class DirectoryCreate(CreateView): model = Directory template_name = 'create_dir.html' fields = ['name', 'public'] class DirectoryDetail(DetailView): model = Directory template_name = 'detail_dir.html' <file_sep>from django.db import models import uuid class Directory(models.Model): name = models.CharField(max_length=100) public = models.BooleanField(default=False) def get_absolute_url(self): return '/dir/' class File(models.Model): id = models.IntegerField(primary_key=True) file = models.FileField(upload_to='files') # TODO: user based dir = models.ForeignKey(Directory, blank=True, null=True) # stats = models.ForeignKey() def __str__(self): return str(self.file) class RequestInfo(models.Model): """ Informations about request for specific file """ file = models.ForeignKey(File) ip = models.GenericIPAddressField() url = models.CharField(max_length=100) class DLink(models.Model): """ Download link for specific file """ file = models.ForeignKey(File) id = 
models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) class UploadLink(models.Model): file = models.ForeignKey(File) id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) <file_sep>from django.conf.urls import url from . import views urlpatterns = [ url(r'^$', views.FileList.as_view(), name='file-list'), url(r'^upload/$', views.FileCreate.as_view(), name='file-create'), url(r'^file/(?P<id>[0-9]+)/$', views.file_details, name='file-detail'), url(r'^file/(?P<id>[0-9]+)/stats/$', views.StatsDetail.as_view(), name='stats-detail'), url(r'^file/(?P<id>[0-9]+)/dlinks/$', views.DLinkList.as_view(), name='dlink-list'), url(r'^file/(?P<id>[0-9]+)/links/add/$', views.DLinkCreate.as_view(), name='dlink-create'), url(r'^link/(?P<key>[0-9a-z-]+)/$', views.DLinkOpen.as_view(), name='dlink-open'), url(r'^dir/$', views.DirectoryList.as_view(), name='dir-list'), url(r'^dir/add/$', views.DirectoryCreate.as_view(), name='dir-create') ] <file_sep>django==1.10.5 django-ipware==1.1.6
1e4f35ff2dc9e355fb29caa084b0c8c186ca1317
[ "Text", "Python" ]
4
Text
konradbnet/FileShare
c2a5a5d2a7b4210753a1bf19d7394136e25afab7
d9b21c76c5d775d1c16484ae6b12a3524dd3f40f
refs/heads/main
<file_sep># hello-world A new repository Hi! I'm new to IT; currently I'm trying to learn Python :)
4e53194085271c4516b3fb79e77f320f5c53cb2b
[ "Markdown" ]
1
Markdown
bubbles-beep/hello-world
b85e9a1a1408e732b2f02493c5bbe7b0aaebb916
6ce38d9165d237450dd9b08b863c0ed58b7a0a55
refs/heads/master
<file_sep># fedslscrollreveal.github.io<file_sep><!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"> <meta http-equiv="X-UA-Compatible" content="IE=edge"> <meta name="viewport" content="width=device-width, initial-scale=1"> <title>FEDSL Parallax Scroll Reveal</title> <link rel="stylesheet" type="text/css" href="css/custom.min.css"> <link rel="stylesheet" type="text/css" href="css/style.css"> </head> <body> <header class="header"> <div class="back-head"></div> <div class="middle-head"></div> <div class="front-head"></div> </header> <section class="content"> <h1>Lorem Ipsum</h1> <p class="para">Curabitur non nulla sit amet nisl tempus convallis quis ac lectus. Pellentesque in ipsum id orci porta dapibus. Curabitur aliquet quam id dui posuere blandit. Curabitur arcu erat, accumsan id imperdiet et, porttitor at sem. Proin eget tortor risus. Pellentesque in ipsum id orci porta dapibus. Quisque velit nisi, pretium ut lacinia in, elementum id enim. Cras ultricies ligula sed magna dictum porta. Nulla quis lorem ut libero malesuada feugiat. Curabitur non nulla sit amet nisl tempus convallis quis ac lectus.</p> <div class="image-grid"> <div class="row"> <div class="columns"> <img src="images/grid-1.jpg" alt=""> </div> <div class="columns"> <img src="images/grid-2.jpg" alt=""> </div> <div class="columns"> <img src="images/grid-1.jpg" alt=""> </div> </div> <div class="row"> <div class="columns"> <img src="images/grid-2.jpg" alt=""> </div> <div class="columns"> <img src="images/grid-1.jpg" alt=""> </div> <div class="columns"> <img src="images/grid-2.jpg" alt=""> </div> </div> <div class="row"> <div class="columns"> <img src="images/grid-1.jpg" alt=""> </div> <div class="columns"> <img src="images/grid-2.jpg" alt=""> </div> <div class="columns"> <img src="images/grid-1.jpg" alt=""> </div> </div> </div> </section> <div style='min-height:2000px;'></div> <script defer src="js/jquery.min.js"></script> <script defer src="js/scrollreveal.min.js"></script> <script defer src="js/functions.min.js"></script> </body> </html>
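Editor's note: the page above loads jQuery, ScrollReveal, and a minified functions.js whose readable source is not included in the repository. The following is a hypothetical sketch, not the project's actual script, of how such a file could reveal the `.para` text and the `.image-grid .columns` cells using ScrollReveal's documented reveal API; the selectors come from the markup above, while the option values are assumptions.
```js
// Hypothetical sketch of a functions.js for this page (values are assumed).
window.sr = ScrollReveal();
// Fade the intro paragraph up from the bottom as it scrolls into view.
sr.reveal('.para', { distance: '30px', origin: 'bottom', duration: 800 });
// Stagger the image grid cells so they appear one after another.
sr.reveal('.image-grid .columns', { interval: 150, scale: 0.9 });
```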
2293f18a34f874915f6a89e9eb18ca26fef1928b
[ "Markdown", "HTML" ]
2
Markdown
willow1149/fedslscrollreveal.github.io
f3f3ca8367e494c4b837bf26f6e9988fc76b8d08
5030d6f80d1f9018816eb5d974fde6eb9de36aee
refs/heads/master
<file_sep>package de.evoila.cf.cpi.bosh; import de.evoila.cf.broker.bean.BoshProperties; import de.evoila.cf.broker.model.DashboardClient; import de.evoila.cf.broker.model.Plan; import de.evoila.cf.broker.model.ServiceInstance; import de.evoila.cf.broker.repository.PlatformRepository; import de.evoila.cf.broker.service.CatalogService; import de.evoila.cf.broker.service.availability.ServicePortAvailabilityVerifier; import io.bosh.client.deployments.Deployment; import io.bosh.client.errands.ErrandSummary; import io.bosh.client.vms.Vm; import org.springframework.boot.autoconfigure.condition.ConditionalOnBean; import org.springframework.core.env.Environment; import org.springframework.stereotype.Service; import rx.Observable; import java.util.List; import java.util.Optional; /** * Created by reneschollmeyer, evoila on 28.02.18. */ @Service @ConditionalOnBean(BoshProperties.class) public class MySQLBoshPlatformService extends BoshPlatformService { private static final int defaultPort = 3306; public MySQLBoshPlatformService(PlatformRepository repository, CatalogService catalogService, ServicePortAvailabilityVerifier availabilityVerifier, BoshProperties boshProperties, Optional<DashboardClient> dashboardClient, Environment environment) { super(repository, catalogService, availabilityVerifier, boshProperties, dashboardClient, new MySQLDeploymentManager(boshProperties, environment)); } public void runCreateErrands(ServiceInstance instance, Plan plan, Deployment deployment, Observable<List<ErrandSummary>> errands) {} protected void runUpdateErrands(ServiceInstance instance, Plan plan, Deployment deployment, Observable<List<ErrandSummary>> errands) {} protected void runDeleteErrands(ServiceInstance instance, Deployment deployment, Observable<List<ErrandSummary>> errands) { } @Override protected void updateHosts (ServiceInstance serviceInstance, Plan plan, Deployment deployment) { List<Vm> vms = super.getVms(serviceInstance); serviceInstance.getHosts().clear(); vms.forEach(vm -> serviceInstance.getHosts().add(super.toServerAddress(vm, defaultPort))); } @Override public void postDeleteInstance(ServiceInstance serviceInstance) { } } <file_sep># Service Broker This repository is part of our service broker project. For documentation see [evoila/cf-service-broker](https://github.com/evoila/cf-service-broker) # cf-service-broker-mysql Cloud Foundry Service Broker providing MySQL (MariaDB) Service Instances. Supports deployment to OpenStack and Existing MySQL servers. Configuration files and deployment scripts must be added.
4743e08f69a78e00966c163aebfb093893ef6647
[ "Java", "Markdown" ]
2
Java
AdaptiveThinking/osb-mysql
19bfdde2cc6dab1dd667f9b1027a5cdc60ff208e
f2f4c648d8ebad93961e95ce3cf18a7247a10c22
refs/heads/master
<repo_name>hereserin/chatterbox<file_sep>/frontend/components/chat_index_item.jsx import React from "react"; import { Link, withRouter } from "react-router-dom"; import { fetchChat } from "../actions/chat_actions"; import { clearSortOrderIds } from "../actions/order_actions"; import { connect } from "react-redux"; const ChatIndexItem = props => { const chatname = props.chatname[props.chatId] ? props.chatname[props.chatId].chatroom_name : "***Nuthin***"; return ( <div className="chat-index-item" onClick={() => { props.clearSortOrderIds(); props.handleClick(props.chatId); props.history.push(`/chats/${props.chatId}`); }} > {chatname} </div> ); }; const mapStateToProps = (state, ownProps) => { return { chatname: state.entities.chats }; }; const mapDispatchToProps = dispatch => { return { fetchChat: id => { return dispatch(fetchChat(id)); }, clearSortOrderIds: () => { return dispatch(clearSortOrderIds()); } }; }; export default withRouter( connect( mapStateToProps, mapDispatchToProps )(ChatIndexItem) ); <file_sep>/app/views/api/chat_messages/index.json.jbuilder json.messages do @chat.chat_messages.each do |message| json.set! message.id do json.partial! 'message', message: message end end end json.order @chat.chat_messages.pluck(:id) json.users do @chat.chat_messages.each do |message| json.set! message.user.id do json.extract! message.user, :id, :username end end end <file_sep>/app/channels/application_cable/channel.rb module ApplicationCable class Channel < ActionCable::Channel::Base end private end <file_sep>/frontend/actions/message_actions.jsx import * as MessageAPIUtil from "../util/message_api_util"; export const RECEIVE_MESSAGE = "RECEIVE_MESSAGE"; export const RECEIVE_MESSAGES = "RECEIVE_MESSAGES"; export const RECEIVE_MESSAGE_ERRORS = "RECEIVE_MESSAGE_ERRORS"; export const START_LOADING_MESSAGES = "START_LOADING_MESSAGES"; export const START_LOADING_MESSAGE = "START_LOADING_MESSAGE"; export const submitMessage = message => dispatch => MessageAPIUtil.makeMessage(message).then( message => { return dispatch(receiveMessage(message)); }, errors => dispatch(receiveMessageErrors(errors)) ); export const fetchMessage = id => dispatch => { MessageAPIUtil.fetchMessage(id).then( message => dispatch(receiveMessage(message)), errors => dispatch(receiveMessageErrors(errors)) ); }; export const fetchMessages = chatId => dispatch => { dispatch(startLoadingMessages()); return MessageAPIUtil.fetchMessages(chatId).then( messages => dispatch(receiveMessages(messages)), errors => dispatch(receiveMessageErrors(errors)) ); }; const receiveMessage = ({ message, order, user }) => { return { type: RECEIVE_MESSAGE, message: message, user: user }; }; const receiveMessages = ({ messages, order, users }) => { return { type: RECEIVE_MESSAGES, messages: messages, users: users, order: order }; }; const receiveMessageErrors = errors => { return { type: RECEIVE_MESSAGE_ERRORS, errors: errors }; }; export const startLoadingMessages = () => ({ type: START_LOADING_MESSAGES }); export const startLoadingMessage = () => ({ type: START_LOADING_MESSAGE }); <file_sep>/app/models/chat_message.rb class ChatMessage < ApplicationRecord validates :content, presence: true belongs_to :chat, class_name: :Chat belongs_to :user, class_name: :User after_create_commit do ChatMessageCreationEventBroadcastJob.perform_later(self) end def formatted_time self.created_at.strftime('%H:%M') end end <file_sep>/frontend/components/new_chat_form.jsx import React from "react"; import { Link, withRouter } from "react-router-dom"; import { connect } from 
"react-redux"; import { closeModal } from "../actions/modal_actions"; import { modalMaker } from "./modal"; import { submitChat } from "../actions/chat_actions"; class NewChatForm extends React.Component { constructor(props) { super(props); this.handleChange = this.handleChange.bind(this); this.handleSubmit = this.handleSubmit.bind(this); this.showErrors = this.showErrors.bind(this); this.handleSessionSubmissionResponse = this.handleSessionSubmissionResponse.bind( this ); this.state = { newChatName: "" }; } handleChange(e) { this.setState({ newChatName: e.target.value }); } handleSubmit() { const newChat = { name: this.state.newChatName }; this.props.submitChat(newChat); this.handleSessionSubmissionResponse(); this.setState({ newChatName: "" }); } handleSessionSubmissionResponse() { if (this.props.errors.length === 0) { if (this.props.currentUserId !== null) { this.props.closeModal(); } } } showErrors() { // if (this.props.errors) { // const errorsList = this.props.errors.forEach(error => { // return <li>error</li>; // }); // return <ol>{errorsList}</ol>; // } else { // return null; // } } render() { return ( <div className="new-chat-form"> Start a Chat <span> <input type="text" placeholder="Name your Chat" value={this.state.newChatName} onChange={this.handleChange} /> <Link to="/chats"> <button onClick={this.handleSubmit}>Create Chat</button> </Link> </span> </div> ); } } const mapStateToProps = state => { return { errors: state.errors.chat }; }; const mapDispatchToProps = dispatch => { return { submitChat: chat => { dispatch(submitChat(chat)); }, closeModal: () => { dispatch(closeModal()); } }; }; export default modalMaker( connect( mapStateToProps, mapDispatchToProps )(NewChatForm) ); <file_sep>/app/controllers/api/chats_controller.rb class Api::ChatsController < ApplicationController def index @chats = Chat.order(:created_at) render 'api/chats/index' end def show @chat = Chat.find(params[:id]) render 'api/chats/show' end def create new_chat_input = { user_id: current_user.id, chatroom_name: chat_params[:name] } @chat = Chat.new(new_chat_input) if @chat.save render 'api/chats/show' else render json: @chat.errors.full_messages, status: 422 end end private def chat_params params.require(:chat).permit(:name) end end <file_sep>/frontend/components/components/chat_show.jsx import React from "react"; import { Link } from "react-router-dom"; import { connect } from "react-redux"; import { fetchChat } from "../actions/chat_actions"; import { clearSortOrderIds } from "../actions/order_actions"; import Loader from "./loading_symbol"; import MessageInChat from "./message_in_chat"; import MessageIndex from "./messages_index"; import NewMessageForm from "./new_message_form"; import { fetchMessages } from "../actions/message_actions"; import { ActionCableConsumer } from "react-actioncable-provider"; import Cable from "./cable"; class ChatShow extends React.Component { constructor(props) { super(props); this.handleReceivedMessage = this.handleReceivedMessage.bind(this); this.state = { messages: [], activeChat: null }; } componentDidMount() { this.props.clearSortOrderIds(); this.props.fetchChat(this.props.chatId); this.props.fetchMessages(this.props.chatId); } handleReceivedChat(response) { debugger; const { chat } = response; console.log(); this.setState({ messages: [...this.state.messages, chat] }); } handleReceivedMessage(response) { debugger; console.log("SUCCESS"); console.log(response); // const { message } = response; // const chats = [...this.state.chats]; // const chat = chats.find(chat => 
chat.id === message.chat_id); // chat.message = [...chat.messages, message]; // this.setState({ chat }); } handleRejectedMessage() { console.log("REJECTED"); } handleConnectedMessage() { console.log("CONNECTED"); } render() { if (this.props.loading) { return <Loader />; } // <ActionCableConsumer // channel={{ channel: "ChatsChannel" }} // onReceived={this.handleReceivedChat} // /> // {this.state.chats.length ? ( // <Cable // chats={chats} // handleReceivedMessage={this.handleReceivedMessage} // /> // ) : null} // <ActionCableConsumer // channel={{ channel: "MessagesChannel", chat: this.props.chatId }} // onReceived={this.handleReceivedMessage} // /> return ( <div> <p>actioncablehere:</p> <ActionCableConsumer channel={{ channel: "ChatsChannel", chat: this.props.chatId }} onReceived={this.handleReceivedMessage} onRejected={this.handleRejectedMessage} onConnected={this.handleConnectedMessage} /> <h2> Chat no. {this.props.chatId} : </h2> <MessageIndex chatId={this.props.chatId} /> <NewMessageForm /> </div> ); } } const mapStateToProps = ({ order, ui, entities }, ownProps) => { return { messageIds: order, loading: ui.loading.index, chatId: ownProps.match.params.chatId, chat: entities.chats[ownProps.match.params.chatId] }; }; const mapDispatchToProps = dispatch => { return { fetchChat: id => { return dispatch(fetchChat(id)); }, clearSortOrderIds: () => { return dispatch(clearSortOrderIds()); }, fetchMessages: chatId => { return dispatch(fetchMessages(chatId)); } }; }; export default connect( mapStateToProps, mapDispatchToProps )(ChatShow); <file_sep>/frontend/components/components/login_form_container.jsx import { connect } from "react-redux"; import { login, loginDefault } from "./../actions/session_actions"; import SessionForm from "./session_form"; import { withRouter } from "react-router-dom"; const mapStateToProps = state => { return { errors: state.errors.session, currentUserId: state.session.id, formType: "login", formTitle: "Log in" }; }; const mapDispatchToProps = dispatch => { return { processForm: user => { dispatch(login(user)); }, processDemo: user => { dispatch(loginDefault()); }, closeModal: modal => dispatch(closeModal()), openModal: type => dispatch(openModal(type)) }; }; export default withRouter( connect( mapStateToProps, mapDispatchToProps )(SessionForm) ); <file_sep>/frontend/components/greeting.jsx import { Link, withRouter } from "react-router-dom"; // import LogoutButton from "./logout_button.jsx"; import React from "react"; import { connect } from "react-redux"; import { logout } from "./../actions/session_actions"; const Greeting = props => { let greetingMessage; const logoutButton = ( <div> <button onClick={props.logout} className="logout-button"> Log Out </button> </div> ); if (props.currentUser) { greetingMessage = ( <div> <li>Hi, {props.currentUser.username}</li> {logoutButton} </div> ); } else { greetingMessage = ( <div className="login-signup"> <Link to="/login">Login</Link> &nbsp;or&nbsp; <Link to="/signup">Sign up!</Link> </div> ); } return greetingMessage; }; // commented out stuff: // <Link to='/signup' className="sign-up-nav-link">Sign Up</Link> // <Link to='/login' className="login-nav-link">Sign In</Link> const mapStateToProps = ({ session, entities }) => { return { currentUser: entities.users[session.id] }; }; const mapDispatchToProps = dispatch => { return { logout: () => dispatch(logout()) }; }; export default connect( mapStateToProps, mapDispatchToProps )(Greeting); <file_sep>/frontend/util/message_api_util.js export const makeMessage = message => 
$.ajax({ method: "POST", url: `/api/chat_messages`, data: { message } }); export const fetchMessages = chat_id => { return $.ajax({ method: "GET", url: `/api/chats/${chat_id}/chat_messages` }); }; export const fetchMessage = id => $.ajax({ method: "GET", url: `/api/chat_messages/${id}` }); <file_sep>/frontend/reducers/sort_order_reducer.jsx import { merge } from "lodash"; import { RECEIVE_CURRENT_USER, LOGOUT_CURRENT_USER } from "./../actions/session_actions"; import { RECEIVE_CHATS, RECEIVE_CHAT } from "./../actions/chat_actions"; import { CLEAR_ORDER } from "./../actions/order_actions"; import { RECEIVE_MESSAGE, RECEIVE_MESSAGES } from "./../actions/message_actions"; const initialState = []; const sortingOrderReducer = (state = initialState, action) => { Object.freeze(state); switch (action.type) { case RECEIVE_CHATS: return action.order; case RECEIVE_CHAT: let newState = [action.order, ...state]; return newState; case RECEIVE_MESSAGES: return action.order; case RECEIVE_MESSAGE: let newState2 = [...state, action.message.id]; return newState2; case LOGOUT_CURRENT_USER: case CLEAR_ORDER: return initialState; default: return state; } }; export default sortingOrderReducer; <file_sep>/frontend/components/components/session_form.jsx import React from "react"; import { Link } from "react-router-dom"; import { withRouter } from "react-router-dom"; class SessionForm extends React.Component { constructor(props) { super(props); this.handleSubmit = this.handleSubmit.bind(this); this.notThisFormLink = this.notThisFormLink.bind(this); this.handleChange = this.handleChange.bind(this); this.handleSessionSubmissionResponse = this.handleSessionSubmissionResponse.bind( this ); this.state = { username: "", password: "" }; } handleChange(type) { return e => { this.setState({ [type]: e.target.value }); }; } handleSubmit(inputType) { return e => { e.preventDefault(); const user = Object.assign({}, this.state); if (inputType === "demo") { this.props.processDemo(); } else { this.props.processForm(user); } this.setState({ password: "" }); }; } handleSessionSubmissionResponse() { if (this.props.errors.length === 0) { if (this.props.currentUserId !== null) { this.props.history.push(`/chats`); } } } componentDidMount() { this.handleSessionSubmissionResponse(); } errorsList() { const currentErrors = this.props.errors.map((error, idx) => { return <li key={idx}>{error}</li>; }); return <ul className="errors-list">{currentErrors}</ul>; } notThisFormLink() { if (this.props.formType === "login") { return ( <p> Don't have an account yet? <Link to="/signup">Sign up</Link> today, and start chatting! 
</p> ); } return ( <p> Already have an account?&nbsp;<Link to="/login">Login here</Link> </p> ); } render() { let inputClass = "session-form-modal-box-input"; if (Object.keys(this.props.errors).length > 0) { inputClass = "session-form-modal-box-input-with-errors"; } return ( <div className="session-form"> <span> <h2>{this.props.formTitle}</h2> <ul>{this.errorsList()}</ul> <form onSubmit={this.handleSubmit("user")}> <input type="text" placeholder="email" className={inputClass} value={this.state.username} onChange={this.handleChange("username")} /> <br /> <input type="password" placeholder="<PASSWORD>" className={inputClass} value={this.state.password} onChange={this.handleChange("password")} /> <br /> <button>{this.props.formType}</button> </form> <form onSubmit={this.handleSubmit("demo")}> <button>demo user</button> </form> {this.notThisFormLink()} </span> </div> ); } } export default withRouter(SessionForm); <file_sep>/app/controllers/api/chat_messages_controller.rb class Api::ChatMessagesController < ApplicationController def index id = params[:chat_id] @chat = Chat.includes(:chat_messages).find(id) # @chat = Chat.includes(:messages).includes(:users).order(:created_at).find(id) @messages = @chat.chat_messages render 'api/chat_messages/index' end def show @message = ChatMessage.includes(:user).find(params[:id]) render 'api/chat_messages/show' end def create new_message_input = { user_id: current_user.id, chat_id: message_params[:chat_id].to_i, content: message_params[:body] } @message = ChatMessage.new(new_message_input) chat = Chat.find(params[:message][:chat_id].to_i) chat_id = chat.id if @message.save render 'api/chat_messages/show' else render json: @message.errors.full_messages, status: 422 end end private def message_params params.require(:message).permit(:chat_id, :body) end end <file_sep>/frontend/components/messages_index.jsx import React from "react"; import MessageInChat from "./message_in_chat"; import { Link } from "react-router-dom"; import { connect } from "react-redux"; import { fetchMessages } from "../actions/message_actions"; import Loader from "./loading_symbol"; const MessageIndex = props => { const composeListItems = () => { if (props.messageIds[0] === undefined) { return "No messages yet..."; } const messageFeed = props.messageIds.map(messageId => { return <MessageInChat messageId={messageId} key={messageId} />; }); return messageFeed; }; if (props.loading) { return <Loader />; } return <ul className="message-index">{composeListItems()}</ul>; }; const mapStateToProps = ({ order, ui }) => { return { messageIds: order, loading: ui.loading.index }; }; const mapDispatchToProps = dispatch => { return { fetchMessages: chatId => { return dispatch(fetchMessages(chatId)); } }; }; export default connect( mapStateToProps, mapDispatchToProps )(MessageIndex); <file_sep>/app/views/api/chat_messages/show.json.jbuilder json.message do json.extract! @message, :id, :created_at, :content, :user_id end json.user do json.extract! @message.user, :id, :username end json.order do json.extract! 
@message, :id end <file_sep>/frontend/components/chatroom_area.jsx import React, { Component } from "react"; import Cable from "actioncable"; import { connect } from "react-redux"; import Loader from "./loading_symbol"; import { clearSortOrderIds } from "../actions/order_actions"; import { fetchMessages } from "../actions/message_actions"; import { fetchChat } from "../actions/chat_actions"; class ChatroomArea extends Component { constructor(props) { super(props); this.state = { currentChatMessage: "", chatLogs: [], chatroomName: "* room name *" }; } componentWillMount() { this.createSocket(); } componentDidMount() { this.props.clearSortOrderIds(); this.props .fetchMessages(this.props.chatId) .then(() => this.setState({ chatLogs: this.mapMessagesToChatLog() })); this.props.fetchChat(this.props.chatId).then(() => this.setState({ chatroomName: this.props.chats[this.props.chatId].chatroom_name }) ); this.scrollToBottom(); } componentDidUpdate() { this.scrollToBottom(); } scrollToBottom() { this.messagesEnd.scrollIntoView({ behavior: "smooth" }); } mapMessagesToChatLog() { const mapped = this.props.messageIds.map(messageId => { return this.props.messages[messageId]; }); return mapped; } updateCurrentChatMessage(event) { this.setState({ currentChatMessage: event.target.value }); } handleSendEvent(event) { event.preventDefault(); this.chats.create({ chat_id: this.props.match.params.chatId, content: this.state.currentChatMessage, user_id: this.props.currentUser }); this.setState({ currentChatMessage: "" }); } handleChatInputKeyPress(event) { if (event.key === "Enter") { this.handleSendEvent(event); } //end if } createSocket() { let cable = Cable.createConsumer("/cable"); this.chats = cable.subscriptions.create( { channel: "ChatChannel" }, { connected: () => {}, received: data => { let chatLogs = this.state.chatLogs; chatLogs.push(data); this.setState({ chatLogs: chatLogs }); }, create: function(chatMessage) { this.perform("create", chatMessage); } } ); } renderChatLog() { const chatLog = this.state.chatLogs ? ( this.state.chatLogs.map(el => { return ( <li key={`chat_${el.id}`}> <div className="message"> <div className="message-info"> <span className="message-user">{el.user_id}</span> <span className="message-created-at"> {el.formatted_time} </span>{" "} </div> <span className="chat-message-container"> <div className="chat-message">{el.content}</div> </span> </div> </li> ); }) ) : ( <li>no messages yet</li> ); return chatLog; } render() { return ( <div className="chatroom-area"> <div className="stage"> <h1 className="chat-show-title">{this.state.chatroomName}</h1> <div className="chat-logs-container"> <ul className="chat-logs">{this.renderChatLog()}</ul> <div style={{ float: "left", clear: "both" }} ref={el => { this.messagesEnd = el; }} /> </div> <div className="chat-input-area"> <input onKeyPress={e => this.handleChatInputKeyPress(e)} value={this.state.currentChatMessage} onChange={e => this.updateCurrentChatMessage(e)} type="text" placeholder="Send message..." 
className="chat-input" /> <button onClick={e => this.handleSendEvent(e)} className="send"> <i class="fas fa-paper-plane" /> </button>{" "} </div> </div> </div> ); } } const mapStateToProps = ({ order, ui, entities, session }, ownProps) => { return { messages: entities.messages, loading: ui.loading.index, chatId: ownProps.match.params.chatId, currentUser: session.id, messageIds: order, chats: entities.chats }; }; const mapDispatchToProps = dispatch => { return { fetchChat: chatId => { return dispatch(fetchChat(chatId)); }, fetchMessages: chatId => { return dispatch(fetchMessages(chatId)); }, clearSortOrderIds: () => { return dispatch(clearSortOrderIds()); } }; }; export default connect( mapStateToProps, mapDispatchToProps )(ChatroomArea); <file_sep>/frontend/reducers/entities_reducer.jsx import { combineReducers } from "redux"; import usersReducer from "./user_reducer"; import chatsReducer from "./chat_reducer"; import messagesReducer from "./message_reducer"; const entitiesReducer = combineReducers({ users: usersReducer, chats: chatsReducer, messages: messagesReducer }); export default entitiesReducer; <file_sep>/app/views/api/chats/show.json.jbuilder json.chat do json.partial! 'chat', chat: @chat end <file_sep>/frontend/actions/order_actions.jsx export const CLEAR_ORDER = "CLEAR_ORDER"; export const clearSortOrderIds = () => { return { type: CLEAR_ORDER }; }; <file_sep>/app/views/api/chats/_chat.json.jbuilder json.extract! chat, :id, :chatroom_name, :created_at <file_sep>/frontend/components/modal.jsx import React from "react"; import { connect } from "react-redux"; import { closeModal } from "../actions/modal_actions"; const asModal = SomeComponent => { const Modal = props => { if (!props.modal) { return null; } return ( <div className="modal-background" onClick={() => { props.history.goBack(); props.closeModal(); }} > <i className="fas fa-times closing-x" /> <div className="modal-child" onClick={e => e.stopPropagation()}> <SomeComponent /> </div> </div> ); }; return Modal; }; const mapStateToProps = state => { return { modal: state.ui.modal }; }; const mapDispatchToProps = dispatch => { return { closeModal: () => dispatch(closeModal()) }; }; export const modalMaker = SomeForm => { return connect( mapStateToProps, mapDispatchToProps )(asModal(SomeForm)); }; <file_sep>/app/views/api/chats/index.json.jbuilder json.chats do @chats.each do |chat| json.set! chat.id do json.partial! 
'chat', chat: chat end end end json.order @chats.pluck(:id) <file_sep>/frontend/util/chat_api_util.js export const makeChat = chat => $.ajax({ method: "POST", url: `/api/chats`, data: { chat } }); export const fetchChats = () => $.ajax({ method: "GET", url: `/api/chats` }); export const fetchChat = id => $.ajax({ method: "GET", url: `/api/chats/${id}` }); <file_sep>/frontend/actions/chat_actions.jsx import * as ChatAPIUtil from "../util/chat_api_util"; export const RECEIVE_CHAT = "RECEIVE_CHAT"; export const RECEIVE_CHATS = "RECEIVE_CHATS"; export const RECEIVE_CHAT_ERRORS = "RECEIVE_CHAT_ERRORS"; export const START_LOADING_CHATS = "START_LOADING_CHATS"; export const START_LOADING_CHAT = "START_LOADING_CHAT"; export const submitChat = chat => dispatch => ChatAPIUtil.makeChat(chat).then( chat => { return dispatch(receiveChat(chat)); }, errors => dispatch(receiveChatErrors(errors)) ); // export const submitChat = chat => { // return dispatch => { // return ChatAPIUtil.makeChat(chat).then( // chat => { // return dispatch(receiveChat(chat)); // }, // errors => { // return dispatch(receiveChatErrors(errors)); // } // ); // }; // }; export const fetchChat = id => dispatch => { dispatch(startLoadingChat()); return ChatAPIUtil.fetchChat(id).then( chat => dispatch(receiveChat(chat)), errors => dispatch(receiveChatErrors(errors)) ); }; export const fetchChats = () => dispatch => { dispatch(startLoadingChats()); return ChatAPIUtil.fetchChats().then( chats => dispatch(receiveChats(chats)), errors => dispatch(receiveChatErrors(errors)) ); }; const receiveChat = ({ chat, order }) => { return { type: RECEIVE_CHAT, chat: chat }; }; const receiveChats = ({ chats, order }) => { return { type: RECEIVE_CHATS, chats: chats, order: order }; }; const receiveChatErrors = errors => { return { type: RECEIVE_CHAT_ERRORS, errors: errors }; }; export const startLoadingChats = () => ({ type: START_LOADING_CHATS }); export const startLoadingChat = () => ({ type: START_LOADING_CHAT }); <file_sep>/app/channels/chat_channel.rb class ChatChannel < ApplicationCable::Channel def subscribed stream_from 'chat_channel' end def unsubscribed; end def create(opts) # debugger # ChatMessage.create( # content: opts.fetch('content') # ) new_message_input = { user_id: opts.fetch('user_id'), chat_id: opts.fetch('chat_id').to_i, content: opts.fetch('content') } message = ChatMessage.new(new_message_input) if message.save # render 'api/chat_messages/show' else # render json: message.errors.full_messages, status: 422 end end end <file_sep>/frontend/reducers/message_reducer.jsx import { merge } from "lodash"; import { RECEIVE_MESSAGE, RECEIVE_MESSAGES } from "./../actions/message_actions"; const initialState = {}; const messagesReducer = (state = initialState, action) => { Object.freeze(state); switch (action.type) { case RECEIVE_MESSAGE: return merge({}, state, { [action.message.id]: action.message }); case RECEIVE_MESSAGES: return merge({}, state, action.messages); default: return state; } }; export default messagesReducer; <file_sep>/frontend/components/components/nav_bar.jsx import React from "react"; import { connect } from "react-redux"; import { logout } from "./../actions/session_actions"; const NavBar = props => { return ( <nav> <div onClick={props.logout}> <i className="fas fa-bars" /> </div> <div onClick={() => { props.history.push(`/chats`); }} > Chatterbox </div> <div> <i className="fas fa-search" /> </div> </nav> ); }; const mapDispatchToProps = dispatch => { return { logout: () => dispatch(logout()) }; }; export default 
connect( null, mapDispatchToProps )(NavBar); <file_sep>/app/views/api/chat_messages/_message.json.jbuilder json.extract! message, :id, :created_at, :formatted_time, :content, :user_id <file_sep>/frontend/reducers/loading_reducer.jsx const initialState = { index: false, showItem: false }; const loadingReducer = (state = initialState, action) => { Object.freeze(state); switch (action.type) { default: return state; } }; export default loadingReducer; <file_sep>/frontend/reducers/chat_reducer.jsx import { merge } from "lodash"; import { RECEIVE_CHAT, RECEIVE_CHATS } from "./../actions/chat_actions"; const initialState = {}; const chatsReducer = (state = initialState, action) => { Object.freeze(state); switch (action.type) { case RECEIVE_CHAT: return merge({}, state, { [action.chat.id]: action.chat }); case RECEIVE_CHATS: return merge({}, state, action.chats); default: return state; } }; export default chatsReducer; <file_sep>/frontend/components/new_chat_button.jsx import React from "react"; import { openModal } from "../actions/modal_actions"; import { withRouter } from "react-router-dom"; import { connect } from "react-redux"; const NewChatButton = props => { return ( <div className="new-chat-button" onClick={() => { props.openModal(); props.history.push(`/chats/new`); }} > <i className="fas fa-edit" /> </div> ); }; const mapDispatchToProps = dispatch => { return { openModal: () => { dispatch(openModal()); } }; }; export default withRouter( connect( null, mapDispatchToProps )(NewChatButton) ); <file_sep>/app/models/chat.rb class Chat < ApplicationRecord has_many :chat_messages, dependent: :destroy has_many :users, through: :chat_memberships end
00f00b6c44a0ed62d2a3f999a46d3e6cfbbeba97
[ "JavaScript", "Ruby" ]
33
JavaScript
hereserin/chatterbox
b35e66feebe72376115fc354bd3b11174227e5d8
9cea636c7543fc214ddacf3442ed96ecc1af19eb
refs/heads/master
<file_sep>/// SYNTAX TEST "Packages/C#/C#.sublime-syntax" global using Newtonsoft.Json; /// ^^ storage.modifier /// ^^^^^ keyword.control.import /// ^^^^^^^^^^^^^^^ meta.path /// ^ punctuation.separator.namespace /// ^ punctuation.terminator - meta.path global using static Console.WriteLine; /// ^^ storage.modifier /// ^^^^^ keyword.control.import /// ^^^^^^ keyword.control.import /// ^^^^^^^^^^^^^^^^^ meta.path /// ^ punctuation.separator.namespace /// ^ punctuation.terminator namespace Example; ///^^^^^^ meta.namespace storage.type.namespace /// ^^^^^^^ meta.namespace entity.name.namespace /// ^ punctuation.terminator.statement public record struct Person(string Name); /// ^^ storage.modifier.access /// ^^^^^^^^^^^^^^^^^^^^ meta.class.record /// ^^^^^^ storage.type.class.record /// ^^^^^^ storage.type.struct.record /// ^^^^^^ entity.name.class /// ^ punctuation.section.group.begin /// ^^^^^^^^^^^^ meta.class.body meta.method.parameters /// ^^^^^^ storage.type /// ^^^^ variable.parameter /// ^ punctuation.section.parameters.end /// ^ punctuation.terminator.statement public readonly record struct Person(string Name); ///^^^ storage.modifier.access /// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ meta.class.record /// ^^^^^^^^ storage.modifier /// ^^^^^^ storage.type.class.record /// ^^^^^^ storage.type.struct.record /// ^^^^^^ entity.name.class /// ^ punctuation.section.group.begin /// ^^^^^^ storage.type /// ^^^^ variable.parameter /// ^ punctuation.section.parameters.end /// ^ punctuation.terminator.statement
10ffaaeeab1821f0649990be356348c499584ade
[ "C#" ]
1
C#
willstott101/Packages
bf13543d9f06c625039c92d7b986f32aba380adf
e2f76e2d422ce3fbdbb48a286a87ed581505931f